Bitcoin ABC 0.26.3
net_processing.cpp
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <net_processing.h>

#include <addrman.h>
#include <avalanche/avalanche.h>
#include <avalanche/peermanager.h>
#include <avalanche/processor.h>
#include <avalanche/proof.h>
#include <avalanche/validation.h>
#include <banman.h>
#include <blockencodings.h>
#include <blockfilter.h>
#include <blockvalidity.h>
#include <chain.h>
#include <chainparams.h>
#include <config.h>
#include <consensus/amount.h>
#include <consensus/validation.h>
#include <hash.h>
#include <index/blockfilterindex.h>
#include <invrequest.h>
#include <merkleblock.h>
#include <netbase.h>
#include <netmessagemaker.h>
#include <node/blockstorage.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <random.h>
#include <reverse_iterator.h>
#include <scheduler.h>
#include <streams.h>
#include <tinyformat.h>
#include <txmempool.h>
#include <txorphanage.h>
#include <util/check.h> // For NDEBUG compile time check
#include <util/strencodings.h>
#include <util/system.h>
#include <util/trace.h>
#include <validation.h>

#include <algorithm>
#include <functional>
#include <memory>
#include <typeinfo>
using node::fImporting;
using node::fPruneMode;
using node::fReindex;

static constexpr auto RELAY_TX_CACHE_TIME = 15min;
static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
// 20 minutes
static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60;
// 10 minutes
static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60;
static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
static constexpr std::chrono::seconds MINIMUM_CONNECT_TIME{30};
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
static constexpr std::chrono::minutes PING_INTERVAL{2};
static const unsigned int MAX_LOCATOR_SZ = 101;
static const unsigned int MAX_INV_SZ = 50000;
static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv),
              "Max protocol message length must be greater than largest "
              "possible INV message");
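
// Worked example of the bound checked above (the sizes here are
// illustrative assumptions, not normative): a CInv carries a 4-byte type
// plus a 32-byte hash, so with sizeof(CInv) == 36 the largest possible INV
// message is MAX_INV_SZ * 36 = 50000 * 36 = 1,800,000 bytes, which must
// stay below MAX_PROTOCOL_MESSAGE_LENGTH for the assertion to hold.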

static constexpr std::chrono::minutes GETAVAADDR_INTERVAL{2};

static constexpr std::chrono::minutes AVALANCHE_AVAPROOFS_TIMEOUT{2};

struct DataRequestParameters {
    /** Maximum number of in-flight data requests from a peer. */
    const size_t max_peer_request_in_flight;

    /** Maximum number of announcements a peer can have pending. */
    const size_t max_peer_announcements;

    /** How long to delay requesting data from non-preferred peers. */
    const std::chrono::seconds nonpref_peer_delay;

    /** How long to delay requesting data from overloaded peers. */
    const std::chrono::seconds overloaded_peer_delay;

    /** How long to wait (in microseconds) before requesting data from an
     *  additional peer. */
    const std::chrono::microseconds getdata_interval;

    /** Permission flags that allow a peer to bypass the request limits and
     *  delay penalties. */
    const NetPermissionFlags bypass_request_limits_permissions;
};

static constexpr DataRequestParameters TX_REQUEST_PARAMS{
    100,                      // max_peer_request_in_flight
    5000,                     // max_peer_announcements
    std::chrono::seconds(2),  // nonpref_peer_delay
    std::chrono::seconds(2),  // overloaded_peer_delay
    std::chrono::seconds(60), // getdata_interval
    PF_RELAY,                 // bypass_request_limits_permissions
};

static constexpr DataRequestParameters PROOF_REQUEST_PARAMS{
    100,                      // max_peer_request_in_flight
    5000,                     // max_peer_announcements
    std::chrono::seconds(2),  // nonpref_peer_delay
    std::chrono::seconds(2),  // overloaded_peer_delay
    std::chrono::seconds(60), // getdata_interval
    PF_BYPASS_PROOF_REQUEST_LIMITS, // bypass_request_limits_permissions
};
180 
185 static const unsigned int MAX_GETDATA_SZ = 1000;
189 static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
194 static constexpr auto BLOCK_STALLING_TIMEOUT = 2s;
201 static const unsigned int MAX_HEADERS_RESULTS = 2000;
206 static const int MAX_CMPCTBLOCK_DEPTH = 5;
211 static const int MAX_BLOCKTXN_DEPTH = 10;
219 static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
224 static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
228 static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
233 static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
235 static const int MAX_UNCONNECTING_HEADERS = 10;
237 static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
241 static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24h;
245 static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL = 30s;
250 static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL = 5s;
255 static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
257 static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB =
261 static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
270  std::chrono::seconds{1},
271  "INVENTORY_RELAY_MAX too low");
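
// Worked check of the assertion above: INVENTORY_BROADCAST_PER_SECOND *
// UNCONDITIONAL_RELAY_DELAY / 1s = 7 * 120 = 840, well under
// INVENTORY_MAX_RECENT_RELAY = 3500, leaving headroom for higher inv rates
// to outbound peers and for randomness in the broadcast schedule.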

static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL = 10min;
static constexpr auto MAX_FEEFILTER_CHANGE_DELAY = 5min;
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
static constexpr size_t MAX_ADDR_TO_SEND{1000};
static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};

inline size_t GetMaxAddrToSend() {
    return gArgs.GetIntArg("-maxaddrtosend", MAX_ADDR_TO_SEND);
}
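
// A minimal sketch (illustrative only, not used by this file) of the
// token-bucket scheme that MAX_ADDR_RATE_PER_SECOND drives: tokens refill
// at 0.1 per second and each processed address consumes one, so a
// sustained flood is clamped to one address every ten seconds while short
// bursts up to the bucket cap pass through. The initial bucket of 1 lets
// the first address through immediately.
namespace addr_token_bucket_sketch {
struct Bucket {
    double tokens{1.0};

    // Credit the bucket for elapsed time, never exceeding the burst cap.
    void Refill(double elapsed_seconds, double cap) {
        tokens =
            std::min(cap, tokens + MAX_ADDR_RATE_PER_SECOND * elapsed_seconds);
    }

    // Spend one token; returns false when the address should be
    // rate-limited instead of processed.
    bool Consume() {
        if (tokens < 1.0) {
            return false;
        }
        tokens -= 1.0;
        return true;
    }
};
} // namespace addr_token_bucket_sketch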

// Internal stuff
namespace {
struct QueuedBlock {
    const CBlockIndex *pindex;
    std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
};

struct Peer {
    const NodeId m_id{0};

    Mutex m_misbehavior_mutex;
    int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    Mutex m_block_inv_mutex;
    std::vector<BlockHash> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
    std::vector<BlockHash>
        m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);

    BlockHash m_continuation_block GUARDED_BY(m_block_inv_mutex){};

    std::atomic<int> m_starting_height{-1};

    std::atomic<uint64_t> m_ping_nonce_sent{0};
    std::atomic<std::chrono::microseconds> m_ping_start{0us};
    std::atomic<bool> m_ping_queued{false};

    std::vector<CAddress> m_addrs_to_send;
    std::unique_ptr<CRollingBloomFilter> m_addr_known;
    std::atomic_bool m_addr_relay_enabled{false};
    bool m_getaddr_sent{false};
    mutable Mutex m_addr_send_times_mutex;
    std::chrono::microseconds
        m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    std::chrono::microseconds
        m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
    std::atomic_bool m_wants_addrv2{false};
    bool m_getaddr_recvd{false};
    mutable Mutex m_addr_token_bucket_mutex;
    double m_addr_token_bucket GUARDED_BY(m_addr_token_bucket_mutex){1.0};
    std::chrono::microseconds m_addr_token_timestamp{
        GetTime<std::chrono::microseconds>()};
    std::atomic<uint64_t> m_addr_rate_limited{0};
    std::atomic<uint64_t> m_addr_processed{0};

    std::set<TxId> m_orphan_work_set GUARDED_BY(g_cs_orphans);

    Mutex m_getdata_requests_mutex;
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

    explicit Peer(NodeId id) : m_id(id) {}
};
468 
469 using PeerRef = std::shared_ptr<Peer>;
470 
471 class PeerManagerImpl final : public PeerManager {
472 public:
473  PeerManagerImpl(const CChainParams &chainparams, CConnman &connman,
474  AddrMan &addrman, BanMan *banman,
475  ChainstateManager &chainman, CTxMemPool &pool,
476  bool ignore_incoming_txs);
477 
479  void BlockConnected(const std::shared_ptr<const CBlock> &pblock,
480  const CBlockIndex *pindexConnected) override;
481  void BlockDisconnected(const std::shared_ptr<const CBlock> &block,
482  const CBlockIndex *pindex) override;
483  void UpdatedBlockTip(const CBlockIndex *pindexNew,
484  const CBlockIndex *pindexFork,
485  bool fInitialDownload) override;
486  void BlockChecked(const CBlock &block,
487  const BlockValidationState &state) override;
488  void NewPoWValidBlock(const CBlockIndex *pindex,
489  const std::shared_ptr<const CBlock> &pblock) override;
490 
492  void InitializeNode(const Config &config, CNode *pnode) override;
493  void FinalizeNode(const Config &config, const CNode &node) override;
494  bool ProcessMessages(const Config &config, CNode *pfrom,
495  std::atomic<bool> &interrupt) override;
496  bool SendMessages(const Config &config, CNode *pto) override
497  EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing);
498 
500  void StartScheduledTasks(CScheduler &scheduler) override;
501  void CheckForStaleTipAndEvictPeers() override;
502  std::optional<std::string>
503  FetchBlock(const Config &config, NodeId peer_id,
504  const CBlockIndex &block_index) override;
505  bool GetNodeStateStats(NodeId nodeid,
506  CNodeStateStats &stats) const override;
507  bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; }
508  void SendPings() override;
509  void RelayTransaction(const TxId &txid) override;
510  void RelayProof(const avalanche::ProofId &proofid) override;
511  void SetBestHeight(int height) override { m_best_height = height; };
512  void Misbehaving(const NodeId pnode, const int howmuch,
513  const std::string &message) override;
514  void ProcessMessage(const Config &config, CNode &pfrom,
515  const std::string &msg_type, CDataStream &vRecv,
516  const std::chrono::microseconds time_received,
517  const std::atomic<bool> &interruptMsgProc) override;
518 
private:
    void ConsiderEviction(CNode &pto, int64_t time_in_seconds);
    void EvictExtraOutboundPeers(std::chrono::seconds now);

    void ReattemptInitialBroadcast(CScheduler &scheduler);

    void UpdateAvalancheStatistics() const;

    void AvalanchePeriodicNetworking(CScheduler &scheduler) const;

    PeerRef GetPeerRef(NodeId id) const;

    PeerRef RemovePeer(NodeId id);

    // overloaded variant of above to operate on CNode*s
    void Misbehaving(const CNode &node, int howmuch,
                     const std::string &message) {
        Misbehaving(node.GetId(), howmuch, message);
    }

    bool MaybePunishNodeForBlock(NodeId nodeid,
                                 const BlockValidationState &state,
                                 bool via_compact_block,
                                 const std::string &message = "");

    bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state,
                              const std::string &message = "");

    bool MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer);

    void ProcessOrphanTx(const Config &config,
                         std::set<TxId> &orphan_work_set);
    void ProcessHeadersMessage(const Config &config, CNode &pfrom,
                               const Peer &peer,
                               const std::vector<CBlockHeader> &headers,
                               bool via_compact_block);

    void SendBlockTransactions(CNode &pfrom, const CBlock &block,
                               const BlockTransactionsRequest &req);

    void AddTxAnnouncement(const CNode &node, const TxId &txid,
                           std::chrono::microseconds current_time);

    void
    AddProofAnnouncement(const CNode &node, const avalanche::ProofId &proofid,
                         std::chrono::microseconds current_time, bool preferred)
        EXCLUSIVE_LOCKS_REQUIRED(cs_proofrequest);

    void PushNodeVersion(const Config &config, CNode &pnode, int64_t nTime);

    void MaybeSendPing(CNode &node_to, Peer &peer,
                       std::chrono::microseconds now);

    void MaybeSendAddr(CNode &node, Peer &peer,
                       std::chrono::microseconds current_time);

    void MaybeSendFeefilter(CNode &node,
                            std::chrono::microseconds current_time);

    void RelayAddress(NodeId originator, const CAddress &addr, bool fReachable);

    const CChainParams &m_chainparams;
    CConnman &m_connman;
    AddrMan &m_addrman;
    BanMan *const m_banman;
    ChainstateManager &m_chainman;
    CTxMemPool &m_mempool;
    InvRequestTracker<TxId> m_txrequest GUARDED_BY(cs_main);

    Mutex cs_proofrequest;
    InvRequestTracker<avalanche::ProofId>
        m_proofrequest GUARDED_BY(cs_proofrequest);

    std::atomic<int> m_best_height{-1};

    int64_t m_stale_tip_check_time;

    const bool m_ignore_incoming_txs;

    bool m_initial_sync_finished{false};

    mutable Mutex m_peer_mutex;
    std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);

    int nSyncStarted GUARDED_BY(cs_main) = 0;

    std::map<BlockHash, std::pair<NodeId, bool>>
        mapBlockSource GUARDED_BY(cs_main);

    int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;

    bool AlreadyHaveTx(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
    uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);

    Mutex m_recent_confirmed_transactions_mutex;
    std::unique_ptr<CRollingBloomFilter> m_recent_confirmed_transactions
        GUARDED_BY(m_recent_confirmed_transactions_mutex);

    bool IsBlockRequested(const BlockHash &hash);

    void RemoveBlockRequest(const BlockHash &hash);

    bool BlockRequested(const Config &config, NodeId nodeid,
                        const CBlockIndex &block,
                        std::list<QueuedBlock>::iterator **pit = nullptr);

    bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);

    void FindNextBlocksToDownload(NodeId nodeid, unsigned int count,
                                  std::vector<const CBlockIndex *> &vBlocks,
                                  NodeId &nodeStaller);

    std::map<BlockHash, std::pair<NodeId, std::list<QueuedBlock>::iterator>>
        mapBlocksInFlight GUARDED_BY(cs_main);

    std::atomic<int64_t> m_last_tip_update{0};

    CTransactionRef FindTxForGetData(const CNode &peer, const TxId &txid,
                                     const std::chrono::seconds mempool_req,
                                     const std::chrono::seconds now);

    void ProcessGetData(const Config &config, CNode &pfrom, Peer &peer,
                        const std::atomic<bool> &interruptMsgProc)
        EXCLUSIVE_LOCKS_REQUIRED(peer.m_getdata_requests_mutex);

    void ProcessBlock(const Config &config, CNode &node,
                      const std::shared_ptr<const CBlock> &block,
                      bool force_processing);

    typedef std::map<TxId, CTransactionRef> MapRelay;
    MapRelay mapRelay GUARDED_BY(cs_main);

    std::deque<std::pair<std::chrono::microseconds, MapRelay::iterator>>
        g_relay_expiration GUARDED_BY(cs_main);

    void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid);

    std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);

    int m_peers_downloading_from GUARDED_BY(cs_main) = 0;

    TxOrphanage m_orphanage;

    void AddToCompactExtraTransactions(const CTransactionRef &tx);

    std::vector<std::pair<TxHash, CTransactionRef>>
        vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
    size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;

    void ProcessBlockAvailability(NodeId nodeid);
    void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash);
    bool CanDirectFetch(const Consensus::Params &consensusParams);

    bool BlockRequestAllowed(const CBlockIndex *pindex,
                             const Consensus::Params &consensusParams);
    bool AlreadyHaveBlock(const BlockHash &block_hash);
    bool AlreadyHaveProof(const avalanche::ProofId &proofid);
    void ProcessGetBlockData(const Config &config, CNode &pfrom, Peer &peer,
                             const CInv &inv, CConnman &connman);

    bool PrepareBlockFilterRequest(
        CNode &peer, const CChainParams &chain_params,
        BlockFilterType filter_type, uint32_t start_height,
        const BlockHash &stop_hash, uint32_t max_height_diff,
        const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index);

    void ProcessGetCFilters(CNode &peer, CDataStream &vRecv,
                            const CChainParams &chain_params,
                            CConnman &connman);
    void ProcessGetCFHeaders(CNode &peer, CDataStream &vRecv,
                             const CChainParams &chain_params,
                             CConnman &connman);

    void ProcessGetCFCheckPt(CNode &peer, CDataStream &vRecv,
                             const CChainParams &chain_params,
                             CConnman &connman);

    uint32_t GetAvalancheVoteForBlock(const BlockHash &hash);

    bool SetupAddressRelay(CNode &node, Peer &peer);

    bool ReceivedAvalancheProof(CNode &peer, const avalanche::ProofRef &proof);
};
} // namespace

namespace {
Mutex cs_invalidProofs;
std::unique_ptr<CRollingBloomFilter> invalidProofs GUARDED_BY(cs_invalidProofs);

int nPreferredDownload GUARDED_BY(cs_main) = 0;
} // namespace

namespace {
struct CNodeState {
    //! The peer's address.
    const CService address;
    //! The best known block we know this peer has announced.
    const CBlockIndex *pindexBestKnownBlock;
    //! The hash of the last unknown block this peer has announced.
    BlockHash hashLastUnknownBlock;
    //! The last full block we both have.
    const CBlockIndex *pindexLastCommonBlock;
    //! The best header we have sent our peer.
    const CBlockIndex *pindexBestHeaderSent;
    //! Length of current-streak of unconnecting headers announcements.
    int nUnconnectingHeaders;
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted;
    //! When to potentially disconnect peer for stalling headers download.
    std::chrono::microseconds m_headers_sync_timeout{0us};
    //! Since when we're stalling block download progress, or 0.
    std::chrono::microseconds m_stalling_since{0us};
    std::list<QueuedBlock> vBlocksInFlight;
    //! When the first entry in vBlocksInFlight started downloading.
    std::chrono::microseconds m_downloading_since{0us};
    int nBlocksInFlight;
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload;
    //! Whether this peer wants headers (when possible) for block
    //! announcements.
    bool fPreferHeaders;
    //! Whether this peer wants cmpctblocks (when possible) for block
    //! announcements.
    bool fPreferHeaderAndIDs;
    //! Whether this peer will send us cmpctblocks if we request them.
    bool fProvidesHeaderAndIDs;
    bool fSupportsDesiredCmpctVersion;

    struct ChainSyncTimeoutState {
        //! A timeout used for checking whether our peer has sufficient work.
        int64_t m_timeout;
        //! A header with sufficient work on the expected chain.
        const CBlockIndex *m_work_header;
        //! After the timeout is reached, set to true after sending getheaders.
        bool m_sent_getheaders;
        //! Whether this peer is protected from disconnection due to a
        //! bad/slow chain.
        bool m_protect;
    };

    ChainSyncTimeoutState m_chain_sync;

    //! Time of last new block announcement.
    int64_t m_last_block_announcement;

    struct AvalancheState {
        std::chrono::time_point<std::chrono::steady_clock> last_poll;
    };

    AvalancheState m_avalanche_state;

    //! Whether this peer is an inbound connection.
    bool m_is_inbound;

    CRollingBloomFilter m_recently_announced_invs =
        CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};

    CRollingBloomFilter m_recently_announced_proofs =
        CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};

    CNodeState(CAddress addrIn, bool is_inbound)
        : address(addrIn), m_is_inbound(is_inbound) {
        pindexBestKnownBlock = nullptr;
        hashLastUnknownBlock = BlockHash();
        pindexLastCommonBlock = nullptr;
        pindexBestHeaderSent = nullptr;
        nUnconnectingHeaders = 0;
        fSyncStarted = false;
        nBlocksInFlight = 0;
        fPreferredDownload = false;
        fPreferHeaders = false;
        fPreferHeaderAndIDs = false;
        fProvidesHeaderAndIDs = false;
        fSupportsDesiredCmpctVersion = false;
        m_chain_sync = {0, nullptr, false, false};
        m_last_block_announcement = 0;
        m_recently_announced_invs.reset();
        m_recently_announced_proofs.reset();
    }
};

static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);

static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
    if (it == mapNodeState.end()) {
        return nullptr;
    }

    return &it->second;
}

static bool IsAddrCompatible(const Peer &peer, const CAddress &addr) {
    return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
}

static void AddAddressKnown(Peer &peer, const CAddress &addr) {
    assert(peer.m_addr_known);
    peer.m_addr_known->insert(addr.GetKey());
}

static void PushAddress(Peer &peer, const CAddress &addr,
                        FastRandomContext &insecure_rand) {
    // Known checking here is only to save space from duplicates.
    // Before sending, we'll filter it again for known addresses that were
    // added after addresses were pushed.
    assert(peer.m_addr_known);
    if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) &&
        IsAddrCompatible(peer, addr)) {
        if (peer.m_addrs_to_send.size() >= GetMaxAddrToSend()) {
            peer.m_addrs_to_send[insecure_rand.randrange(
                peer.m_addrs_to_send.size())] = addr;
        } else {
            peer.m_addrs_to_send.push_back(addr);
        }
    }
}
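
// When the outgoing queue is already full, PushAddress above overwrites a
// uniformly random existing slot rather than dropping the new address. A
// worked instance of the policy (illustrative, assuming a queue cap of 4):
// with queue [A, B, C, D], pushing E replaces one of A..D with probability
// 1/4 each, so a flood of pushes cannot deterministically pin the queue's
// contents.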

static bool isPreferredDownloadPeer(const CNode &pfrom) {
    LOCK(cs_main);
    const CNodeState *state = State(pfrom.GetId());
    return state && state->fPreferredDownload;
}

static void UpdatePreferredDownload(const CNode &node, CNodeState *state)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    nPreferredDownload -= state->fPreferredDownload;

    // Whether this node should be marked as a preferred download node.
    state->fPreferredDownload =
        (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) &&
        !node.IsAddrFetchConn() && !node.fClient;

    nPreferredDownload += state->fPreferredDownload;
}

bool PeerManagerImpl::IsBlockRequested(const BlockHash &hash) {
    return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end();
}

void PeerManagerImpl::RemoveBlockRequest(const BlockHash &hash) {
    auto it = mapBlocksInFlight.find(hash);

    if (it == mapBlocksInFlight.end()) {
        // Block was not requested
        return;
    }

    auto [node_id, list_it] = it->second;
    CNodeState *state = State(node_id);
    assert(state != nullptr);

    if (state->vBlocksInFlight.begin() == list_it) {
        // First block on the queue was received, update the start download
        // time for the next one
        state->m_downloading_since = std::max(
            state->m_downloading_since, GetTime<std::chrono::microseconds>());
    }
    state->vBlocksInFlight.erase(list_it);

    state->nBlocksInFlight--;
    if (state->nBlocksInFlight == 0) {
        // Last validated block on the queue was received.
        m_peers_downloading_from--;
    }
    state->m_stalling_since = 0us;
    mapBlocksInFlight.erase(it);
}

bool PeerManagerImpl::BlockRequested(const Config &config, NodeId nodeid,
                                     const CBlockIndex &block,
                                     std::list<QueuedBlock>::iterator **pit) {
    const BlockHash &hash{block.GetBlockHash()};

    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Short-circuit most stuff in case it is from the same node.
    std::map<BlockHash,
             std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
        itInFlight = mapBlocksInFlight.find(hash);
    if (itInFlight != mapBlocksInFlight.end() &&
        itInFlight->second.first == nodeid) {
        if (pit) {
            *pit = &itInFlight->second.second;
        }
        return false;
    }

    // Make sure it's not listed somewhere already.
    RemoveBlockRequest(hash);

    std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
        state->vBlocksInFlight.end(),
        {&block, std::unique_ptr<PartiallyDownloadedBlock>(
                     pit ? new PartiallyDownloadedBlock(config, &m_mempool)
                         : nullptr)});
    state->nBlocksInFlight++;
    if (state->nBlocksInFlight == 1) {
        // We're starting a block download (batch) from this peer.
        state->m_downloading_since = GetTime<std::chrono::microseconds>();
        m_peers_downloading_from++;
    }

    itInFlight = mapBlocksInFlight
                     .insert(std::make_pair(hash, std::make_pair(nodeid, it)))
                     .first;

    if (pit) {
        *pit = &itInFlight->second.second;
    }

    return true;
}

void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) {
    CNodeState *nodestate = State(nodeid);
    if (!nodestate) {
        LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
        return;
    }
    if (!nodestate->fProvidesHeaderAndIDs) {
        return;
    }
    for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
         it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
        if (*it == nodeid) {
            lNodesAnnouncingHeaderAndIDs.erase(it);
            lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
            return;
        }
    }
    m_connman.ForNode(nodeid, [this](CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(
                                  ::cs_main) {
        uint64_t nCMPCTBLOCKVersion = 1;
        if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
            // As per BIP152, we only get 3 of our peers to announce
            // blocks using compact encodings.
            m_connman.ForNode(
                lNodesAnnouncingHeaderAndIDs.front(),
                [this, nCMPCTBLOCKVersion](CNode *pnodeStop) {
                    m_connman.PushMessage(
                        pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion())
                                       .Make(NetMsgType::SENDCMPCT,
                                             /*fAnnounceUsingCMPCTBLOCK=*/false,
                                             nCMPCTBLOCKVersion));
                    // save BIP152 bandwidth state: we select peer to be
                    // low-bandwidth
                    pnodeStop->m_bip152_highbandwidth_to = false;
                    return true;
                });
            lNodesAnnouncingHeaderAndIDs.pop_front();
        }
        m_connman.PushMessage(pfrom,
                              CNetMsgMaker(pfrom->GetCommonVersion())
                                  .Make(NetMsgType::SENDCMPCT,
                                        /*fAnnounceUsingCMPCTBLOCK=*/true,
                                        nCMPCTBLOCKVersion));
        // save BIP152 bandwidth state: we select peer to be high-bandwidth
        pfrom->m_bip152_highbandwidth_to = true;
        lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
        return true;
    });
}
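
// A minimal sketch (illustrative only, not used by this file) of the
// rotation implemented above: at most three peers are kept as BIP152
// high-bandwidth announcers, the oldest is demoted when a new candidate
// arrives, and re-selecting an existing peer just refreshes its position.
namespace hb_rotation_sketch {
inline void Select(std::list<NodeId> &hb_peers, NodeId candidate) {
    for (auto it = hb_peers.begin(); it != hb_peers.end(); ++it) {
        if (*it == candidate) {
            // Already high-bandwidth: move to the most-recent slot.
            hb_peers.erase(it);
            hb_peers.push_back(candidate);
            return;
        }
    }
    if (hb_peers.size() >= 3) {
        // Demote the least recently selected peer.
        hb_peers.pop_front();
    }
    hb_peers.push_back(candidate);
}
} // namespace hb_rotation_sketch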

bool PeerManagerImpl::TipMayBeStale() {
    const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
    if (m_last_tip_update == 0) {
        m_last_tip_update = GetTime();
    }
    return m_last_tip_update <
               GetTime() - consensusParams.nPowTargetSpacing * 3 &&
           mapBlocksInFlight.empty();
}
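
// Worked example of the staleness test above, assuming the usual 10-minute
// target spacing (nPowTargetSpacing = 600): the tip is considered
// potentially stale once no block has arrived for more than
// 3 * 600 s = 30 minutes and no block download is currently in flight.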

bool PeerManagerImpl::CanDirectFetch(const Consensus::Params &consensusParams) {
    return m_chainman.ActiveChain().Tip()->GetBlockTime() >
           GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
}

static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
    EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
    if (state->pindexBestKnownBlock &&
        pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
        return true;
    }
    if (state->pindexBestHeaderSent &&
        pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
        return true;
    }
    return false;
}

void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    if (!state->hashLastUnknownBlock.IsNull()) {
        const CBlockIndex *pindex =
            m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
        if (pindex && pindex->nChainWork > 0) {
            if (state->pindexBestKnownBlock == nullptr ||
                pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
                state->pindexBestKnownBlock = pindex;
            }
            state->hashLastUnknownBlock.SetNull();
        }
    }
}

void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid,
                                              const BlockHash &hash) {
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    ProcessBlockAvailability(nodeid);

    const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
    if (pindex && pindex->nChainWork > 0) {
        // An actually better block was announced.
        if (state->pindexBestKnownBlock == nullptr ||
            pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
            state->pindexBestKnownBlock = pindex;
        }
    } else {
        // An unknown block was announced; just assume that the latest one is
        // the best one.
        state->hashLastUnknownBlock = hash;
    }
}

void PeerManagerImpl::FindNextBlocksToDownload(
    NodeId nodeid, unsigned int count,
    std::vector<const CBlockIndex *> &vBlocks, NodeId &nodeStaller) {
    if (count == 0) {
        return;
    }

    vBlocks.reserve(vBlocks.size() + count);
    CNodeState *state = State(nodeid);
    assert(state != nullptr);

    // Make sure pindexBestKnownBlock is up to date, we'll need it.
    ProcessBlockAvailability(nodeid);

    if (state->pindexBestKnownBlock == nullptr ||
        state->pindexBestKnownBlock->nChainWork <
            m_chainman.ActiveChain().Tip()->nChainWork ||
        state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
        // This peer has nothing interesting.
        return;
    }

    if (state->pindexLastCommonBlock == nullptr) {
        // Bootstrap quickly by guessing a parent of our best tip is the
        // forking point. Guessing wrong in either direction is not a problem.
        state->pindexLastCommonBlock =
            m_chainman
                .ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight,
                                        m_chainman.ActiveChain().Height())];
    }

    // If the peer reorganized, our previous pindexLastCommonBlock may not be
    // an ancestor of its current tip anymore. Go back enough to fix that.
    state->pindexLastCommonBlock = LastCommonAncestor(
        state->pindexLastCommonBlock, state->pindexBestKnownBlock);
    if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
        return;
    }

    std::vector<const CBlockIndex *> vToFetch;
    const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
    // Never fetch further than the best block we know the peer has, or more
    // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
    // common with this peer. The +1 is so we can detect stalling, namely if we
    // would be able to download that next block if the window were 1 larger.
    int nWindowEnd =
        state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
    int nMaxHeight =
        std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
    NodeId waitingfor = -1;
    while (pindexWalk->nHeight < nMaxHeight) {
        // Read up to 128 (or more, if more blocks than that are needed)
        // successors of pindexWalk (towards pindexBestKnownBlock) into
        // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
        // expensive as iterating over ~100 CBlockIndex* entries anyway.
        int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
                                std::max<int>(count - vBlocks.size(), 128));
        vToFetch.resize(nToFetch);
        pindexWalk = state->pindexBestKnownBlock->GetAncestor(
            pindexWalk->nHeight + nToFetch);
        vToFetch[nToFetch - 1] = pindexWalk;
        for (unsigned int i = nToFetch - 1; i > 0; i--) {
            vToFetch[i - 1] = vToFetch[i]->pprev;
        }

        // Iterate over those blocks in vToFetch (in forward direction), adding
        // the ones that are not yet downloaded and not in flight to vBlocks.
        // In the meantime, update pindexLastCommonBlock as long as all
        // ancestors are already downloaded, or if it's already part of our
        // chain (and therefore don't need it even if pruned).
        for (const CBlockIndex *pindex : vToFetch) {
            if (!pindex->IsValid(BlockValidity::TREE)) {
                // We consider the chain that this peer is on invalid.
                return;
            }
            if (pindex->nStatus.hasData() ||
                m_chainman.ActiveChain().Contains(pindex)) {
                if (pindex->HaveTxsDownloaded()) {
                    state->pindexLastCommonBlock = pindex;
                }
            } else if (!IsBlockRequested(pindex->GetBlockHash())) {
                // The block is not already downloaded, and not yet in flight.
                if (pindex->nHeight > nWindowEnd) {
                    // We reached the end of the window.
                    if (vBlocks.size() == 0 && waitingfor != nodeid) {
                        // We aren't able to fetch anything, but we would be if
                        // the download window was one larger.
                        nodeStaller = waitingfor;
                    }
                    return;
                }
                vBlocks.push_back(pindex);
                if (vBlocks.size() == count) {
                    return;
                }
            } else if (waitingfor == -1) {
                // This is the first already-in-flight block.
                waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
            }
        }
    }
}

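// A worked instance of the window logic above (illustrative numbers): with
// pindexLastCommonBlock at height 100,000 and BLOCK_DOWNLOAD_WINDOW = 1024,
// nWindowEnd is 101,024 and blocks up to height 101,025 (nWindowEnd + 1)
// are scanned. If the only fetchable block is the one just past the window
// and another peer holds the in-flight block that pins the window, that
// peer is reported through nodeStaller.
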
} // namespace

template <class InvId>
static bool TooManyAnnouncements(const CNode &node,
                                 const InvRequestTracker<InvId> &requestTracker,
                                 const DataRequestParameters &requestParams) {
    return !node.HasPermission(
               requestParams.bypass_request_limits_permissions) &&
           requestTracker.Count(node.GetId()) >=
               requestParams.max_peer_announcements;
}

/**
 * Compute the request time for this announcement: the current time plus
 * delays for:
 *   - nonpref_peer_delay for announcements from non-preferred connections;
 *   - overloaded_peer_delay for announcements from peers which have at
 *     least max_peer_request_in_flight requests in flight.
 */
template <class InvId>
static std::chrono::microseconds
ComputeRequestTime(const CNode &node,
                   const InvRequestTracker<InvId> &requestTracker,
                   const DataRequestParameters &requestParams,
                   std::chrono::microseconds current_time, bool preferred) {
    auto delay = std::chrono::microseconds{0};

    if (!preferred) {
        delay += requestParams.nonpref_peer_delay;
    }

    if (!node.HasPermission(requestParams.bypass_request_limits_permissions) &&
        requestTracker.CountInFlight(node.GetId()) >=
            requestParams.max_peer_request_in_flight) {
        delay += requestParams.overloaded_peer_delay;
    }

    return current_time + delay;
}
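
// Worked example with TX_REQUEST_PARAMS: an announcement from a
// non-preferred peer that already has 100 (max_peer_request_in_flight)
// requests in flight is scheduled at current_time + 2 s + 2 s = 4 s, while
// the same announcement from a preferred, non-overloaded peer is requested
// immediately at current_time.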

void PeerManagerImpl::PushNodeVersion(const Config &config, CNode &pnode,
                                      int64_t nTime) {
    // Note that pnode.GetLocalServices() is a reflection of the local
    // services we were offering when the CNode object was created for this
    // peer.
    ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
    uint64_t nonce = pnode.GetLocalNonce();
    const int nNodeStartingHeight{m_best_height};
    NodeId nodeid = pnode.GetId();
    CAddress addr = pnode.addr;
    uint64_t extraEntropy = pnode.GetLocalExtraEntropy();

    CAddress addrYou =
        addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible()
            ? addr
            : CAddress(CService(), addr.nServices);
    CAddress addrMe = CAddress(CService(), nLocalNodeServices);

    const bool tx_relay = !m_ignore_incoming_txs &&
                          pnode.m_tx_relay != nullptr && !pnode.IsFeelerConn();
    m_connman.PushMessage(
        &pnode, CNetMsgMaker(INIT_PROTO_VERSION)
                    .Make(NetMsgType::VERSION, PROTOCOL_VERSION,
                          uint64_t(nLocalNodeServices), nTime, addrYou, addrMe,
                          nonce, userAgent(config), nNodeStartingHeight,
                          tx_relay, extraEntropy));

    if (fLogIPs) {
        LogPrint(BCLog::NET,
                 "send version message: version %d, blocks=%d, us=%s, "
                 "them=%s, txrelay=%d, peer=%d\n",
                 PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(),
                 addrYou.ToString(), tx_relay, nodeid);
    } else {
        LogPrint(BCLog::NET,
                 "send version message: version %d, blocks=%d, us=%s, "
                 "txrelay=%d, peer=%d\n",
                 PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(),
                 tx_relay, nodeid);
    }
}

void PeerManagerImpl::AddTxAnnouncement(
    const CNode &node, const TxId &txid,
    std::chrono::microseconds current_time) {
    // For m_txrequest and state
    AssertLockHeld(cs_main);

    if (TooManyAnnouncements(node, m_txrequest, TX_REQUEST_PARAMS)) {
        return;
    }

    const bool preferred = isPreferredDownloadPeer(node);
    auto reqtime = ComputeRequestTime(node, m_txrequest, TX_REQUEST_PARAMS,
                                      current_time, preferred);

    m_txrequest.ReceivedInv(node.GetId(), txid, preferred, reqtime);
}

void PeerManagerImpl::AddProofAnnouncement(
    const CNode &node, const avalanche::ProofId &proofid,
    std::chrono::microseconds current_time, bool preferred) {
    // For m_proofrequest
    AssertLockHeld(cs_proofrequest);

    if (TooManyAnnouncements(node, m_proofrequest, PROOF_REQUEST_PARAMS)) {
        return;
    }

    auto reqtime = ComputeRequestTime(
        node, m_proofrequest, PROOF_REQUEST_PARAMS, current_time, preferred);

    m_proofrequest.ReceivedInv(node.GetId(), proofid, preferred, reqtime);
}

// This function is used for testing the stale tip eviction logic, see
// denialofservice_tests.cpp
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) {
    LOCK(cs_main);
    CNodeState *state = State(node);
    if (state) {
        state->m_last_block_announcement = time_in_seconds;
    }
}

void PeerManagerImpl::InitializeNode(const Config &config, CNode *pnode) {
    CAddress addr = pnode->addr;
    NodeId nodeid = pnode->GetId();
    {
        LOCK(cs_main);
        mapNodeState.emplace_hint(
            mapNodeState.end(), std::piecewise_construct,
            std::forward_as_tuple(nodeid),
            std::forward_as_tuple(addr, pnode->IsInboundConn()));
        assert(m_txrequest.Count(nodeid) == 0);
    }
    {
        PeerRef peer = std::make_shared<Peer>(nodeid);
        LOCK(m_peer_mutex);
        m_peer_map.emplace_hint(m_peer_map.end(), nodeid, std::move(peer));
    }
    if (!pnode->IsInboundConn()) {
        PushNodeVersion(config, *pnode, GetTime());
    }
}

void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler &scheduler) {
    std::set<TxId> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();

    for (const TxId &txid : unbroadcast_txids) {
        // Sanity check: all unbroadcast txns should exist in the mempool
        if (m_mempool.exists(txid)) {
            RelayTransaction(txid);
        } else {
            m_mempool.RemoveUnbroadcastTx(txid, true);
        }
    }

    if (g_avalanche) {
        // Get and sanitize the list of proofids to broadcast. The RelayProof
        // call is done in a second loop to avoid locking cs_vNodes while
        // cs_peerManager is locked which would cause a potential deadlock due
        // to reversed lock order.
        auto unbroadcasted_proofids =
            g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
                auto unbroadcasted_proofids = pm.getUnbroadcastProofs();

                auto it = unbroadcasted_proofids.begin();
                while (it != unbroadcasted_proofids.end()) {
                    // Sanity check: all unbroadcast proofs should be bound to
                    // a peer in the peermanager
                    if (!pm.isBoundToPeer(*it)) {
                        pm.removeUnbroadcastProof(*it);
                        it = unbroadcasted_proofids.erase(it);
                        continue;
                    }

                    ++it;
                }

                return unbroadcasted_proofids;
            });

        // Remaining proofids are the ones to broadcast
        for (const auto &proofid : unbroadcasted_proofids) {
            RelayProof(proofid);
        }
    }

    // Schedule next run for 10-15 minutes in the future.
    // We add randomness on every cycle to avoid the possibility of P2P
    // fingerprinting.
    const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
                              reattemptBroadcastInterval);
}
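
// The rescheduling above draws a fresh interval in [10, 15) minutes on
// every cycle, so an observer cannot correlate rebroadcast timing across
// cycles. Illustrative sketch of the draw (GetRandMillis returns a uniform
// duration below its argument):
//
//     const auto interval = 10min + GetRandMillis(5min); // in [10min, 15min)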

void PeerManagerImpl::UpdateAvalancheStatistics() const {
    m_connman.ForEachNode(
        [](CNode *pnode) { pnode->updateAvailabilityScore(); });
}

void PeerManagerImpl::AvalanchePeriodicNetworking(CScheduler &scheduler) const {
    const auto now = GetTime<std::chrono::seconds>();
    std::vector<NodeId> avanode_ids;
    bool fQuorumEstablished;
    bool fShouldRequestMoreNodes;

    if (!g_avalanche) {
        // Not enabled or not ready yet, retry later
        goto scheduleLater;
    }

    fQuorumEstablished = g_avalanche->isQuorumEstablished();
    fShouldRequestMoreNodes =
        g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
            return pm.shouldRequestMoreNodes();
        });

    m_connman.ForEachNode([&](CNode *pnode) {
        // Build a list of the avalanche peers nodeids
        if (pnode->m_avalanche_enabled &&
            (!fQuorumEstablished || !pnode->IsInboundConn())) {
            avanode_ids.push_back(pnode->GetId());
        }

        // If a proof radix tree timed out, cleanup
        if (pnode->m_proof_relay &&
            now > (pnode->m_proof_relay->lastSharedProofsUpdate.load() +
                   AVALANCHE_AVAPROOFS_TIMEOUT)) {
            pnode->m_proof_relay->sharedProofs = {};
        }
    });

    if (avanode_ids.empty()) {
        // No node is available for messaging, retry later
        goto scheduleLater;
    }

    Shuffle(avanode_ids.begin(), avanode_ids.end(), FastRandomContext());

    // Request avalanche addresses from our peers
    for (NodeId avanodeId : avanode_ids) {
        m_connman.ForNode(avanodeId, [&](CNode *pavanode) {
            LogPrint(BCLog::AVALANCHE,
                     "Requesting more avalanche addresses from peer %d\n",
                     avanodeId);
            m_connman.PushMessage(pavanode,
                                  CNetMsgMaker(pavanode->GetCommonVersion())
                                      .Make(NetMsgType::GETAVAADDR));
            PeerRef peer = GetPeerRef(avanodeId);
            WITH_LOCK(peer->m_addr_token_bucket_mutex,
                      peer->m_addr_token_bucket += GetMaxAddrToSend());
            return true;
        });

        // If we have no reason to believe that we need more nodes, only
        // request addresses from one of our peers.
        if (fQuorumEstablished && !fShouldRequestMoreNodes) {
            break;
        }
    }

    if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
        // Don't request proofs while in IBD. We're likely to orphan them
        // because we don't have the UTXOs.
        goto scheduleLater;
    }

    // If we never had an avaproofs message yet, be kind and only request from
    // a subset of our peers, as we expect a ton of avaproofs messages in the
    // process.
    if (g_avalanche->getAvaproofsNodeCounter() == 0) {
        avanode_ids.resize(std::min<size_t>(avanode_ids.size(), 3));
    }

    for (NodeId nodeid : avanode_ids) {
        // Send a getavaproofs to one of our peers
        m_connman.ForNode(nodeid, [&](CNode *pavanode) {
            LogPrint(BCLog::AVALANCHE,
                     "Requesting compact proofs from peer %d\n",
                     pavanode->GetId());
            if (pavanode->m_proof_relay) {
                m_connman.PushMessage(pavanode,
                                      CNetMsgMaker(pavanode->GetCommonVersion())
                                          .Make(NetMsgType::GETAVAPROOFS));

                pavanode->m_proof_relay->compactproofs_requested = true;
            }
            return true;
        });
    }

scheduleLater:
    // Schedule next run for 2-5 minutes in the future.
    // We add randomness on every cycle to avoid the possibility of P2P
    // fingerprinting.
    const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
    scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
                              avalanchePeriodicNetworkingInterval);
}

void PeerManagerImpl::FinalizeNode(const Config &config, const CNode &node) {
    NodeId nodeid = node.GetId();
    int misbehavior{0};
    {
        LOCK(cs_main);
        {
            // We remove the PeerRef from m_peer_map here, but we don't always
            // destruct the Peer. Sometimes another thread is still holding a
            // PeerRef, so the refcount is >= 1. Be careful not to do any
            // processing here that assumes Peer won't be changed before it's
            // destructed.
            PeerRef peer = RemovePeer(nodeid);
            assert(peer != nullptr);
            misbehavior = WITH_LOCK(peer->m_misbehavior_mutex,
                                    return peer->m_misbehavior_score);
            LOCK(m_peer_mutex);
            m_peer_map.erase(nodeid);
        }
        CNodeState *state = State(nodeid);
        assert(state != nullptr);

        if (state->fSyncStarted) {
            nSyncStarted--;
        }

        for (const QueuedBlock &entry : state->vBlocksInFlight) {
            mapBlocksInFlight.erase(entry.pindex->GetBlockHash());
        }
        WITH_LOCK(g_cs_orphans, m_orphanage.EraseForPeer(nodeid));
        m_txrequest.DisconnectedPeer(nodeid);
        nPreferredDownload -= state->fPreferredDownload;
        m_peers_downloading_from -= (state->nBlocksInFlight != 0);
        assert(m_peers_downloading_from >= 0);
        m_outbound_peers_with_protect_from_disconnect -=
            state->m_chain_sync.m_protect;
        assert(m_outbound_peers_with_protect_from_disconnect >= 0);

        mapNodeState.erase(nodeid);

        if (mapNodeState.empty()) {
            // Do a consistency check after the last peer is removed.
            assert(mapBlocksInFlight.empty());
            assert(nPreferredDownload == 0);
            assert(m_peers_downloading_from == 0);
            assert(m_outbound_peers_with_protect_from_disconnect == 0);
            assert(m_txrequest.Size() == 0);
        }
    }

    if (node.fSuccessfullyConnected && misbehavior == 0 &&
        !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
        // Only change visible addrman state for full outbound peers. We don't
        // call Connected() for feeler connections since they don't have
        // fSuccessfullyConnected set.
        m_addrman.Connected(node.addr);
    }

    WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));

    LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}

PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const {
    LOCK(m_peer_mutex);
    auto it = m_peer_map.find(id);
    return it != m_peer_map.end() ? it->second : nullptr;
}

PeerRef PeerManagerImpl::RemovePeer(NodeId id) {
    PeerRef ret;
    LOCK(m_peer_mutex);
    auto it = m_peer_map.find(id);
    if (it != m_peer_map.end()) {
        ret = std::move(it->second);
        m_peer_map.erase(it);
    }
    return ret;
}

bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid,
                                        CNodeStateStats &stats) const {
    {
        LOCK(cs_main);
        CNodeState *state = State(nodeid);
        if (state == nullptr) {
            return false;
        }
        stats.nSyncHeight = state->pindexBestKnownBlock
                                ? state->pindexBestKnownBlock->nHeight
                                : -1;
        stats.nCommonHeight = state->pindexLastCommonBlock
                                  ? state->pindexLastCommonBlock->nHeight
                                  : -1;
        for (const QueuedBlock &queue : state->vBlocksInFlight) {
            if (queue.pindex) {
                stats.vHeightInFlight.push_back(queue.pindex->nHeight);
            }
        }
    }

    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) {
        return false;
    }
    stats.m_starting_height = peer->m_starting_height;
    // It is common for nodes with good ping times to suddenly become lagged,
    // due to a new block arriving or other large transfer.
    // Merely reporting pingtime might fool the caller into thinking the node
    // was still responsive, since pingtime does not update until the ping is
    // complete, which might take a while. So, if a ping is taking an unusually
    // long time in flight, the caller can immediately detect that this is
    // happening.
    std::chrono::microseconds ping_wait{0};
    if ((0 != peer->m_ping_nonce_sent) &&
        (0 != peer->m_ping_start.load().count())) {
        ping_wait =
            GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
    }

    stats.m_ping_wait = ping_wait;
    stats.m_addr_processed = peer->m_addr_processed.load();
    stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
    stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();

    return true;
}

void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef &tx) {
    size_t max_extra_txn = gArgs.GetIntArg(
        "-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
    if (max_extra_txn <= 0) {
        return;
    }

    if (!vExtraTxnForCompact.size()) {
        vExtraTxnForCompact.resize(max_extra_txn);
    }

    vExtraTxnForCompact[vExtraTxnForCompactIt] =
        std::make_pair(tx->GetHash(), tx);
    vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
}
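
// vExtraTxnForCompact is used as a fixed-size ring buffer: the write index
// wraps modulo max_extra_txn, so the newest entry always evicts the oldest.
// Worked instance (illustrative, max_extra_txn = 3): inserting t0, t1, t2,
// t3 leaves the buffer holding [t3, t1, t2] with the write cursor at slot 1.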

void PeerManagerImpl::Misbehaving(const NodeId pnode, const int howmuch,
                                  const std::string &message) {
    assert(howmuch > 0);

    PeerRef peer = GetPeerRef(pnode);
    if (peer == nullptr) {
        return;
    }

    LOCK(peer->m_misbehavior_mutex);

    peer->m_misbehavior_score += howmuch;
    const std::string message_prefixed =
        message.empty() ? "" : (": " + message);
    if (peer->m_misbehavior_score >= DISCOURAGEMENT_THRESHOLD &&
        peer->m_misbehavior_score - howmuch < DISCOURAGEMENT_THRESHOLD) {
        LogPrint(BCLog::NET,
                 "Misbehaving: peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED%s\n",
                 pnode, peer->m_misbehavior_score - howmuch,
                 peer->m_misbehavior_score, message_prefixed);
        peer->m_should_discourage = true;
    } else {
        LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode,
                 peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score,
                 message_prefixed);
    }
}
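
// The two-sided check above fires exactly once per threshold crossing.
// Worked example (assuming DISCOURAGEMENT_THRESHOLD, defined elsewhere, is
// 100): a peer at score 90 penalized with howmuch = 20 moves 90 -> 110,
// satisfying both conditions, so m_should_discourage is set; a later
// 110 -> 130 bump fails the second condition and only logs.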

bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid,
                                              const BlockValidationState &state,
                                              bool via_compact_block,
                                              const std::string &message) {
    switch (state.GetResult()) {
        case BlockValidationResult::BLOCK_RESULT_UNSET:
            break;
        // The node is providing invalid data:
        case BlockValidationResult::BLOCK_CONSENSUS:
        case BlockValidationResult::BLOCK_MUTATED:
            if (!via_compact_block) {
                Misbehaving(nodeid, 100, message);
                return true;
            }
            break;
        case BlockValidationResult::BLOCK_CACHED_INVALID: {
            LOCK(cs_main);
            CNodeState *node_state = State(nodeid);
            if (node_state == nullptr) {
                break;
            }

            // Ban outbound (but not inbound) peers if on an invalid chain.
            // Exempt HB compact block peers. Manual connections are always
            // protected from discouragement.
            if (!via_compact_block && !node_state->m_is_inbound) {
                Misbehaving(nodeid, 100, message);
                return true;
            }
            break;
        }
        case BlockValidationResult::BLOCK_INVALID_HEADER:
        case BlockValidationResult::BLOCK_CHECKPOINT:
        case BlockValidationResult::BLOCK_INVALID_PREV:
            Misbehaving(nodeid, 100, message);
            return true;
        case BlockValidationResult::BLOCK_FINALIZATION:
            // TODO: Use the state object to report this is probably not the
            // best idea. This is effectively unreachable, unless there is a
            // bug somewhere.
            Misbehaving(nodeid, 20, message);
            return true;
        // Conflicting (but not necessarily invalid) data or different policy:
        case BlockValidationResult::BLOCK_MISSING_PREV:
            // TODO: Handle this much more gracefully (10 DoS points is super
            // arbitrary)
            Misbehaving(nodeid, 10, message);
            return true;
        case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE:
        case BlockValidationResult::BLOCK_TIME_FUTURE:
            break;
    }
    if (message != "") {
        LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
    }
    return false;
}

bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid,
                                           const TxValidationState &state,
                                           const std::string &message) {
    switch (state.GetResult()) {
        case TxValidationResult::TX_RESULT_UNSET:
            break;
        // The node is providing invalid data:
        case TxValidationResult::TX_CONSENSUS:
            Misbehaving(nodeid, 100, message);
            return true;
        // Conflicting (but not necessarily invalid) data or different policy:
        case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
        case TxValidationResult::TX_INPUTS_NOT_STANDARD:
        case TxValidationResult::TX_NOT_STANDARD:
        case TxValidationResult::TX_MISSING_INPUTS:
        case TxValidationResult::TX_PREMATURE_SPEND:
        case TxValidationResult::TX_CONFLICT:
        case TxValidationResult::TX_MEMPOOL_POLICY:
        case TxValidationResult::TX_NO_MEMPOOL:
            break;
    }
    if (message != "") {
        LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
    }
    return false;
}

bool PeerManagerImpl::BlockRequestAllowed(
    const CBlockIndex *pindex, const Consensus::Params &consensusParams) {
    if (m_chainman.ActiveChain().Contains(pindex)) {
        return true;
    }
    return pindex->IsValid(BlockValidity::SCRIPTS) &&
           (pindexBestHeader != nullptr) &&
           (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() <
            STALE_RELAY_AGE_LIMIT) &&
           (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex,
                                        *pindexBestHeader, consensusParams) <
            STALE_RELAY_AGE_LIMIT);
}

std::optional<std::string>
PeerManagerImpl::FetchBlock(const Config &config, NodeId peer_id,
                            const CBlockIndex &block_index) {
    if (fImporting) {
        return "Importing...";
    }
    if (fReindex) {
        return "Reindexing...";
    }

    LOCK(cs_main);
    // Ensure this peer exists and hasn't been disconnected
    CNodeState *state = State(peer_id);
    if (state == nullptr) {
        return "Peer does not exist";
    }
    // Mark block as in-flight unless it already is (for this peer).
    // If a block was already in-flight for a different peer, its BLOCKTXN
    // response will be dropped.
    if (!BlockRequested(config, peer_id, block_index)) {
        return "Already requested from this peer";
    }

    // Construct message to request the block
    const BlockHash &hash{block_index.GetBlockHash()};
    const std::vector<CInv> invs{CInv(MSG_BLOCK, hash)};

    // Send block request message to the peer
    if (!m_connman.ForNode(peer_id, [this, &invs](CNode *node) {
            const CNetMsgMaker msgMaker(node->GetCommonVersion());
            this->m_connman.PushMessage(
                node, msgMaker.Make(NetMsgType::GETDATA, invs));
            return true;
        })) {
        return "Node not fully connected";
    }

    LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", hash.ToString(),
             peer_id);
    return std::nullopt;
}

std::unique_ptr<PeerManager>
PeerManager::make(const CChainParams &chainparams, CConnman &connman,
                  AddrMan &addrman, BanMan *banman, ChainstateManager &chainman,
                  CTxMemPool &pool, bool ignore_incoming_txs) {
    return std::make_unique<PeerManagerImpl>(chainparams, connman, addrman,
                                             banman, chainman, pool,
                                             ignore_incoming_txs);
}

PeerManagerImpl::PeerManagerImpl(const CChainParams &chainparams,
                                 CConnman &connman, AddrMan &addrman,
                                 BanMan *banman, ChainstateManager &chainman,
                                 CTxMemPool &pool, bool ignore_incoming_txs)
    : m_chainparams(chainparams), m_connman(connman), m_addrman(addrman),
      m_banman(banman), m_chainman(chainman), m_mempool(pool),
      m_stale_tip_check_time(0), m_ignore_incoming_txs(ignore_incoming_txs) {
    // Initialize global variables that cannot be constructed at startup.
    recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));

    {
        LOCK(cs_invalidProofs);
        invalidProofs = std::make_unique<CRollingBloomFilter>(100000, 0.000001);
    }

    // Blocks don't typically have more than 4000 transactions, so this should
    // be at least six blocks (~1 hr) worth of transactions that we can store.
    // If the number of transactions appearing in a block goes up, or if we are
    // seeing getdata requests more than an hour after initial announcement, we
    // can increase this number.
    // The false positive rate of 1/1M should come out to less than 1
    // transaction per day that would be inadvertently ignored (which is the
    // same probability that we have in the reject filter).
    m_recent_confirmed_transactions.reset(
        new CRollingBloomFilter(24000, 0.000001));
}
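
// Worked sizing of the filter above: at ~4000 transactions per block,
// 24000 elements cover roughly six blocks (~1 hour at 10-minute spacing),
// and the 1/1,000,000 false-positive rate keeps inadvertently ignored
// transactions to well under one per day, matching the reject filter's
// probability.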

void PeerManagerImpl::StartScheduledTasks(CScheduler &scheduler) {
    // Stale tip checking and peer eviction are on two different timers, but we
    // don't want them to get out of sync due to drift in the scheduler, so we
    // combine them in one function and schedule at the quicker (peer-eviction)
    // timer.
    static_assert(
        EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL,
        "peer eviction timer should be less than stale tip check timer");
    scheduler.scheduleEvery(
        [this]() {
            this->CheckForStaleTipAndEvictPeers();
            return true;
        },
        std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});

    // schedule next run for 10-15 minutes in the future
    const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
    scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
                              reattemptBroadcastInterval);

    // Update the avalanche statistics on a schedule
    scheduler.scheduleEvery(
        [this]() {
            UpdateAvalancheStatistics();
            return true;
        },
        AVALANCHE_STATISTICS_REFRESH_PERIOD);

    // schedule next run for 2-5 minutes in the future
    const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
    scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
                              avalanchePeriodicNetworkingInterval);
}

void PeerManagerImpl::BlockConnected(
    const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex) {
    m_orphanage.EraseForBlock(*pblock);
    m_last_tip_update = GetTime();

    {
        LOCK(m_recent_confirmed_transactions_mutex);
        for (const CTransactionRef &ptx : pblock->vtx) {
            m_recent_confirmed_transactions->insert(ptx->GetId());
        }
    }
    {
        LOCK(cs_main);
        for (const auto &ptx : pblock->vtx) {
            m_txrequest.ForgetInvId(ptx->GetId());
        }
    }
}

void PeerManagerImpl::BlockDisconnected(
    const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {
    // To avoid relay problems with transactions that were previously
    // confirmed, clear our filter of recently confirmed transactions whenever
    // there's a reorg.
    // This means that in a 1-block reorg (where 1 block is disconnected and
    // then another block reconnected), our filter will drop to having only one
    // block's worth of transactions in it, but that should be fine, since
    // presumably the most common case of relaying a confirmed transaction
    // should be just after a new block containing it is found.
    LOCK(m_recent_confirmed_transactions_mutex);
    m_recent_confirmed_transactions->reset();
}

// All of the following cache a recent block, and are protected by
// cs_most_recent_block
static RecursiveMutex cs_most_recent_block;
static std::shared_ptr<const CBlock>
    most_recent_block GUARDED_BY(cs_most_recent_block);
static std::shared_ptr<const CBlockHeaderAndShortTxIDs>
    most_recent_compact_block GUARDED_BY(cs_most_recent_block);
static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
2228 
2233 void PeerManagerImpl::NewPoWValidBlock(
2234  const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
2235  std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
2236  std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
2237  const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
2238 
2239  LOCK(cs_main);
2240 
2241  static int nHighestFastAnnounce = 0;
2242  if (pindex->nHeight <= nHighestFastAnnounce) {
2243  return;
2244  }
2245  nHighestFastAnnounce = pindex->nHeight;
2246 
2247  uint256 hashBlock(pblock->GetHash());
2248 
2249  {
2250  LOCK(cs_most_recent_block);
2251  most_recent_block_hash = hashBlock;
2252  most_recent_block = pblock;
2253  most_recent_compact_block = pcmpctblock;
2254  }
2255 
2256  m_connman.ForEachNode(
2257  [this, &pcmpctblock, pindex, &msgMaker,
2258  &hashBlock](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
2259  AssertLockHeld(::cs_main);
2260 
2261  // TODO: Avoid the repeated-serialization here
2262  if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION ||
2263  pnode->fDisconnect) {
2264  return;
2265  }
2266  ProcessBlockAvailability(pnode->GetId());
2267  CNodeState &state = *State(pnode->GetId());
2268  // If the peer has, or we announced to them the previous block
2269  // already, but we don't think they have this one, go ahead and
2270  // announce it.
2271  if (state.fPreferHeaderAndIDs && !PeerHasHeader(&state, pindex) &&
2272  PeerHasHeader(&state, pindex->pprev)) {
2273  LogPrint(BCLog::NET,
2274  "%s sending header-and-ids %s to peer=%d\n",
2275  "PeerManager::NewPoWValidBlock", hashBlock.ToString(),
2276  pnode->GetId());
2277  m_connman.PushMessage(
2278  pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
2279  state.pindexBestHeaderSent = pindex;
2280  }
2281  });
2282 }
2283 
2288 void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew,
2289  const CBlockIndex *pindexFork,
2290  bool fInitialDownload) {
2291  SetBestHeight(pindexNew->nHeight);
2292  SetServiceFlagsIBDCache(!fInitialDownload);
2293 
2294  // Don't relay inventory during initial block download.
2295  if (fInitialDownload) {
2296  return;
2297  }
2298 
2299  // Find the hashes of all blocks that weren't previously in the best chain.
2300  std::vector<BlockHash> vHashes;
2301  const CBlockIndex *pindexToAnnounce = pindexNew;
2302  while (pindexToAnnounce != pindexFork) {
2303  vHashes.push_back(pindexToAnnounce->GetBlockHash());
2304  pindexToAnnounce = pindexToAnnounce->pprev;
2305  if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
2306  // Limit announcements in case of a huge reorganization. Rely on the
2307  // peer's synchronization mechanism in that case.
2308  break;
2309  }
2310  }
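// Worked example (editorial; assumes MAX_BLOCKS_TO_ANNOUNCE is 8, as in
// upstream Bitcoin Core): after a 20-block reorg, only the 8 newest hashes
// are queued for headers relay,
//
//     vHashes = {tip, tip-1, ..., tip-7};  // the loop breaks at 8 entries
//
// and peers recover the remaining blocks through their normal getheaders
// synchronization.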
2311 
2312  {
2313  LOCK(m_peer_mutex);
2314  for (auto &it : m_peer_map) {
2315  Peer &peer = *it.second;
2316  LOCK(peer.m_block_inv_mutex);
2317  for (const BlockHash &hash : reverse_iterate(vHashes)) {
2318  peer.m_blocks_for_headers_relay.push_back(hash);
2319  }
2320  }
2321  }
2322 
2323  m_connman.WakeMessageHandler();
2324 }
2325 
2330 void PeerManagerImpl::BlockChecked(const CBlock &block,
2331  const BlockValidationState &state) {
2332  LOCK(cs_main);
2333 
2334  const BlockHash hash = block.GetHash();
2335  std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
2336  mapBlockSource.find(hash);
2337 
2338  // If the block failed validation, we know where it came from and we're
2339  // still connected to that peer, maybe punish.
2340  if (state.IsInvalid() && it != mapBlockSource.end() &&
2341  State(it->second.first)) {
2342  MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state,
2343  /*via_compact_block=*/!it->second.second);
2344  }
2345  // Check that:
2346  // 1. The block is valid
2347  // 2. We're not in initial block download
2348  // 3. This is currently the best block we're aware of. We haven't updated
2349  // the tip yet so we have no way to check this directly here. Instead we
2350  // just check that there are currently no other blocks in flight.
2351  else if (state.IsValid() &&
2352  !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
2353  mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
2354  if (it != mapBlockSource.end()) {
2355  MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
2356  }
2357  }
2358 
2359  if (it != mapBlockSource.end()) {
2360  mapBlockSource.erase(it);
2361  }
2362 }
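// Illustrative note (editorial): the in-flight check above,
//
//     mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()
//
// holds exactly when every in-flight entry (possibly none) refers to this
// very hash, i.e. when no *other* block is currently being downloaded.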
2363 
2364 //////////////////////////////////////////////////////////////////////////////
2365 //
2366 // Messages
2367 //
2368 
2369 bool PeerManagerImpl::AlreadyHaveTx(const TxId &txid) {
2370  assert(recentRejects);
2371  if (m_chainman.ActiveChain().Tip()->GetBlockHash() !=
2372  hashRecentRejectsChainTip) {
2373  // If the chain tip has changed previously rejected transactions
2374  // might be now valid, e.g. due to a nLockTime'd tx becoming
2375  // valid, or a double-spend. Reset the rejects filter and give
2376  // those txs a second chance.
2377  hashRecentRejectsChainTip =
2378  m_chainman.ActiveChain().Tip()->GetBlockHash();
2379  recentRejects->reset();
2380  }
2381 
2382  if (m_orphanage.HaveTx(txid)) {
2383  return true;
2384  }
2385 
2386  {
2387  LOCK(m_recent_confirmed_transactions_mutex);
2388  if (m_recent_confirmed_transactions->contains(txid)) {
2389  return true;
2390  }
2391  }
2392 
2393  return recentRejects->contains(txid) || m_mempool.exists(txid);
2394 }
2395 
2396 bool PeerManagerImpl::AlreadyHaveBlock(const BlockHash &block_hash) {
2397  return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
2398 }
2399 
2400 bool PeerManagerImpl::AlreadyHaveProof(const avalanche::ProofId &proofid) {
2401  assert(g_avalanche);
2402 
2403  auto localProof = g_avalanche->getLocalProof();
2404  if (localProof && localProof->getId() == proofid) {
2405  return true;
2406  }
2407 
2408  const bool hasProof = g_avalanche->withPeerManager(
2409  [&proofid](avalanche::PeerManager &pm) { return pm.exists(proofid); });
2410 
2411  LOCK(cs_invalidProofs);
2412  return hasProof || invalidProofs->contains(proofid);
2413 }
2414 
2415 void PeerManagerImpl::SendPings() {
2416  LOCK(m_peer_mutex);
2417  for (auto &it : m_peer_map) {
2418  it.second->m_ping_queued = true;
2419  }
2420 }
2421 
2422 void PeerManagerImpl::RelayTransaction(const TxId &txid) {
2423  m_connman.ForEachNode(
2424  [&txid](CNode *pnode) { pnode->PushTxInventory(txid); });
2425 }
2426 
2427 void PeerManagerImpl::RelayProof(const avalanche::ProofId &proofid) {
2428  m_connman.ForEachNode(
2429  [&proofid](CNode *pnode) { pnode->PushProofInventory(proofid); });
2430 }
2431 
2432 void PeerManagerImpl::RelayAddress(NodeId originator, const CAddress &addr,
2433  bool fReachable) {
2434  // We choose the same nodes within a given 24h window (if the list of
2435  // connected nodes does not change) and we don't relay to nodes that already
2436  // know an address. So within 24h we will likely relay a given address once.
2437  // This is to prevent a peer from unjustly giving their address better
2438  // propagation by sending it to us repeatedly.
2439 
2440  if (!fReachable && !addr.IsRelayable()) {
2441  return;
2442  }
2443 
2444  // Relay to a limited number of other nodes
2445  // Use deterministic randomness to send to the same nodes for 24 hours
2446  // at a time so the m_addr_knowns of the chosen nodes prevent repeats
2447  const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
2448  const CSipHasher hasher{
2449  m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
2450  .Write(hash_addr)
2451  .Write((GetTime() + hash_addr) / (24 * 60 * 60))};
2452  FastRandomContext insecure_rand;
2453 
2454  // Relay reachable addresses to 2 peers. Unreachable addresses are relayed
2455  // randomly to 1 or 2 peers.
2456  unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
2457  std::array<std::pair<uint64_t, Peer *>, 2> best{
2458  {{0, nullptr}, {0, nullptr}}};
2459  assert(nRelayNodes <= best.size());
2460 
2461  LOCK(m_peer_mutex);
2462 
2463  for (auto &[id, peer] : m_peer_map) {
2464  if (peer->m_addr_relay_enabled && id != originator &&
2465  IsAddrCompatible(*peer, addr)) {
2466  uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
2467  for (unsigned int i = 0; i < nRelayNodes; i++) {
2468  if (hashKey > best[i].first) {
2469  std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
2470  best.begin() + i + 1);
2471  best[i] = std::make_pair(hashKey, peer.get());
2472  break;
2473  }
2474  }
2475  }
2476  };
2477 
2478  for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
2479  PushAddress(*best[i].second, addr, insecure_rand);
2480  }
2481 }
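// Illustrative sketch (editorial; node_id is a stand-in name): the
// day-bucket term hashed above is what keeps the selection stable for a
// ~24 hour window:
//
//     const uint64_t day = (GetTime() + hash_addr) / (24 * 60 * 60);
//     // hasher commits to (addr, day); each candidate mixes in its id:
//     const uint64_t key = CSipHasher(hasher).Write(node_id).Finalize();
//
// The nRelayNodes largest keys win, so the same peers keep being chosen
// until `day` rolls over or the set of connected peers changes.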
2482 
2483 void PeerManagerImpl::ProcessGetBlockData(const Config &config, CNode &pfrom,
2484  Peer &peer, const CInv &inv,
2485  CConnman &connman) {
2486  const Consensus::Params &consensusParams =
2487  config.GetChainParams().GetConsensus();
2488 
2489  const BlockHash hash(inv.hash);
2490 
2491  bool send = false;
2492  std::shared_ptr<const CBlock> a_recent_block;
2493  std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
2494  {
2495  LOCK(cs_most_recent_block);
2496  a_recent_block = most_recent_block;
2497  a_recent_compact_block = most_recent_compact_block;
2498  }
2499 
2500  bool need_activate_chain = false;
2501  {
2502  LOCK(cs_main);
2503  const CBlockIndex *pindex =
2504  m_chainman.m_blockman.LookupBlockIndex(hash);
2505  if (pindex) {
2506  if (pindex->HaveTxsDownloaded() &&
2507  !pindex->IsValid(BlockValidity::SCRIPTS) &&
2508  pindex->IsValid(BlockValidity::TREE)) {
2509  // If we have the block and all of its parents, but have not yet
2510  // validated it, we might be in the middle of connecting it (ie
2511  // in the unlock of cs_main before ActivateBestChain but after
2512  // AcceptBlock). In this case, we need to run ActivateBestChain
2513  // prior to checking the relay conditions below.
2514  need_activate_chain = true;
2515  }
2516  }
2517  } // release cs_main before calling ActivateBestChain
2518  if (need_activate_chain) {
2519  BlockValidationState state;
2520  if (!m_chainman.ActiveChainstate().ActivateBestChain(config, state,
2521  a_recent_block)) {
2522  LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
2523  state.ToString());
2524  }
2525  }
2526 
2527  LOCK(cs_main);
2528  const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
2529  if (pindex) {
2530  send = BlockRequestAllowed(pindex, consensusParams);
2531  if (!send) {
2532  LogPrint(BCLog::NET,
2533  "%s: ignoring request from peer=%i for old "
2534  "block that isn't in the main chain\n",
2535  __func__, pfrom.GetId());
2536  }
2537  }
2538  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2539  // Disconnect node in case we have reached the outbound limit for serving
2540  // historical blocks.
2541  if (send && connman.OutboundTargetReached(true) &&
2542  (((pindexBestHeader != nullptr) &&
2543  (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() >
2544  HISTORICAL_BLOCK_AGE)) ||
2545  inv.IsMsgFilteredBlk()) &&
2546  // nodes with the download permission may exceed target
2547  !pfrom.HasPermission(PF_DOWNLOAD)) {
2548  LogPrint(BCLog::NET,
2549  "historical block serving limit reached, disconnect peer=%d\n",
2550  pfrom.GetId());
2551 
2552  // disconnect node
2553  pfrom.fDisconnect = true;
2554  send = false;
2555  }
2556  // Avoid leaking prune-height by never sending blocks below the
2557  // NODE_NETWORK_LIMITED threshold.
2558  // Add two blocks buffer extension for possible races
2559  if (send && !pfrom.HasPermission(PF_NOBAN) &&
2560  ((((pfrom.GetLocalServices() & NODE_NETWORK_LIMITED) ==
2561  NODE_NETWORK_LIMITED) &&
2562  ((pfrom.GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) &&
2563  (m_chainman.ActiveChain().Tip()->nHeight - pindex->nHeight >
2564  (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
2565  LogPrint(BCLog::NET,
2566  "Ignore block request below NODE_NETWORK_LIMITED "
2567  "threshold from peer=%d\n",
2568  pfrom.GetId());
2569 
2570  // disconnect node and prevent it from stalling (would otherwise wait
2571  // for the missing block)
2572  pfrom.fDisconnect = true;
2573  send = false;
2574  }
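// Worked example (editorial; assumes NODE_NETWORK_LIMITED_MIN_BLOCKS is
// 288, as in upstream Bitcoin Core): with the tip at height 100000, the
// check above refuses any block with
//
//     100000 - pindex->nHeight > 288 + 2   // i.e. nHeight < 99710
//
// and disconnects the requester rather than reveal the node's prune depth.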
2575  // Pruned nodes may have deleted the block, so check whether it's available
2576  // before trying to send.
2577  if (send && pindex->nStatus.hasData()) {
2578  std::shared_ptr<const CBlock> pblock;
2579  if (a_recent_block &&
2580  a_recent_block->GetHash() == pindex->GetBlockHash()) {
2581  pblock = a_recent_block;
2582  } else {
2583  // Send block from disk
2584  std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
2585  if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams)) {
2586  assert(!"cannot load block from disk");
2587  }
2588  pblock = pblockRead;
2589  }
2590  if (inv.IsMsgBlk()) {
2591  connman.PushMessage(&pfrom,
2592  msgMaker.Make(NetMsgType::BLOCK, *pblock));
2593  } else if (inv.IsMsgFilteredBlk()) {
2594  bool sendMerkleBlock = false;
2595  CMerkleBlock merkleBlock;
2596  if (pfrom.m_tx_relay != nullptr) {
2597  LOCK(pfrom.m_tx_relay->cs_filter);
2598  if (pfrom.m_tx_relay->pfilter) {
2599  sendMerkleBlock = true;
2600  merkleBlock =
2601  CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter);
2602  }
2603  }
2604  if (sendMerkleBlock) {
2605  connman.PushMessage(
2606  &pfrom,
2607  msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
2608  // CMerkleBlock just contains hashes, so also push any
2609  // transactions in the block the client did not see. This avoids
2610  // hurting performance by pointlessly requiring a round-trip.
2611  // Note that there is currently no way for a node to request any
2612  // single transactions we didn't send here - they must either
2613  disconnect and retry or request the full block. Thus, the
2614  protocol spec allows us to provide duplicate txn here;
2615  however, we MUST always provide at least what the remote
2616  peer needs.
2617  typedef std::pair<size_t, uint256> PairType;
2618  for (PairType &pair : merkleBlock.vMatchedTxn) {
2619  connman.PushMessage(
2620  &pfrom, msgMaker.Make(NetMsgType::TX,
2621  *pblock->vtx[pair.first]));
2622  }
2623  }
2624  // else
2625  // no response
2626  } else if (inv.IsMsgCmpctBlk()) {
2627  // If a peer is asking for old blocks, we're almost guaranteed they
2628  // won't have a useful mempool to match against a compact block, and
2629  // we don't feel like constructing the object for them, so instead
2630  // we respond with the full, non-compact block.
2631  int nSendFlags = 0;
2632  if (CanDirectFetch(consensusParams) &&
2633  pindex->nHeight >=
2634  m_chainman.ActiveChain().Height() - MAX_CMPCTBLOCK_DEPTH) {
2635  CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
2636  connman.PushMessage(
2637  &pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
2638  cmpctblock));
2639  } else {
2640  connman.PushMessage(
2641  &pfrom,
2642  msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
2643  }
2644  }
2645 
2646  {
2647  LOCK(peer.m_block_inv_mutex);
2648  // Trigger the peer node to send a getblocks request for the next
2649  // batch of inventory.
2650  if (hash == peer.m_continuation_block) {
2651  // Send immediately. This must send even if redundant, and
2652  // we want it right after the last block so they don't wait for
2653  // other stuff first.
2654  std::vector<CInv> vInv;
2655  vInv.push_back(CInv(
2656  MSG_BLOCK, m_chainman.ActiveChain().Tip()->GetBlockHash()));
2657  connman.PushMessage(&pfrom,
2658  msgMaker.Make(NetMsgType::INV, vInv));
2659  peer.m_continuation_block = BlockHash();
2660  }
2661  }
2662  }
2663 }
2664 
2665 CTransactionRef
2666 PeerManagerImpl::FindTxForGetData(const CNode &peer, const TxId &txid,
2667  const std::chrono::seconds mempool_req,
2668  const std::chrono::seconds now) {
2669  auto txinfo = m_mempool.info(txid);
2670  if (txinfo.tx) {
2671  // If a TX could have been INVed in reply to a MEMPOOL request,
2672  // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
2673  // unconditionally.
2674  if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
2675  txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
2676  return std::move(txinfo.tx);
2677  }
2678  }
2679 
2680  {
2681  LOCK(cs_main);
2682 
2683  // Otherwise, the transaction must have been announced recently.
2684  if (State(peer.GetId())->m_recently_announced_invs.contains(txid)) {
2685  // If it was, it can be relayed from either the mempool...
2686  if (txinfo.tx) {
2687  return std::move(txinfo.tx);
2688  }
2689  // ... or the relay pool.
2690  auto mi = mapRelay.find(txid);
2691  if (mi != mapRelay.end()) {
2692  return mi->second;
2693  }
2694  }
2695  }
2696 
2697  return {};
2698 }
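// Worked example (editorial): with UNCONDITIONAL_RELAY_DELAY = 2min (see
// the constants at the top of this file), a tx that entered the mempool at
// time T is served to any peer once now >= T + 2min. Before that, a
// GETDATA is honored only if the txid was recently announced to that peer
// (m_recently_announced_invs) or the peer's last MEMPOOL request came at
// or after T.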
2699 
2702 static avalanche::ProofRef
2703 FindProofForGetData(const CNode &peer, const avalanche::ProofId &proofid,
2704  const std::chrono::seconds now) {
2705  avalanche::ProofRef proof;
2706 
2707  bool send_unconditionally =
2708  g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
2709  return pm.forPeer(proofid, [&](const avalanche::Peer &peer) {
2710  proof = peer.proof;
2711 
2712  // If we know that proof for long enough, allow for requesting
2713  // it.
2714  return peer.registration_time <=
2715  now - UNCONDITIONAL_RELAY_DELAY;
2716  });
2717  });
2718 
2719  if (!proof) {
2720  // Always send our local proof if it gets requested, assuming it's
2721  // valid. This will make it easier to bind with peers upon startup,
2722  // when the status of our proof is still unknown, pending a block. Note
2723  // that it still needs to have been announced first (presumably via an
2724  // avahello message).
2725  proof = g_avalanche->getLocalProof();
2726  }
2727 
2728  // We don't have this proof
2729  if (!proof) {
2730  return avalanche::ProofRef();
2731  }
2732 
2733  if (send_unconditionally) {
2734  return proof;
2735  }
2736 
2737  // Otherwise, the proofs must have been announced recently.
2738  LOCK(cs_main);
2739  if (State(peer.GetId())->m_recently_announced_proofs.contains(proofid)) {
2740  return proof;
2741  }
2742 
2743  return avalanche::ProofRef();
2744 }
2745 
2746 void PeerManagerImpl::ProcessGetData(
2747  const Config &config, CNode &pfrom, Peer &peer,
2748  const std::atomic<bool> &interruptMsgProc) {
2749  AssertLockNotHeld(cs_main);
2750 
2751  std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
2752  std::vector<CInv> vNotFound;
2753  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2754 
2755  const std::chrono::seconds now = GetTime<std::chrono::seconds>();
2756  // Get last mempool request time
2757  const std::chrono::seconds mempool_req =
2758  pfrom.m_tx_relay != nullptr
2759  ? pfrom.m_tx_relay->m_last_mempool_req.load()
2760  : std::chrono::seconds::min();
2761 
2762  // Process as many TX or AVA_PROOF items from the front of the getdata
2763  // queue as possible, since they're common and it's efficient to batch
2764  // process them.
2765  while (it != peer.m_getdata_requests.end()) {
2766  if (interruptMsgProc) {
2767  return;
2768  }
2769  // The send buffer provides backpressure. If there's no space in
2770  // the buffer, pause processing until the next call.
2771  if (pfrom.fPauseSend) {
2772  break;
2773  }
2774 
2775  const CInv &inv = *it;
2776 
2777  if (it->IsMsgProof()) {
2778  const avalanche::ProofId proofid(inv.hash);
2779  auto proof = FindProofForGetData(pfrom, proofid, now);
2780  if (proof) {
2781  m_connman.PushMessage(
2782  &pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
2783  g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
2784  pm.removeUnbroadcastProof(proofid);
2785  });
2786  } else {
2787  vNotFound.push_back(inv);
2788  }
2789 
2790  ++it;
2791  continue;
2792  }
2793 
2794  if (it->IsMsgTx()) {
2795  if (pfrom.m_tx_relay == nullptr) {
2796  // Ignore GETDATA requests for transactions from blocks-only
2797  // peers.
2798  continue;
2799  }
2800 
2801  const TxId txid(inv.hash);
2802  CTransactionRef tx =
2803  FindTxForGetData(pfrom, txid, mempool_req, now);
2804  if (tx) {
2805  int nSendFlags = 0;
2806  m_connman.PushMessage(
2807  &pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
2808  m_mempool.RemoveUnbroadcastTx(txid);
2809  // As we're going to send tx, make sure its unconfirmed parents
2810  // are made requestable.
2811  std::vector<TxId> parent_ids_to_add;
2812  {
2813  LOCK(m_mempool.cs);
2814  auto txiter = m_mempool.GetIter(tx->GetId());
2815  if (txiter) {
2816  const CTxMemPoolEntry::Parents &parents =
2817  (*txiter)->GetMemPoolParentsConst();
2818  parent_ids_to_add.reserve(parents.size());
2819  for (const CTxMemPoolEntry &parent : parents) {
2820  if (parent.GetTime() >
2821  now - UNCONDITIONAL_RELAY_DELAY) {
2822  parent_ids_to_add.push_back(
2823  parent.GetTx().GetId());
2824  }
2825  }
2826  }
2827  }
2828  for (const TxId &parent_txid : parent_ids_to_add) {
2829  // Relaying a transaction with a recent but unconfirmed
2830  // parent.
2831  if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory,
2832  return !pfrom.m_tx_relay->filterInventoryKnown
2833  .contains(parent_txid))) {
2834  LOCK(cs_main);
2835  State(pfrom.GetId())
2836  ->m_recently_announced_invs.insert(parent_txid);
2837  }
2838  }
2839  } else {
2840  vNotFound.push_back(inv);
2841  }
2842 
2843  ++it;
2844  continue;
2845  }
2846 
2847  // It's neither a proof nor a transaction
2848  break;
2849  }
2850 
2851  // Only process one BLOCK item per call, since they're uncommon and can be
2852  // expensive to process.
2853  if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
2854  const CInv &inv = *it++;
2855  if (inv.IsGenBlkMsg()) {
2856  ProcessGetBlockData(config, pfrom, peer, inv, m_connman);
2857  }
2858  // else: If the first item on the queue is an unknown type, we erase it
2859  // and continue processing the queue on the next call.
2860  }
2861 
2862  peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
2863 
2864  if (!vNotFound.empty()) {
2865  // Let the peer know that we didn't find what it asked for, so it
2866  // doesn't have to wait around forever. SPV clients care about this
2867  // message: it's needed when they are recursively walking the
2868  // dependencies of relevant unconfirmed transactions. SPV clients want
2869  // to do that because they want to know about (and store and rebroadcast
2870  // and risk analyze) the dependencies of transactions relevant to them,
2871  // without having to download the entire memory pool. Also, other nodes
2872  // can use these messages to automatically request a transaction from
2873  some other peer that announced it, and stop waiting for us to
2874  // respond. In normal operation, we often send NOTFOUND messages for
2875  // parents of transactions that we relay; if a peer is missing a parent,
2876  // they may assume we have them and request the parents from us.
2877  m_connman.PushMessage(&pfrom,
2878  msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
2879  }
2880 }
2881 
2882 void PeerManagerImpl::SendBlockTransactions(
2883  CNode &pfrom, const CBlock &block, const BlockTransactionsRequest &req) {
2884  BlockTransactions resp(req);
2885  for (size_t i = 0; i < req.indices.size(); i++) {
2886  if (req.indices[i] >= block.vtx.size()) {
2887  Misbehaving(pfrom, 100,
2888  "getblocktxn with out-of-bounds tx indices");
2889  return;
2890  }
2891  resp.txn[i] = block.vtx[req.indices[i]];
2892  }
2893  LOCK(cs_main);
2894  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2895  int nSendFlags = 0;
2896  m_connman.PushMessage(
2897  &pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
2898 }
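// Worked example (editorial): for a block with 3 transactions, a
// getblocktxn request carrying req.indices = {0, 5} trips the bounds check
// above (5 >= block.vtx.size()) and costs the peer 100 misbehavior points,
// enough to cross the usual discouragement threshold (assumed to be 100
// points, as in upstream Bitcoin Core).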
2899 
2900 void PeerManagerImpl::ProcessHeadersMessage(
2901  const Config &config, CNode &pfrom, const Peer &peer,
2902  const std::vector<CBlockHeader> &headers, bool via_compact_block) {
2903  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
2904  size_t nCount = headers.size();
2905 
2906  if (nCount == 0) {
2907  // Nothing interesting. Stop asking this peer for more headers.
2908  return;
2909  }
2910 
2911  bool received_new_header = false;
2912  const CBlockIndex *pindexLast = nullptr;
2913  {
2914  LOCK(cs_main);
2915  CNodeState *nodestate = State(pfrom.GetId());
2916 
2917  // If this looks like it could be a block announcement (nCount <
2918  // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
2919  // don't connect:
2920  // - Send a getheaders message in response to try to connect the chain.
2921  // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
2922  // don't connect before giving DoS points
2923  // - Once a headers message is received that is valid and does connect,
2924  // nUnconnectingHeaders gets reset back to 0.
2925  if (!m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) &&
2926  nCount < MAX_BLOCKS_TO_ANNOUNCE) {
2927  nodestate->nUnconnectingHeaders++;
2928  m_connman.PushMessage(
2929  &pfrom, msgMaker.Make(NetMsgType::GETHEADERS,
2930  m_chainman.ActiveChain().GetLocator(
2931  pindexBestHeader),
2932  uint256()));
2933  LogPrint(
2934  BCLog::NET,
2935  "received header %s: missing prev block %s, sending getheaders "
2936  "(%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
2937  headers[0].GetHash().ToString(),
2938  headers[0].hashPrevBlock.ToString(), pindexBestHeader->nHeight,
2939  pfrom.GetId(), nodestate->nUnconnectingHeaders);
2940  // Set hashLastUnknownBlock for this peer, so that if we eventually
2941  // get the headers - even from a different peer - we can use this
2942  // peer to download.
2943  UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
2944 
2945  if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS ==
2946  0) {
2947  // The peer is sending us many headers we can't connect.
2948  Misbehaving(pfrom, 20,
2949  strprintf("%d non-connecting headers",
2950  nodestate->nUnconnectingHeaders));
2951  }
2952  return;
2953  }
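// Worked example (editorial; assumes MAX_UNCONNECTING_HEADERS is 10, as in
// upstream Bitcoin Core): a peer that keeps announcing headers we cannot
// connect accrues Misbehaving(pfrom, 20) on its 10th, 20th, ... such
// message,
//
//     nUnconnectingHeaders % 10 == 0   // => 20 misbehavior points
//
// while a single valid, connecting headers message resets the counter to 0
// (see below).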
2954 
2955  BlockHash hashLastBlock;
2956  for (const CBlockHeader &header : headers) {
2957  if (!hashLastBlock.IsNull() &&
2958  header.hashPrevBlock != hashLastBlock) {
2959  Misbehaving(pfrom, 20, "non-continuous headers sequence");
2960  return;
2961  }
2962  hashLastBlock = header.GetHash();
2963  }
2964 
2965  // If we don't have the last header, then they'll have given us
2966  // something new (if these headers are valid).
2967  if (!m_chainman.m_blockman.LookupBlockIndex(hashLastBlock)) {
2968  received_new_header = true;
2969  }
2970  }
2971 
2972  BlockValidationState state;
2973  if (!m_chainman.ProcessNewBlockHeaders(config, headers, state,
2974  &pindexLast)) {
2975  if (state.IsInvalid()) {
2976  MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block,
2977  "invalid header received");
2978  return;
2979  }
2980  }
2981 
2982  {
2983  LOCK(cs_main);
2984  CNodeState *nodestate = State(pfrom.GetId());
2985  if (nodestate->nUnconnectingHeaders > 0) {
2986  LogPrint(BCLog::NET,
2987  "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n",
2988  pfrom.GetId(), nodestate->nUnconnectingHeaders);
2989  }
2990  nodestate->nUnconnectingHeaders = 0;
2991 
2992  assert(pindexLast);
2993  UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
2994 
2995  // From here, pindexBestKnownBlock should be guaranteed to be non-null,
2996  // because it is set in UpdateBlockAvailability. Some nullptr checks are
2997  // still present, however, as belt-and-suspenders.
2998 
2999  if (received_new_header &&
3000  pindexLast->nChainWork >
3001  m_chainman.ActiveChain().Tip()->nChainWork) {
3002  nodestate->m_last_block_announcement = GetTime();
3003  }
3004 
3005  if (nCount == MAX_HEADERS_RESULTS) {
3006  // Headers message had its maximum size; the peer may have more
3007  // headers.
3008  // TODO: optimize: if pindexLast is an ancestor of
3009  // m_chainman.ActiveChain().Tip or pindexBestHeader, continue from
3010  // there instead.
3011  LogPrint(
3012  BCLog::NET,
3013  "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
3014  pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
3015  m_connman.PushMessage(
3016  &pfrom,
3017  msgMaker.Make(NetMsgType::GETHEADERS,
3018  m_chainman.ActiveChain().GetLocator(pindexLast),
3019  uint256()));
3020  }
3021 
3022  bool fCanDirectFetch = CanDirectFetch(m_chainparams.GetConsensus());
3023  // If this set of headers is valid and ends in a block with at least as
3024  // much work as our tip, download as much as possible.
3025  if (fCanDirectFetch && pindexLast->IsValid(BlockValidity::TREE) &&
3026  m_chainman.ActiveChain().Tip()->nChainWork <=
3027  pindexLast->nChainWork) {
3028  std::vector<const CBlockIndex *> vToFetch;
3029  const CBlockIndex *pindexWalk = pindexLast;
3030  // Calculate all the blocks we'd need to switch to pindexLast, up to
3031  // a limit.
3032  while (pindexWalk &&
3033  !m_chainman.ActiveChain().Contains(pindexWalk) &&
3034  vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
3035  if (!pindexWalk->nStatus.hasData() &&
3036  !IsBlockRequested(pindexWalk->GetBlockHash())) {
3037  // We don't have this block, and it's not yet in flight.
3038  vToFetch.push_back(pindexWalk);
3039  }
3040  pindexWalk = pindexWalk->pprev;
3041  }
3042  // If pindexWalk still isn't on our main chain, we're looking at a
3043  // very large reorg at a time we think we're close to caught up to
3044  // the main chain -- this shouldn't really happen. Bail out on the
3045  // direct fetch and rely on parallel download instead.
3046  if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
3047  LogPrint(
3048  BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
3049  pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
3050  } else {
3051  std::vector<CInv> vGetData;
3052  // Download as much as possible, from earliest to latest.
3053  for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
3054  if (nodestate->nBlocksInFlight >=
3055  MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
3056  // Can't download any more from this peer
3057  break;
3058  }
3059  vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
3060  BlockRequested(config, pfrom.GetId(), *pindex);
3061  LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
3062  pindex->GetBlockHash().ToString(), pfrom.GetId());
3063  }
3064  if (vGetData.size() > 1) {
3065  LogPrint(BCLog::NET,
3066  "Downloading blocks toward %s (%d) via headers "
3067  "direct fetch\n",
3068  pindexLast->GetBlockHash().ToString(),
3069  pindexLast->nHeight);
3070  }
3071  if (vGetData.size() > 0) {
3072  if (nodestate->fSupportsDesiredCmpctVersion &&
3073  vGetData.size() == 1 && mapBlocksInFlight.size() == 1 &&
3074  pindexLast->pprev->IsValid(BlockValidity::CHAIN)) {
3075  // In any case, we want to download using a compact
3076  // block, not a regular one.
3077  vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
3078  }
3079  m_connman.PushMessage(
3080  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
3081  }
3082  }
3083  }
3084  // If we're in IBD, we want outbound peers that will serve us a useful
3085  // chain. Disconnect peers that are on chains with insufficient work.
3086  if (m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
3087  nCount != MAX_HEADERS_RESULTS) {
3088  // When nCount < MAX_HEADERS_RESULTS, we know we have no more
3089  // headers to fetch from this peer.
3090  if (nodestate->pindexBestKnownBlock &&
3091  nodestate->pindexBestKnownBlock->nChainWork <
3092  nMinimumChainWork) {
3093  // This peer has too little work on their headers chain to help
3094  // us sync -- disconnect if it is an outbound disconnection
3095  // candidate.
3096  // Note: We compare their tip to nMinimumChainWork (rather than
3097  // m_chainman.ActiveChain().Tip()) because we won't start block
3098  // download until we have a headers chain that has at least
3099  // nMinimumChainWork, even if a peer has a chain past our tip,
3100  // as an anti-DoS measure.
3101  if (pfrom.IsOutboundOrBlockRelayConn()) {
3102  LogPrintf("Disconnecting outbound peer %d -- headers "
3103  "chain has insufficient work\n",
3104  pfrom.GetId());
3105  pfrom.fDisconnect = true;
3106  }
3107  }
3108  }
3109 
3110  // If this is an outbound full-relay peer, check to see if we should
3111  // protect it from the bad/lagging chain logic.
3112  // Note that outbound block-relay peers are excluded from this
3113  // protection, and thus always subject to eviction under the bad/lagging
3114  // chain logic.
3115  // See ChainSyncTimeoutState.
3116  if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() &&
3117  nodestate->pindexBestKnownBlock != nullptr) {
3118  if (m_outbound_peers_with_protect_from_disconnect <
3119  MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT &&
3120  nodestate->pindexBestKnownBlock->nChainWork >=
3121  m_chainman.ActiveChain().Tip()->nChainWork &&
3122  !nodestate->m_chain_sync.m_protect) {
3123  LogPrint(BCLog::NET,
3124  "Protecting outbound peer=%d from eviction\n",
3125  pfrom.GetId());
3126  nodestate->m_chain_sync.m_protect = true;
3127  ++m_outbound_peers_with_protect_from_disconnect;
3128  }
3129  }
3130  }
3131 }
3132 
3142 void PeerManagerImpl::ProcessOrphanTx(const Config &config,
3143  std::set<TxId> &orphan_work_set) {
3144  AssertLockHeld(cs_main);
3145  AssertLockHeld(g_cs_orphans);
3146  while (!orphan_work_set.empty()) {
3147  const TxId orphanTxId = *orphan_work_set.begin();
3148  orphan_work_set.erase(orphan_work_set.begin());
3149 
3150  const auto [porphanTx, from_peer] = m_orphanage.GetTx(orphanTxId);
3151  if (porphanTx == nullptr) {
3152  continue;
3153  }
3154 
3155  const MempoolAcceptResult result =
3156  m_chainman.ProcessTransaction(porphanTx);
3157  const TxValidationState &state = result.m_state;
3158  if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
3159  LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n",
3160  orphanTxId.ToString());
3161  RelayTransaction(orphanTxId);
3162  m_orphanage.AddChildrenToWorkSet(*porphanTx, orphan_work_set);
3163  m_orphanage.EraseTx(orphanTxId);
3164  break;
3165  } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
3166  if (state.IsInvalid()) {
3167  LogPrint(BCLog::MEMPOOL,
3168  " invalid orphan tx %s from peer=%d. %s\n",
3169  orphanTxId.ToString(), from_peer, state.ToString());
3170  // Punish peer that gave us an invalid orphan tx
3171  MaybePunishNodeForTx(from_peer, state);
3172  }
3173  // Has inputs but not accepted to mempool
3174  // Probably non-standard or insufficient fee
3175  LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n",
3176  orphanTxId.ToString());
3177 
3178  assert(recentRejects);
3179  recentRejects->insert(orphanTxId);
3180 
3181  m_orphanage.EraseTx(orphanTxId);
3182  break;
3183  }
3184  }
3185 }
3186 
3187 bool PeerManagerImpl::PrepareBlockFilterRequest(
3188  CNode &peer, const CChainParams &chain_params, BlockFilterType filter_type,
3189  uint32_t start_height, const BlockHash &stop_hash, uint32_t max_height_diff,
3190  const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index) {
3191  const bool supported_filter_type =
3192  (filter_type == BlockFilterType::BASIC &&
3193  (peer.GetLocalServices() & NODE_COMPACT_FILTERS));
3194  if (!supported_filter_type) {
3195  LogPrint(BCLog::NET,
3196  "peer %d requested unsupported block filter type: %d\n",
3197  peer.GetId(), static_cast<uint8_t>(filter_type));
3198  peer.fDisconnect = true;
3199  return false;
3200  }
3201 
3202  {
3203  LOCK(cs_main);
3204  stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
3205 
3206  // Check that the stop block exists and the peer would be allowed to
3207  // fetch it.
3208  if (!stop_index ||
3209  !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) {
3210  LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
3211  peer.GetId(), stop_hash.ToString());
3212  peer.fDisconnect = true;
3213  return false;
3214  }
3215  }
3216 
3217  uint32_t stop_height = stop_index->nHeight;
3218  if (start_height > stop_height) {
3219  LogPrint(
3220  BCLog::NET,
3221  "peer %d sent invalid getcfilters/getcfheaders with " /* Continued
3222  */
3223  "start height %d and stop height %d\n",
3224  peer.GetId(), start_height, stop_height);
3225  peer.fDisconnect = true;
3226  return false;
3227  }
3228  if (stop_height - start_height >= max_height_diff) {
3229  LogPrint(BCLog::NET,
3230  "peer %d requested too many cfilters/cfheaders: %d / %d\n",
3231  peer.GetId(), stop_height - start_height + 1, max_height_diff);
3232  peer.fDisconnect = true;
3233  return false;
3234  }
3235 
3236  filter_index = GetBlockFilterIndex(filter_type);
3237  if (!filter_index) {
3238  LogPrint(BCLog::NET, "Filter index for supported type %s not found\n",
3239  BlockFilterTypeName(filter_type));
3240  return false;
3241  }
3242 
3243  return true;
3244 }
3245 
3246 void PeerManagerImpl::ProcessGetCFilters(CNode &peer, CDataStream &vRecv,
3247  const CChainParams &chain_params,
3248  CConnman &connman) {
3249  uint8_t filter_type_ser;
3250  uint32_t start_height;
3251  BlockHash stop_hash;
3252 
3253  vRecv >> filter_type_ser >> start_height >> stop_hash;
3254 
3255  const BlockFilterType filter_type =
3256  static_cast<BlockFilterType>(filter_type_ser);
3257 
3258  const CBlockIndex *stop_index;
3259  BlockFilterIndex *filter_index;
3260  if (!PrepareBlockFilterRequest(
3261  peer, chain_params, filter_type, start_height, stop_hash,
3262  MAX_GETCFILTERS_SIZE, stop_index, filter_index)) {
3263  return;
3264  }
3265 
3266  std::vector<BlockFilter> filters;
3267  if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
3268  LogPrint(BCLog::NET,
3269  "Failed to find block filter in index: filter_type=%s, "
3270  "start_height=%d, stop_hash=%s\n",
3271  BlockFilterTypeName(filter_type), start_height,
3272  stop_hash.ToString());
3273  return;
3274  }
3275 
3276  for (const auto &filter : filters) {
3277  CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion())
3278  .Make(NetMsgType::CFILTER, filter);
3279  connman.PushMessage(&peer, std::move(msg));
3280  }
3281 }
3282 
3283 void PeerManagerImpl::ProcessGetCFHeaders(CNode &peer, CDataStream &vRecv,
3284  const CChainParams &chain_params,
3285  CConnman &connman) {
3286  uint8_t filter_type_ser;
3287  uint32_t start_height;
3288  BlockHash stop_hash;
3289 
3290  vRecv >> filter_type_ser >> start_height >> stop_hash;
3291 
3292  const BlockFilterType filter_type =
3293  static_cast<BlockFilterType>(filter_type_ser);
3294 
3295  const CBlockIndex *stop_index;
3296  BlockFilterIndex *filter_index;
3297  if (!PrepareBlockFilterRequest(
3298  peer, chain_params, filter_type, start_height, stop_hash,
3299  MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) {
3300  return;
3301  }
3302 
3303  uint256 prev_header;
3304  if (start_height > 0) {
3305  const CBlockIndex *const prev_block =
3306  stop_index->GetAncestor(static_cast<int>(start_height - 1));
3307  if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
3308  LogPrint(BCLog::NET,
3309  "Failed to find block filter header in index: "
3310  "filter_type=%s, block_hash=%s\n",
3311  BlockFilterTypeName(filter_type),
3312  prev_block->GetBlockHash().ToString());
3313  return;
3314  }
3315  }
3316 
3317  std::vector<uint256> filter_hashes;
3318  if (!filter_index->LookupFilterHashRange(start_height, stop_index,
3319  filter_hashes)) {
3320  LogPrint(BCLog::NET,
3321  "Failed to find block filter hashes in index: filter_type=%s, "
3322  "start_height=%d, stop_hash=%s\n",
3323  BlockFilterTypeName(filter_type), start_height,
3324  stop_hash.ToString());
3325  return;
3326  }
3327 
3328  CSerializedNetMsg msg =
3329  CNetMsgMaker(peer.GetCommonVersion())
3330  .Make(NetMsgType::CFHEADERS, filter_type_ser,
3331  stop_index->GetBlockHash(), prev_header, filter_hashes);
3332  connman.PushMessage(&peer, std::move(msg));
3333 }
3334 
3335 void PeerManagerImpl::ProcessGetCFCheckPt(CNode &peer, CDataStream &vRecv,
3336  const CChainParams &chain_params,
3337  CConnman &connman) {
3338  uint8_t filter_type_ser;
3339  BlockHash stop_hash;
3340 
3341  vRecv >> filter_type_ser >> stop_hash;
3342 
3343  const BlockFilterType filter_type =
3344  static_cast<BlockFilterType>(filter_type_ser);
3345 
3346  const CBlockIndex *stop_index;
3347  BlockFilterIndex *filter_index;
3348  if (!PrepareBlockFilterRequest(
3349  peer, chain_params, filter_type, /*start_height=*/0, stop_hash,
3350  /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
3351  stop_index, filter_index)) {
3352  return;
3353  }
3354 
3355  std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
3356 
3357  // Populate headers.
3358  const CBlockIndex *block_index = stop_index;
3359  for (int i = headers.size() - 1; i >= 0; i--) {
3360  int height = (i + 1) * CFCHECKPT_INTERVAL;
3361  block_index = block_index->GetAncestor(height);
3362 
3363  if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
3364  LogPrint(BCLog::NET,
3365  "Failed to find block filter header in index: "
3366  "filter_type=%s, block_hash=%s\n",
3367  BlockFilterTypeName(filter_type),
3368  block_index->GetBlockHash().ToString());
3369  return;
3370  }
3371  }
3372 
3373  CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion())
3374  .Make(NetMsgType::CFCHECKPT, filter_type_ser,
3375  stop_index->GetBlockHash(), headers);
3376  connman.PushMessage(&peer, std::move(msg));
3377 }
3378 
3379 bool IsAvalancheMessageType(const std::string &msg_type) {
3380  return msg_type == NetMsgType::AVAHELLO ||
3381  msg_type == NetMsgType::AVAPOLL ||
3382  msg_type == NetMsgType::AVARESPONSE ||
3383  msg_type == NetMsgType::AVAPROOF ||
3384  msg_type == NetMsgType::GETAVAADDR ||
3385  msg_type == NetMsgType::GETAVAPROOFS ||
3386  msg_type == NetMsgType::AVAPROOFS ||
3387  msg_type == NetMsgType::AVAPROOFSREQ;
3388 }
3389 
3390 uint32_t PeerManagerImpl::GetAvalancheVoteForBlock(const BlockHash &hash) {
3391  AssertLockHeld(cs_main);
3392 
3393  const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
3394 
3395  // Unknown block.
3396  if (!pindex) {
3397  return -1;
3398  }
3399 
3400  // Invalid block
3401  if (pindex->nStatus.isInvalid()) {
3402  return 1;
3403  }
3404 
3405  // Parked block
3406  if (pindex->nStatus.isOnParkedChain()) {
3407  return 2;
3408  }
3409 
3410  const CBlockIndex *pindexTip = m_chainman.ActiveChain().Tip();
3411  const CBlockIndex *pindexFork = LastCommonAncestor(pindex, pindexTip);
3412 
3413  // Active block.
3414  if (pindex == pindexFork) {
3415  return 0;
3416  }
3417 
3418  // Fork block.
3419  if (pindexFork != pindexTip) {
3420  return 3;
3421  }
3422 
3423  // Missing block data.
3424  if (!pindex->nStatus.hasData()) {
3425  return -2;
3426  }
3427 
3428  // This block is built on top of the tip, we have the data, it
3429  // is pending connection or rejection.
3430  return -3;
3431 };
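// Summary (editorial) of the vote codes returned above:
//
//      0  active: the block is the tip or an ancestor of it
//      1  invalid
//      2  parked
//      3  fork: ancestor of a chain other than the active one
//     -1  unknown block
//     -2  block data missing
//     -3  built on the tip; pending connection or rejection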
3432 
3443 static uint32_t getAvalancheVoteForTx(CTxMemPool &mempool, const TxId &id) {
3444  return -1;
3445 };
3446 
3453 static uint32_t getAvalancheVoteForProof(const avalanche::ProofId &id) {
3454  assert(g_avalanche);
3455 
3456  // Rejected proof
3457  if (WITH_LOCK(cs_invalidProofs, return invalidProofs->contains(id))) {
3458  return 1;
3459  }
3460 
3461  return g_avalanche->withPeerManager([&id](avalanche::PeerManager &pm) {
3462  // The proof is actively bound to a peer
3463  if (pm.isBoundToPeer(id)) {
3464  return 0;
3465  }
3466 
3467  // Unknown proof
3468  if (!pm.exists(id)) {
3469  return -1;
3470  }
3471 
3472  // Immature proof
3473  if (pm.isImmature(id)) {
3474  return 2;
3475  }
3476 
3477  // Not immature, but in conflict with an actively bound proof
3478  if (pm.isInConflictingPool(id)) {
3479  return 3;
3480  }
3481 
3482  // The proof is known, not rejected, not immature, not a conflict, but
3483  // for some reason unbound. This should not happen if the above pools
3484  // are managed correctly, but added for robustness.
3485  return -2;
3486  });
3487 };
3488 
3489 void PeerManagerImpl::ProcessBlock(const Config &config, CNode &node,
3490  const std::shared_ptr<const CBlock> &block,
3491  bool force_processing) {
3492  bool new_block{false};
3493  m_chainman.ProcessNewBlock(config, block, force_processing, &new_block);
3494  if (new_block) {
3495  node.m_last_block_time = GetTime<std::chrono::seconds>();
3496  } else {
3497  LOCK(cs_main);
3498  mapBlockSource.erase(block->GetHash());
3499  }
3500 }
3501 
3502 void PeerManagerImpl::ProcessMessage(
3503  const Config &config, CNode &pfrom, const std::string &msg_type,
3504  CDataStream &vRecv, const std::chrono::microseconds time_received,
3505  const std::atomic<bool> &interruptMsgProc) {
3506  LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n",
3507  SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
3508 
3509  PeerRef peer = GetPeerRef(pfrom.GetId());
3510  if (peer == nullptr) {
3511  return;
3512  }
3513 
3514  if (IsAvalancheMessageType(msg_type)) {
3515  if (!g_avalanche) {
3516  LogPrint(BCLog::AVALANCHE,
3517  "Avalanche is not initialized, ignoring %s message\n",
3518  msg_type);
3519  return;
3520  }
3521 
3522  if (!isAvalancheEnabled(gArgs)) {
3523  // If avalanche is not enabled, ignore avalanche messages
3524  return;
3525  }
3526  }
3527 
3528  if (msg_type == NetMsgType::VERSION) {
3529  // Each connection can only send one version message
3530  if (pfrom.nVersion != 0) {
3531  Misbehaving(pfrom, 1, "redundant version message");
3532  return;
3533  }
3534 
3535  int64_t nTime;
3536  CAddress addrMe;
3537  CAddress addrFrom;
3538  uint64_t nNonce = 1;
3539  uint64_t nServiceInt;
3540  ServiceFlags nServices;
3541  int nVersion;
3542  std::string cleanSubVer;
3543  int starting_height = -1;
3544  bool fRelay = true;
3545  uint64_t nExtraEntropy = 1;
3546 
3547  vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
3548  if (nTime < 0) {
3549  nTime = 0;
3550  }
3551  nServices = ServiceFlags(nServiceInt);
3552  if (!pfrom.IsInboundConn()) {
3553  m_addrman.SetServices(pfrom.addr, nServices);
3554  }
3555  if (pfrom.ExpectServicesFromConn() &&
3556  !HasAllDesirableServiceFlags(nServices)) {
3557  LogPrint(BCLog::NET,
3558  "peer=%d does not offer the expected services "
3559  "(%08x offered, %08x expected); disconnecting\n",
3560  pfrom.GetId(), nServices,
3561  GetDesirableServiceFlags(nServices));
3562  pfrom.fDisconnect = true;
3563  return;
3564  }
3565 
3566  if (pfrom.IsAvalancheOutboundConnection() &&
3567  !(nServices & NODE_AVALANCHE)) {
3568  LogPrint(
3569  BCLog::AVALANCHE,
3570  "peer=%d does not offer the avalanche service; disconnecting\n",
3571  pfrom.GetId());
3572  pfrom.fDisconnect = true;
3573  return;
3574  }
3575 
3576  if (nVersion < MIN_PEER_PROTO_VERSION) {
3577  // disconnect from peers older than this proto version
3578  LogPrint(BCLog::NET,
3579  "peer=%d using obsolete version %i; disconnecting\n",
3580  pfrom.GetId(), nVersion);
3581  pfrom.fDisconnect = true;
3582  return;
3583  }
3584 
3585  if (!vRecv.empty()) {
3586  vRecv >> addrFrom >> nNonce;
3587  }
3588  if (!vRecv.empty()) {
3589  std::string strSubVer;
3590  vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
3591  cleanSubVer = SanitizeString(strSubVer);
3592  }
3593  if (!vRecv.empty()) {
3594  vRecv >> starting_height;
3595  }
3596  if (!vRecv.empty()) {
3597  vRecv >> fRelay;
3598  }
3599  if (!vRecv.empty()) {
3600  vRecv >> nExtraEntropy;
3601  }
3602  // Disconnect if we connected to ourself
3603  if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) {
3604  LogPrintf("connected to self at %s, disconnecting\n",
3605  pfrom.addr.ToString());
3606  pfrom.fDisconnect = true;
3607  return;
3608  }
3609 
3610  if (pfrom.IsInboundConn() && addrMe.IsRoutable()) {
3611  SeenLocal(addrMe);
3612  }
3613 
3614  // Inbound peers send us their version message when they connect.
3615  // We send our version message in response.
3616  if (pfrom.IsInboundConn()) {
3617  PushNodeVersion(config, pfrom, GetAdjustedTime());
3618  }
3619 
3620  // Change version
3621  const int greatest_common_version =
3622  std::min(nVersion, PROTOCOL_VERSION);
3623  pfrom.SetCommonVersion(greatest_common_version);
3624  pfrom.nVersion = nVersion;
3625 
3626  const CNetMsgMaker msg_maker(greatest_common_version);
3627 
3628  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
3629 
3630  // Signal ADDRv2 support (BIP155).
3631  m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
3632 
3633  pfrom.nServices = nServices;
3634  pfrom.SetAddrLocal(addrMe);
3635  {
3636  LOCK(pfrom.cs_SubVer);
3637  pfrom.cleanSubVer = cleanSubVer;
3638  }
3639  peer->m_starting_height = starting_height;
3640 
3641  // set nodes not relaying blocks and tx and not serving (parts) of the
3642  // historical blockchain as "clients"
3643  pfrom.fClient = (!(nServices & NODE_NETWORK) &&
3644  !(nServices & NODE_NETWORK_LIMITED));
3645 
3646  // set nodes not capable of serving the complete blockchain history as
3647  // "limited nodes"
3648  pfrom.m_limited_node =
3649  (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
3650 
3651  if (pfrom.m_tx_relay != nullptr) {
3652  LOCK(pfrom.m_tx_relay->cs_filter);
3653  // set to true after we get the first filter* message
3654  pfrom.m_tx_relay->fRelayTxes = fRelay;
3655  }
3656 
3657  pfrom.nRemoteHostNonce = nNonce;
3658  pfrom.nRemoteExtraEntropy = nExtraEntropy;
3659 
3660  // Potentially mark this peer as a preferred download peer.
3661  {
3662  LOCK(cs_main);
3663  UpdatePreferredDownload(pfrom, State(pfrom.GetId()));
3664  }
3665 
3666  // Self advertisement & GETADDR logic
3667  if (!pfrom.IsInboundConn() && SetupAddressRelay(pfrom, *peer)) {
3668  // For outbound peers, we try to relay our address (so that other
3669  // nodes can try to find us more quickly, as we have no guarantee
3670  // that an outbound peer is even aware of how to reach us) and do a
3671  // one-time address fetch (to help populate/update our addrman). If
3672  // we're starting up for the first time, our addrman may be pretty
3673  // empty and no one will know who we are, so these mechanisms are
3674  // important to help us connect to the network.
3675  //
3676  // We skip this for block-relay-only peers. We want to avoid
3677  // potentially leaking addr information and we do not want to
3678  // indicate to the peer that we will participate in addr relay.
3679  if (fListen &&
3680  !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
3681  CAddress addr =
3682  GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices());
3683  FastRandomContext insecure_rand;
3684  if (addr.IsRoutable()) {
3685  LogPrint(BCLog::NET,
3686  "ProcessMessages: advertising address %s\n",
3687  addr.ToString());
3688  PushAddress(*peer, addr, insecure_rand);
3689  } else if (IsPeerAddrLocalGood(&pfrom)) {
3690  addr.SetIP(addrMe);
3691  LogPrint(BCLog::NET,
3692  "ProcessMessages: advertising address %s\n",
3693  addr.ToString());
3694  PushAddress(*peer, addr, insecure_rand);
3695  }
3696  }
3697 
3698  // Get recent addresses
3699  m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version)
3700  .Make(NetMsgType::GETADDR));
3701  peer->m_getaddr_sent = true;
3702  // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND
3703  // addresses in response (bypassing the
3704  // MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
3705  WITH_LOCK(peer->m_addr_token_bucket_mutex,
3706  peer->m_addr_token_bucket += GetMaxAddrToSend());
3707  }
3708 
3709  if (!pfrom.IsInboundConn()) {
3710  // For non-inbound connections, we update the addrman to record
3711  // connection success so that addrman will have an up-to-date
3712  // notion of which peers are online and available.
3713  //
3714  // While we strive to not leak information about block-relay-only
3715  // connections via the addrman, not moving an address to the tried
3716  // table is also potentially detrimental because new-table entries
3717  // are subject to eviction in the event of addrman collisions. We
3718  // mitigate the information-leak by never calling
3719  // AddrMan::Connected() on block-relay-only peers; see
3720  // FinalizeNode().
3721  //
3722  // This moves an address from New to Tried table in Addrman,
3723  // resolves tried-table collisions, etc.
3724  m_addrman.Good(pfrom.addr);
3725  }
3726 
3727  std::string remoteAddr;
3728  if (fLogIPs) {
3729  remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
3730  }
3731 
3732  LogPrint(BCLog::NET,
3733  "receive version message: [%s] %s: version %d, blocks=%d, "
3734  "us=%s, txrelay=%d, peer=%d%s\n",
3735  pfrom.addr.ToString(), cleanSubVer, pfrom.nVersion,
3736  peer->m_starting_height, addrMe.ToString(), fRelay,
3737  pfrom.GetId(), remoteAddr);
3738 
3739  // Ignore time offsets that are improbable (before the Genesis block)
3740  // and may underflow the nTimeOffset calculation.
3741  int64_t currentTime = GetTime();
3742  if (nTime >= int64_t(m_chainparams.GenesisBlock().nTime)) {
3743  int64_t nTimeOffset = nTime - currentTime;
3744  pfrom.nTimeOffset = nTimeOffset;
3745  AddTimeData(pfrom.addr, nTimeOffset);
3746  } else {
3747  Misbehaving(pfrom, 20,
3748  "Ignoring invalid timestamp in version message");
3749  }
3750 
3751  // Feeler connections exist only to verify if address is online.
3752  if (pfrom.IsFeelerConn()) {
3753  LogPrint(BCLog::NET,
3754  "feeler connection completed peer=%d; disconnecting\n",
3755  pfrom.GetId());
3756  pfrom.fDisconnect = true;
3757  }
3758  return;
3759  }
3760 
3761  if (pfrom.nVersion == 0) {
3762  // Must have a version message before anything else
3763  Misbehaving(pfrom, 10, "non-version message before version handshake");
3764  return;
3765  }
3766 
3767  // At this point, the outgoing message serialization version can't change.
3768  const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
3769 
3770  if (msg_type == NetMsgType::VERACK) {
3771  if (pfrom.fSuccessfullyConnected) {
3772  LogPrint(BCLog::NET,
3773  "ignoring redundant verack message from peer=%d\n",
3774  pfrom.GetId());
3775  return;
3776  }
3777 
3778  if (!pfrom.IsInboundConn()) {
3779  LogPrintf(
3780  "New outbound peer connected: version: %d, blocks=%d, "
3781  "peer=%d%s (%s)\n",
3782  pfrom.nVersion.load(), peer->m_starting_height, pfrom.GetId(),
3783  (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString())
3784  : ""),
3785  pfrom.ConnectionTypeAsString());
3786  }
3787 
3788  if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) {
3789  // Tell our peer we prefer to receive headers rather than inv's
3790  // We send this to non-NODE NETWORK peers as well, because even
3791  // non-NODE NETWORK peers can announce blocks (such as pruning
3792  // nodes)
3793  m_connman.PushMessage(&pfrom,
3794  msgMaker.Make(NetMsgType::SENDHEADERS));
3795  }
3796 
3797  if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
3798  // Tell our peer we are willing to provide version 1 or 2
3799  // cmpctblocks. However, we do not request new block announcements
3800  // using cmpctblock messages. We send this to non-NODE NETWORK peers
3801  // as well, because they may wish to request compact blocks from us.
3802  bool fAnnounceUsingCMPCTBLOCK = false;
3803  uint64_t nCMPCTBLOCKVersion = 1;
3804  m_connman.PushMessage(&pfrom,
3805  msgMaker.Make(NetMsgType::SENDCMPCT,
3806  fAnnounceUsingCMPCTBLOCK,
3807  nCMPCTBLOCKVersion));
3808  }
3809 
3810  if (g_avalanche && isAvalancheEnabled(gArgs)) {
3811  if (g_avalanche->sendHello(&pfrom)) {
3812  LogPrint(BCLog::AVALANCHE, "Send avahello to peer %d\n",
3813  pfrom.GetId());
3814 
3815  auto localProof = g_avalanche->getLocalProof();
3816 
3817  if (localProof) {
3818  // Add our proof id to the list of proof INVs recently
3819  // announced to this peer. This is used for filtering which
3820  // INVs can be requested for download.
3821  LOCK(cs_main);
3822  State(pfrom.GetId())
3823  ->m_recently_announced_proofs.insert(
3824  localProof->getId());
3825  }
3826  }
3827  }
3828 
3829  pfrom.fSuccessfullyConnected = true;
3830  return;
3831  }
3832 
3833  if (!pfrom.fSuccessfullyConnected) {
3834  // Must have a verack message before anything else
3835  Misbehaving(pfrom, 10, "non-verack message before version handshake");
3836  return;
3837  }
3838 
3839  if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
3840  int stream_version = vRecv.GetVersion();
3841  if (msg_type == NetMsgType::ADDRV2) {
3842  // Add ADDRV2_FORMAT to the version so that the CNetAddr and
3843  // CAddress unserialize methods know that an address in v2 format is
3844  // coming.
3845  stream_version |= ADDRV2_FORMAT;
3846  }
3847 
3848  OverrideStream<CDataStream> s(&vRecv, vRecv.GetType(), stream_version);
3849  std::vector<CAddress> vAddr;
3850 
3851  s >> vAddr;
3852 
3853  if (!SetupAddressRelay(pfrom, *peer)) {
3854  LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n",
3855  msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
3856  return;
3857  }
3858 
3859  if (vAddr.size() > GetMaxAddrToSend()) {
3860  Misbehaving(
3861  pfrom, 20,
3862  strprintf("%s message size = %u", msg_type, vAddr.size()));
3863  return;
3864  }
3865 
3866  // Store the new addresses
3867  std::vector<CAddress> vAddrOk;
3868  int64_t nNow = GetAdjustedTime();
3869  int64_t nSince = nNow - 10 * 60;
3870 
3871  // Update/increment addr rate limiting bucket.
3872  const auto current_time = GetTime<std::chrono::microseconds>();
3873  {
3874  LOCK(peer->m_addr_token_bucket_mutex);
3875  if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
3876  // Don't increment bucket if it's already full
3877  const auto time_diff =
3878  std::max(current_time - peer->m_addr_token_timestamp, 0us);
3879  const double increment =
3880  CountSecondsDouble(time_diff) * MAX_ADDR_RATE_PER_SECOND;
3881  peer->m_addr_token_bucket =
3882  std::min<double>(peer->m_addr_token_bucket + increment,
3883  MAX_ADDR_PROCESSING_TOKEN_BUCKET);
3884 
3885  }
3886  peer->m_addr_token_timestamp = current_time;
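// Worked example (editorial; assumes the upstream Bitcoin Core constants
// MAX_ADDR_RATE_PER_SECOND = 0.1 and MAX_ADDR_PROCESSING_TOKEN_BUCKET =
// 1000): a peer that stayed quiet for 50 seconds earns
//
//     increment = 50 * 0.1 = 5.0 tokens, capped at 1000,
//
// and each address processed in the loop below spends one token, so a
// sustained flood is throttled to roughly 0.1 addresses per second.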
3887 
3888  const bool rate_limited =
3889  !pfrom.HasPermission(PF_ADDR);
3890  uint64_t num_proc = 0;
3891  uint64_t num_rate_limit = 0;
3892  Shuffle(vAddr.begin(), vAddr.end(), FastRandomContext());
3893  for (CAddress &addr : vAddr) {
3894  if (interruptMsgProc) {
3895  return;
3896  }
3897 
3898  {
3899  LOCK(peer->m_addr_token_bucket_mutex);
3900  // Apply rate limiting.
3901  if (peer->m_addr_token_bucket < 1.0) {
3902  if (rate_limited) {
3903  ++num_rate_limit;
3904  continue;
3905  }
3906  } else {
3907  peer->m_addr_token_bucket -= 1.0;
3908  }
3909  }
3910 
3911  // We only bother storing full nodes, though this may include things
3912  // which we would not make an outbound connection to, in part
3913  // because we may make feeler connections to them.
3914  if (!MayHaveUsefulAddressDB(addr.nServices) &&
3915  !HasAllDesirableServiceFlags(addr.nServices)) {
3916  continue;
3917  }
3918 
3919  if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) {
3920  addr.nTime = nNow - 5 * 24 * 60 * 60;
3921  }
3922  AddAddressKnown(*peer, addr);
3923  if (m_banman &&
3924  (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
3925  // Do not process banned/discouraged addresses beyond
3926  // remembering we received them
3927  continue;
3928  }
3929  ++num_proc;
3930  bool fReachable = IsReachable(addr);
3931  if (addr.nTime > nSince && !peer->m_getaddr_sent &&
3932  vAddr.size() <= 10 && addr.IsRoutable()) {
3933  // Relay to a limited number of other nodes
3934  RelayAddress(pfrom.GetId(), addr, fReachable);
3935  }
3936  // Do not store addresses outside our network
3937  if (fReachable) {
3938  vAddrOk.push_back(addr);
3939  }
3940  }
3941  peer->m_addr_processed += num_proc;
3942  peer->m_addr_rate_limited += num_rate_limit;
3943  LogPrint(BCLog::NET,
3944  "Received addr: %u addresses (%u processed, %u rate-limited) "
3945  "from peer=%d\n",
3946  vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
3947 
3948  m_addrman.Add(vAddrOk, pfrom.addr, 2 * 60 * 60);
3949  if (vAddr.size() < 1000) {
3950  peer->m_getaddr_sent = false;
3951  }
3952 
3953  // AddrFetch: Require multiple addresses to avoid disconnecting on
3954  // self-announcements
3955  if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
3956  LogPrint(BCLog::NET,
3957  "addrfetch connection completed peer=%d; disconnecting\n",
3958  pfrom.GetId());
3959  pfrom.fDisconnect = true;
3960  }
3961  return;
3962  }
3963 
3964  if (msg_type == NetMsgType::SENDADDRV2) {
3965  peer->m_wants_addrv2 = true;
3966  return;
3967  }
3968 
3969  if (msg_type == NetMsgType::SENDHEADERS) {
3970  LOCK(cs_main);
3971  State(pfrom.GetId())->fPreferHeaders = true;
3972  return;
3973  }
3974 
3975  if (msg_type == NetMsgType::SENDCMPCT) {
3976  bool fAnnounceUsingCMPCTBLOCK = false;
3977  uint64_t nCMPCTBLOCKVersion = 0;
3978  vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
3979  if (nCMPCTBLOCKVersion == 1) {
3980  LOCK(cs_main);
3981  // fProvidesHeaderAndIDs is used to "lock in" the version of
3982  // compact blocks we send.
3983  if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) {
3984  State(pfrom.GetId())->fProvidesHeaderAndIDs = true;
3985  }
3986 
3987  State(pfrom.GetId())->fPreferHeaderAndIDs =
3988  fAnnounceUsingCMPCTBLOCK;
3989  // save whether peer selects us as BIP152 high-bandwidth peer
3990  // (receiving sendcmpct(1) signals high-bandwidth,
3991  // sendcmpct(0) low-bandwidth)
3992  pfrom.m_bip152_highbandwidth_from = fAnnounceUsingCMPCTBLOCK;
3993  if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) {
3994  State(pfrom.GetId())->fSupportsDesiredCmpctVersion = true;
3995  }
3996  }
3997  return;
3998  }
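// For reference, the negotiation handled above follows BIP 152; a
// hypothetical peer wanting high-bandwidth announcements would send:
//
//   sendcmpct(fAnnounceUsingCMPCTBLOCK=true, nCMPCTBLOCKVersion=1)
//
// while sendcmpct(false, 1) keeps the low-bandwidth mode, in which blocks
// are announced via inv/headers and the compact block is fetched on
// request. Versions other than 1 are ignored by the handler above.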
3999 
4000  if (msg_type == NetMsgType::INV) {
4001  std::vector<CInv> vInv;
4002  vRecv >> vInv;
4003  if (vInv.size() > MAX_INV_SZ) {
4004  Misbehaving(pfrom, 20,
4005  strprintf("inv message size = %u", vInv.size()));
4006  return;
4007  }
4008 
4009  // Reject tx INVs when the -blocksonly setting is enabled, or this is a
4010  // block-relay-only peer
4011  bool reject_tx_invs{m_ignore_incoming_txs ||
4012  (pfrom.m_tx_relay == nullptr)};
4013 
4014  // Allow peers with relay permission to send data other than blocks
4015  // in blocks only mode
4016  if (pfrom.HasPermission(PF_RELAY)) {
4017  reject_tx_invs = false;
4018  }
4019 
4020  const auto current_time = GetTime<std::chrono::microseconds>();
4021  std::optional<BlockHash> best_block;
4022 
4023  auto logInv = [&](const CInv &inv, bool fAlreadyHave) {
4024  LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(),
4025  fAlreadyHave ? "have" : "new", pfrom.GetId());
4026  };
4027 
4028  for (CInv &inv : vInv) {
4029  if (interruptMsgProc) {
4030  return;
4031  }
4032 
4033  if (inv.IsMsgBlk()) {
4034  LOCK(cs_main);
4035  const bool fAlreadyHave = AlreadyHaveBlock(BlockHash(inv.hash));
4036  logInv(inv, fAlreadyHave);
4037 
4038  const BlockHash hash{inv.hash};
4039  UpdateBlockAvailability(pfrom.GetId(), hash);
4040  if (!fAlreadyHave && !fImporting && !fReindex &&
4041  !IsBlockRequested(hash)) {
4042  // Headers-first is the primary method of announcement on
4043  // the network. If a node fell back to sending blocks by
4044  // inv, it's probably for a re-org. The final block hash
4045  // provided should be the highest, so send a getheaders and
4046  // then fetch the blocks we need to catch up.
4047  best_block = std::move(hash);
4048  }
4049 
4050  continue;
4051  }
4052 
4053  if (inv.IsMsgProof()) {
4054  const avalanche::ProofId proofid(inv.hash);
4055  const bool fAlreadyHave = AlreadyHaveProof(proofid);
4056  logInv(inv, fAlreadyHave);
4057  pfrom.AddKnownProof(proofid);
4058 
4059  if (!fAlreadyHave && g_avalanche && isAvalancheEnabled(gArgs) &&
4060  !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
4061  const bool preferred = isPreferredDownloadPeer(pfrom);
4062 
4063  LOCK(cs_proofrequest);
4064  AddProofAnnouncement(pfrom, proofid, current_time,
4065  preferred);
4066  }
4067  continue;
4068  }
4069 
4070  if (inv.IsMsgTx()) {
4071  LOCK(cs_main);
4072  const TxId txid(inv.hash);
4073  const bool fAlreadyHave = AlreadyHaveTx(txid);
4074  logInv(inv, fAlreadyHave);
4075 
4076  pfrom.AddKnownTx(txid);
4077  if (reject_tx_invs) {
4078  LogPrint(BCLog::NET,
4079  "transaction (%s) inv sent in violation of "
4080  "protocol, disconnecting peer=%d\n",
4081  txid.ToString(), pfrom.GetId());
4082  pfrom.fDisconnect = true;
4083  return;
4084  } else if (!fAlreadyHave && !m_chainman.ActiveChainstate()
4085  .IsInitialBlockDownload()) {
4086  AddTxAnnouncement(pfrom, txid, current_time);
4087  }
4088 
4089  continue;
4090  }
4091 
4092  LogPrint(BCLog::NET,
4093  "Unknown inv type \"%s\" received from peer=%d\n",
4094  inv.ToString(), pfrom.GetId());
4095  }
4096 
4097  if (best_block) {
4098  m_connman.PushMessage(
4099  &pfrom, msgMaker.Make(NetMsgType::GETHEADERS,
4100  m_chainman.ActiveChain().GetLocator(
4101  pindexBestHeader),
4102  *best_block));
4103  LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
4104  pindexBestHeader->nHeight, best_block->ToString(),
4105  pfrom.GetId());
4106  }
4107 
4108  return;
4109  }
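// Note on the best_block logic above: only one getheaders is sent per inv
// message, keyed to the last block hash announced. Under headers-first
// sync the response covers everything between our best header and that
// block, so issuing a separate getheaders per announced hash would be
// redundant.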
4110 
4111  if (msg_type == NetMsgType::GETDATA) {
4112  std::vector<CInv> vInv;
4113  vRecv >> vInv;
4114  if (vInv.size() > MAX_INV_SZ) {
4115  Misbehaving(pfrom, 20,
4116  strprintf("getdata message size = %u", vInv.size()));
4117  return;
4118  }
4119 
4120  LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
4121  vInv.size(), pfrom.GetId());
4122 
4123  if (vInv.size() > 0) {
4124  LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
4125  vInv[0].ToString(), pfrom.GetId());
4126  }
4127 
4128  {
4129  LOCK(peer->m_getdata_requests_mutex);
4130  peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
4131  vInv.begin(), vInv.end());
4132  ProcessGetData(config, pfrom, *peer, interruptMsgProc);
4133  }
4134 
4135  return;
4136  }
4137 
4138  if (msg_type == NetMsgType::GETBLOCKS) {
4139  CBlockLocator locator;
4140  uint256 hashStop;
4141  vRecv >> locator >> hashStop;
4142 
4143  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
4144  LogPrint(BCLog::NET,
4145  "getblocks locator size %lld > %d, disconnect peer=%d\n",
4146  locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
4147  pfrom.fDisconnect = true;
4148  return;
4149  }
4150 
4151  // We might have announced the currently-being-connected tip using a
4152  // compact block, which resulted in the peer sending a getblocks
4153  // request, which we would otherwise respond to without the new block.
4154  // To avoid this situation we simply verify that we are on our best
4155  // known chain now. This is super overkill, but we handle it better
4156  // for getheaders requests, and there are no known nodes which support
4157  // compact blocks but still use getblocks to request blocks.
4158  {
4159  std::shared_ptr<const CBlock> a_recent_block;
4160  {
4161  LOCK(cs_most_recent_block);
4162  a_recent_block = most_recent_block;
4163  }
4164  BlockValidationState state;
4165  if (!m_chainman.ActiveChainstate().ActivateBestChain(
4166  config, state, a_recent_block)) {
4167  LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
4168  state.ToString());
4169  }
4170  }
4171 
4172  LOCK(cs_main);
4173 
4174  // Find the last block the caller has in the main chain
4175  const CBlockIndex *pindex =
4176  m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4177 
4178  // Send the rest of the chain
4179  if (pindex) {
4180  pindex = m_chainman.ActiveChain().Next(pindex);
4181  }
4182  int nLimit = 500;
4183  LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
4184  (pindex ? pindex->nHeight : -1),
4185  hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
4186  pfrom.GetId());
4187  for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
4188  if (pindex->GetBlockHash() == hashStop) {
4189  LogPrint(BCLog::NET, " getblocks stopping at %d %s\n",
4190  pindex->nHeight, pindex->GetBlockHash().ToString());
4191  break;
4192  }
4193  // If pruning, don't inv blocks unless we have on disk and are
4194  // likely to still have for some reasonable time window (1 hour)
4195  // that block relay might require.
4196  const int nPrunedBlocksLikelyToHave =
4197  MIN_BLOCKS_TO_KEEP -
4198  3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
4199  if (fPruneMode &&
4200  (!pindex->nStatus.hasData() ||
4201  pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight -
4202  nPrunedBlocksLikelyToHave)) {
4203  LogPrint(
4204  BCLog::NET,
4205  " getblocks stopping, pruned or too old block at %d %s\n",
4206  pindex->nHeight, pindex->GetBlockHash().ToString());
4207  break;
4208  }
4209  WITH_LOCK(
4210  peer->m_block_inv_mutex,
4211  peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
4212  if (--nLimit <= 0) {
4213  // When this block is requested, we'll send an inv that'll
4214  // trigger the peer to getblocks the next batch of inventory.
4215  LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n",
4216  pindex->nHeight, pindex->GetBlockHash().ToString());
4217  WITH_LOCK(peer->m_block_inv_mutex, {
4218  peer->m_continuation_block = pindex->GetBlockHash();
4219  });
4220  break;
4221  }
4222  }
4223  return;
4224  }
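// Rough sketch of the resulting legacy sync flow: the peer receives invs
// for up to 500 blocks; when it eventually requests the block recorded in
// m_continuation_block, we announce our tip again, prompting the peer's
// next getblocks for the following batch, and so on until it catches up.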
4225 
4226  if (msg_type == NetMsgType::GETBLOCKTXN) {
4227  BlockTransactionsRequest req;
4228  vRecv >> req;
4229 
4230  std::shared_ptr<const CBlock> recent_block;
4231  {
4232  LOCK(cs_most_recent_block);
4233  if (most_recent_block_hash == req.blockhash) {
4234  recent_block = most_recent_block;
4235  }
4236  // Unlock cs_most_recent_block to avoid cs_main lock inversion
4237  }
4238  if (recent_block) {
4239  SendBlockTransactions(pfrom, *recent_block, req);
4240  return;
4241  }
4242 
4243  {
4244  LOCK(cs_main);
4245 
4246  const CBlockIndex *pindex =
4247  m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
4248  if (!pindex || !pindex->nStatus.hasData()) {
4249  LogPrint(
4250  BCLog::NET,
4251  "Peer %d sent us a getblocktxn for a block we don't have\n",
4252  pfrom.GetId());
4253  return;
4254  }
4255 
4256  if (pindex->nHeight >=
4257  m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
4258  CBlock block;
4259  bool ret = ReadBlockFromDisk(block, pindex,
4260  m_chainparams.GetConsensus());
4261  assert(ret);
4262 
4263  SendBlockTransactions(pfrom, block, req);
4264  return;
4265  }
4266  }
4267 
4268  // If an older block is requested (should never happen in practice,
4269  // but can happen in tests) send a block response instead of a
4270  // blocktxn response. Sending a full block response instead of a
4271  // small blocktxn response is preferable in the case where a peer
4272  // might maliciously send lots of getblocktxn requests to trigger
4273  // expensive disk reads, because it will require the peer to
4274  // actually receive all the data read from disk over the network.
4275  LogPrint(BCLog::NET,
4276  "Peer %d sent us a getblocktxn for a block > %i deep\n",
4277  pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
4278  CInv inv;
4279  inv.type = MSG_BLOCK;
4280  inv.hash = req.blockhash;
4281  WITH_LOCK(peer->m_getdata_requests_mutex,
4282  peer->m_getdata_requests.push_back(inv));
4283  // The message processing loop will go around again (without pausing)
4284  // and we'll respond then (without cs_main)
4285  return;
4286  }
4287 
4288  if (msg_type == NetMsgType::GETHEADERS) {
4289  CBlockLocator locator;
4290  BlockHash hashStop;
4291  vRecv >> locator >> hashStop;
4292 
4293  if (locator.vHave.size() > MAX_LOCATOR_SZ) {
4294  LogPrint(BCLog::NET,
4295  "getheaders locator size %lld > %d, disconnect peer=%d\n",
4296  locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
4297  pfrom.fDisconnect = true;
4298  return;
4299  }
4300 
4301  LOCK(cs_main);
4302  if (m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
4303  !pfrom.HasPermission(PF_DOWNLOAD)) {
4304  LogPrint(BCLog::NET,
4305  "Ignoring getheaders from peer=%d because node is in "
4306  "initial block download\n",
4307  pfrom.GetId());
4308  return;
4309  }
4310 
4311  CNodeState *nodestate = State(pfrom.GetId());
4312  const CBlockIndex *pindex = nullptr;
4313  if (locator.IsNull()) {
4314  // If locator is null, return the hashStop block
4315  pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
4316  if (!pindex) {
4317  return;
4318  }
4319 
4320  if (!BlockRequestAllowed(pindex, m_chainparams.GetConsensus())) {
4321  LogPrint(BCLog::NET,
4322  "%s: ignoring request from peer=%i for old block "
4323  "header that isn't in the main chain\n",
4324  __func__, pfrom.GetId());
4325  return;
4326  }
4327  } else {
4328  // Find the last block the caller has in the main chain
4329  pindex =
4330  m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
4331  if (pindex) {
4332  pindex = m_chainman.ActiveChain().Next(pindex);
4333  }
4334  }
4335 
4336  // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
4337  // count at the end
4338  std::vector<CBlock> vHeaders;
4339  int nLimit = MAX_HEADERS_RESULTS;
4340  LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
4341  (pindex ? pindex->nHeight : -1),
4342  hashStop.IsNull() ? "end" : hashStop.ToString(),
4343  pfrom.GetId());
4344  for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
4345  vHeaders.push_back(pindex->GetBlockHeader());
4346  if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
4347  break;
4348  }
4349  }
4350  // pindex can be nullptr either if we sent
4351  // m_chainman.ActiveChain().Tip() OR if our peer has
4352  // m_chainman.ActiveChain().Tip() (and thus we are sending an empty
4353  // headers message). In both cases it's safe to update
4354  // pindexBestHeaderSent to be our tip.
4355  //
4356  // It is important that we simply reset the BestHeaderSent value here,
4357  // and not max(BestHeaderSent, newHeaderSent). We might have announced
4358  // the currently-being-connected tip using a compact block, which
4359  // resulted in the peer sending a headers request, which we respond to
4360  // without the new block. By resetting the BestHeaderSent, we ensure we
4361  // will re-announce the new block via headers (or compact blocks again)
4362  // in the SendMessages logic.
4363  nodestate->pindexBestHeaderSent =
4364  pindex ? pindex : m_chainman.ActiveChain().Tip();
4365  m_connman.PushMessage(&pfrom,
4366  msgMaker.Make(NetMsgType::HEADERS, vHeaders));
4367  return;
4368  }
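// For reference: the locator consumed above lists block hashes starting
// at the sender's tip, first one by one and then with gaps that roughly
// double, so FindForkInGlobalIndex can locate the last common ancestor
// with only O(log height) entries; that is why a small MAX_LOCATOR_SZ
// suffices for any realistic chain length.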
4369 
4370  if (msg_type == NetMsgType::TX) {
4371  // Stop processing the transaction early if
4372  // 1) We are in blocks only mode and peer has no relay permission
4373  // 2) This peer is a block-relay-only peer
4374  if ((m_ignore_incoming_txs && !pfrom.HasPermission(PF_RELAY)) ||
4375  (pfrom.m_tx_relay == nullptr)) {
4376  LogPrint(BCLog::NET,
4377  "transaction sent in violation of protocol peer=%d\n",
4378  pfrom.GetId());
4379  pfrom.fDisconnect = true;
4380  return;
4381  }
4382 
4383  CTransactionRef ptx;
4384  vRecv >> ptx;
4385  const CTransaction &tx = *ptx;
4386  const TxId &txid = tx.GetId();
4387  pfrom.AddKnownTx(txid);
4388 
4389  LOCK2(cs_main, g_cs_orphans);
4390 
4391  m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
4392 
4393  if (AlreadyHaveTx(txid)) {
4394  if (pfrom.HasPermission(PF_FORCERELAY)) {
4395  // Always relay transactions received from peers with
4396  // forcerelay permission, even if they were already in the
4397  // mempool, allowing the node to function as a gateway for
4398  // nodes hidden behind it.
4399  if (!m_mempool.exists(tx.GetId())) {
4400  LogPrintf("Not relaying non-mempool transaction %s from "
4401  "forcerelay peer=%d\n",
4402  tx.GetId().ToString(), pfrom.GetId());
4403  } else {
4404  LogPrintf("Force relaying tx %s from peer=%d\n",
4405  tx.GetId().ToString(), pfrom.GetId());
4406  RelayTransaction(tx.GetId());
4407  }
4408  }
4409  return;
4410  }
4411 
4412  const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx);
4413  const TxValidationState &state = result.m_state;
4414 
4415  if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
4416  // As this version of the transaction was acceptable, we can forget
4417  // about any requests for it.
4418  m_txrequest.ForgetInvId(tx.GetId());
4419  RelayTransaction(tx.GetId());
4420  m_orphanage.AddChildrenToWorkSet(tx, peer->m_orphan_work_set);
4421 
4422  pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
4423 
4424  LogPrint(BCLog::MEMPOOL,
4425  "AcceptToMemoryPool: peer=%d: accepted %s "
4426  "(poolsz %u txn, %u kB)\n",
4427  pfrom.GetId(), tx.GetId().ToString(), m_mempool.size(),
4428  m_mempool.DynamicMemoryUsage() / 1000);
4429 
4430  // Recursively process any orphan transactions that depended on this
4431  // one
4432  ProcessOrphanTx(config, peer->m_orphan_work_set);
4433  } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
4434  // It may be the case that the orphan's parents have all been
4435  // rejected.
4436  bool fRejectedParents = false;
4437 
4438  // Deduplicate parent txids, so that we don't have to loop over
4439  // the same parent txid more than once down below.
4440  std::vector<TxId> unique_parents;
4441  unique_parents.reserve(tx.vin.size());
4442  for (const CTxIn &txin : tx.vin) {
4443  // We start with all parents, and then remove duplicates below.
4444  unique_parents.push_back(txin.prevout.GetTxId());
4445  }
4446  std::sort(unique_parents.begin(), unique_parents.end());
4447  unique_parents.erase(
4448  std::unique(unique_parents.begin(), unique_parents.end()),
4449  unique_parents.end());
4450  for (const TxId &parent_txid : unique_parents) {
4451  if (recentRejects->contains(parent_txid)) {
4452  fRejectedParents = true;
4453  break;
4454  }
4455  }
4456  if (!fRejectedParents) {
4457  const auto current_time = GetTime<std::chrono::microseconds>();
4458 
4459  for (const TxId &parent_txid : unique_parents) {
4460  // FIXME: MSG_TX should use a TxHash, not a TxId.
4461  pfrom.AddKnownTx(parent_txid);
4462  if (!AlreadyHaveTx(parent_txid)) {
4463  AddTxAnnouncement(pfrom, parent_txid, current_time);
4464  }
4465  }
4466 
4467  if (m_orphanage.AddTx(ptx, pfrom.GetId())) {
4468  AddToCompactExtraTransactions(ptx);
4469  }
4470 
4471  // Once added to the orphan pool, a tx is considered
4472  // AlreadyHave, and we shouldn't request it anymore.
4473  m_txrequest.ForgetInvId(tx.GetId());
4474 
4475  // DoS prevention: do not allow m_orphanage to grow
4476  // unbounded (see CVE-2012-3789)
4477  unsigned int nMaxOrphanTx = (unsigned int)std::max(
4478  int64_t(0),
4479  gArgs.GetIntArg("-maxorphantx",
4480  DEFAULT_MAX_ORPHAN_TRANSACTIONS));
4481  unsigned int nEvicted = m_orphanage.LimitOrphans(nMaxOrphanTx);
4482  if (nEvicted > 0) {
4483  LogPrint(BCLog::MEMPOOL,
4484  "orphanage overflow, removed %u tx\n", nEvicted);
4485  }
4486  } else {
4487  LogPrint(BCLog::MEMPOOL,
4488  "not keeping orphan with rejected parents %s\n",
4489  tx.GetId().ToString());
4490  // We will continue to reject this tx since it has rejected
4491  // parents so avoid re-requesting it from other peers.
4492  recentRejects->insert(tx.GetId());
4493  m_txrequest.ForgetInvId(tx.GetId());
4494  }
4495  } else {
4496  assert(recentRejects);
4497  recentRejects->insert(tx.GetId());
4498  m_txrequest.ForgetInvId(tx.GetId());
4499 
4500  if (RecursiveDynamicUsage(*ptx) < 100000) {
4501  AddToCompactExtraTransactions(ptx);
4502  }
4503  }
4504 
4505  // If a tx has been detected by recentRejects, we will have reached
4506  // this point and the tx will have been ignored. Because we haven't
4507  // submitted the tx to our mempool, we won't have computed a DoS
4508  // score for it or determined exactly why we consider it invalid.
4509  //
4510  // This means we won't penalize any peer subsequently relaying a DoSy
4511  // tx (even if we penalized the first peer who gave it to us) because
4512  // we have to account for recentRejects showing false positives. In
4513  // other words, we shouldn't penalize a peer if we aren't *sure* they
4514  // submitted a DoSy tx.
4515  //
4516  // Note that recentRejects doesn't just record DoSy or invalid
4517  // transactions, but any tx not accepted by the mempool, which may be
4518  // due to node policy (vs. consensus). So we can't blanket penalize a
4519  // peer simply for relaying a tx that our recentRejects has caught,
4520  // regardless of false positives.
4521 
4522  if (state.IsInvalid()) {
4523  LogPrint(BCLog::MEMPOOLREJ,
4524  "%s from peer=%d was not accepted: %s\n",
4525  tx.GetHash().ToString(), pfrom.GetId(), state.ToString());
4526  MaybePunishNodeForTx(pfrom.GetId(), state);
4527  }
4528  return;
4529  }
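// The parent de-duplication earlier in this handler (the unique_parents
// construction) is the standard C++ sort/unique/erase idiom; as a
// self-contained illustration:
//
//   std::vector<int> v{3, 1, 3, 2, 1};
//   std::sort(v.begin(), v.end());                      // 1 1 2 3 3
//   v.erase(std::unique(v.begin(), v.end()), v.end());  // 1 2 3
//
// std::unique only collapses *adjacent* duplicates, which is why the
// sort must come first.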
4530 
4531  if (msg_type == NetMsgType::CMPCTBLOCK) {
4532  // Ignore cmpctblock received while importing
4533  if (fImporting || fReindex) {
4534  LogPrint(BCLog::NET,
4535  "Unexpected cmpctblock message received from peer %d\n",
4536  pfrom.GetId());
4537  return;
4538  }
4539 
4540  CBlockHeaderAndShortTxIDs cmpctblock;
4541  try {
4542  vRecv >> cmpctblock;
4543  } catch (std::ios_base::failure &e) {
4544  // This block has non-contiguous or overflowing indexes
4545  Misbehaving(pfrom, 100, "cmpctblock-bad-indexes");
4546  return;
4547  }
4548 
4549  bool received_new_header = false;
4550 
4551  {
4552  LOCK(cs_main);
4553 
4554  if (!m_chainman.m_blockman.LookupBlockIndex(
4555  cmpctblock.header.hashPrevBlock)) {
4556  // Doesn't connect (or is genesis), instead of DoSing in
4557  // AcceptBlockHeader, request deeper headers
4558  if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
4559  m_connman.PushMessage(
4560  &pfrom,
4561  msgMaker.Make(NetMsgType::GETHEADERS,
4562  m_chainman.ActiveChain().GetLocator(
4563  pindexBestHeader),
4564  uint256()));
4565  }
4566  return;
4567  }
4568 
4569  if (!m_chainman.m_blockman.LookupBlockIndex(
4570  cmpctblock.header.GetHash())) {
4571  received_new_header = true;
4572  }
4573  }
4574 
4575  const CBlockIndex *pindex = nullptr;
4576  BlockValidationState state;
4577  if (!m_chainman.ProcessNewBlockHeaders(config, {cmpctblock.header},
4578  state, &pindex)) {
4579  if (state.IsInvalid()) {
4580  MaybePunishNodeForBlock(pfrom.GetId(), state,
4581  /*via_compact_block*/ true,
4582  "invalid header via cmpctblock");
4583  return;
4584  }
4585  }
4586 
4587  // When we succeed in decoding a block's txids from a cmpctblock
4588  // message we typically jump to the BLOCKTXN handling code, with a
4589  // dummy (empty) BLOCKTXN message, to re-use the logic there in
4590  // completing processing of the putative block (without cs_main).
4591  bool fProcessBLOCKTXN = false;
4592  CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
4593 
4594  // If we end up treating this as a plain headers message, call that as
4595  // well
4596  // without cs_main.
4597  bool fRevertToHeaderProcessing = false;
4598 
4599  // Keep a CBlock for "optimistic" compactblock reconstructions (see
4600  // below)
4601  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4602  bool fBlockReconstructed = false;
4603 
4604  {
4605  LOCK(cs_main);
4606  // If AcceptBlockHeader returned true, it set pindex
4607  assert(pindex);
4608  UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
4609 
4610  CNodeState *nodestate = State(pfrom.GetId());
4611 
4612  // If this was a new header with more work than our tip, update the
4613  // peer's last block announcement time
4614  if (received_new_header &&
4615  pindex->nChainWork >
4616  m_chainman.ActiveChain().Tip()->nChainWork) {
4617  nodestate->m_last_block_announcement = GetTime();
4618  }
4619 
4620  std::map<BlockHash,
4621  std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
4622  iterator blockInFlightIt =
4623  mapBlocksInFlight.find(pindex->GetBlockHash());
4624  bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
4625 
4626  if (pindex->nStatus.hasData()) {
4627  // Nothing to do here
4628  return;
4629  }
4630 
4631  if (pindex->nChainWork <=
4632  m_chainman.ActiveChain()
4633  .Tip()
4634  ->nChainWork || // We know something better
4635  pindex->nTx != 0) {
4636  // We had this block at some point, but pruned it
4637  if (fAlreadyInFlight) {
4638  // We requested this block for some reason, but our mempool
4639  // will probably be useless so we just grab the block via
4640  // normal getdata.
4641  std::vector<CInv> vInv(1);
4642  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
4643  m_connman.PushMessage(
4644  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
4645  }
4646  return;
4647  }
4648 
4649  // If we're not close to tip yet, give up and let parallel block
4650  // fetch work its magic.
4651  if (!fAlreadyInFlight &&
4652  !CanDirectFetch(m_chainparams.GetConsensus())) {
4653  return;
4654  }
4655 
4656  // We want to be a bit conservative just to be extra careful about
4657  // DoS possibilities in compact block processing...
4658  if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
4659  if ((!fAlreadyInFlight && nodestate->nBlocksInFlight <
4660  MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
4661  (fAlreadyInFlight &&
4662  blockInFlightIt->second.first == pfrom.GetId())) {
4663  std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
4664  if (!BlockRequested(config, pfrom.GetId(), *pindex,
4665  &queuedBlockIt)) {
4666  if (!(*queuedBlockIt)->partialBlock) {
4667  (*queuedBlockIt)
4668  ->partialBlock.reset(
4669  new PartiallyDownloadedBlock(config,
4670  &m_mempool));
4671  } else {
4672  // The block was already in flight using compact
4673  // blocks from the same peer.
4674  LogPrint(BCLog::NET, "Peer sent us compact block "
4675  "we were already syncing!\n");
4676  return;
4677  }
4678  }
4679 
4680  PartiallyDownloadedBlock &partialBlock =
4681  *(*queuedBlockIt)->partialBlock;
4682  ReadStatus status =
4683  partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
4684  if (status == READ_STATUS_INVALID) {
4685  // Reset in-flight state in case Misbehaving does not
4686  // result in a disconnect
4687  RemoveBlockRequest(pindex->GetBlockHash());
4688  Misbehaving(pfrom, 100, "invalid compact block");
4689  return;
4690  } else if (status == READ_STATUS_FAILED) {
4691  // Duplicate txindices, the block is now in-flight, so
4692  // just request it.
4693  std::vector<CInv> vInv(1);
4694  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
4695  m_connman.PushMessage(
4696  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
4697  return;
4698  }
4699 
4700  BlockTransactionsRequest req;
4701  for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
4702  if (!partialBlock.IsTxAvailable(i)) {
4703  req.indices.push_back(i);
4704  }
4705  }
4706  if (req.indices.empty()) {
4707  // Dirty hack to jump to BLOCKTXN code (TODO: move
4708  // message handling into their own functions)
4709  BlockTransactions txn;
4710  txn.blockhash = cmpctblock.header.GetHash();
4711  blockTxnMsg << txn;
4712  fProcessBLOCKTXN = true;
4713  } else {
4714  req.blockhash = pindex->GetBlockHash();
4715  m_connman.PushMessage(
4716  &pfrom,
4717  msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
4718  }
4719  } else {
4720  // This block is either already in flight from a different
4721  // peer, or this peer has too many blocks outstanding to
4722  // download from. Optimistically try to reconstruct anyway
4723  // since we might be able to without any round trips.
4724  PartiallyDownloadedBlock tempBlock(config, &m_mempool);
4725  ReadStatus status =
4726  tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
4727  if (status != READ_STATUS_OK) {
4728  // TODO: don't ignore failures
4729  return;
4730  }
4731  std::vector<CTransactionRef> dummy;
4732  status = tempBlock.FillBlock(*pblock, dummy);
4733  if (status == READ_STATUS_OK) {
4734  fBlockReconstructed = true;
4735  }
4736  }
4737  } else {
4738  if (fAlreadyInFlight) {
4739  // We requested this block, but it's far into the future, so
4740  // our mempool will probably be useless - request the block
4741  // normally.
4742  std::vector<CInv> vInv(1);
4743  vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
4744  m_connman.PushMessage(
4745  &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
4746  return;
4747  } else {
4748  // If this was an announce-cmpctblock, we want the same
4749  // treatment as a header message.
4750  fRevertToHeaderProcessing = true;
4751  }
4752  }
4753  } // cs_main
4754 
4755  if (fProcessBLOCKTXN) {
4756  return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
4757  blockTxnMsg, time_received, interruptMsgProc);
4758  }
4759 
4760  if (fRevertToHeaderProcessing) {
4761  // Headers received from HB compact block peers are permitted to be
4762  // relayed before full validation (see BIP 152), so we don't want to
4763  // disconnect the peer if the header turns out to be for an invalid
4764  // block. Note that if a peer tries to build on an invalid chain,
4765  // that will be detected and the peer will be banned.
4766  return ProcessHeadersMessage(config, pfrom, *peer,
4767  {cmpctblock.header},
4768  /*via_compact_block=*/true);
4769  }
4770 
4771  if (fBlockReconstructed) {
4772  // If we got here, we were able to optimistically reconstruct a
4773  // block that is in flight from some other peer.
4774  {
4775  LOCK(cs_main);
4776  mapBlockSource.emplace(pblock->GetHash(),
4777  std::make_pair(pfrom.GetId(), false));
4778  }
4779  // Setting force_processing to true means that we bypass some of
4780  // our anti-DoS protections in AcceptBlock, which filters
4781  // unrequested blocks that might be trying to waste our resources
4782  // (eg disk space). Because we only try to reconstruct blocks when
4783  // we're close to caught up (via the CanDirectFetch() requirement
4784  // above, combined with the behavior of not requesting blocks until
4785  // we have a chain with at least nMinimumChainWork), and we ignore
4786  // compact blocks with less work than our tip, it is safe to treat
4787  // reconstructed compact blocks as having been requested.
4788  ProcessBlock(config, pfrom, pblock, /*force_processing=*/true);
4789  // hold cs_main for CBlockIndex::IsValid()
4790  LOCK(cs_main);
4791  if (pindex->IsValid(BlockValidity::TRANSACTIONS)) {
4792  // Clear download state for this block, which is in process from
4793  // some other peer. We do this after calling ProcessNewBlock so
4794  // that a malleated cmpctblock announcement can't be used to
4795  // interfere with block relay.
4796  RemoveBlockRequest(pblock->GetHash());
4797  }
4798  }
4799  return;
4800  }
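// Background for the reconstruction above (see BIP 152): a cmpctblock
// message carries the full header, a nonce, 6-byte short transaction ids
// (a SipHash of each txid keyed from the header and nonce), and a few
// prefilled transactions such as the coinbase. Short ids are matched
// against the mempool and vExtraTxnForCompact; only the indices still
// missing are fetched with getblocktxn, often avoiding a full block
// download entirely.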
4801 
4802  if (msg_type == NetMsgType::BLOCKTXN) {
4803  // Ignore blocktxn received while importing
4804  if (fImporting || fReindex) {
4805  LogPrint(BCLog::NET,
4806  "Unexpected blocktxn message received from peer %d\n",
4807  pfrom.GetId());
4808  return;
4809  }
4810 
4811  BlockTransactions resp;
4812  vRecv >> resp;
4813 
4814  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4815  bool fBlockRead = false;
4816  {
4817  LOCK(cs_main);
4818 
4819  std::map<BlockHash,
4820  std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
4821  iterator it = mapBlocksInFlight.find(resp.blockhash);
4822  if (it == mapBlocksInFlight.end() ||
4823  !it->second.second->partialBlock ||
4824  it->second.first != pfrom.GetId()) {
4825  LogPrint(BCLog::NET,
4826  "Peer %d sent us block transactions for block "
4827  "we weren't expecting\n",
4828  pfrom.GetId());
4829  return;
4830  }
4831 
4832  PartiallyDownloadedBlock &partialBlock =
4833  *it->second.second->partialBlock;
4834  ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
4835  if (status == READ_STATUS_INVALID) {
4836  // Reset in-flight state in case Misbehaving does not
4837  // result in a disconnect.
4838  RemoveBlockRequest(resp.blockhash);
4839  Misbehaving(
4840  pfrom, 100,
4841  "invalid compact block/non-matching block transactions");
4842  return;
4843  } else if (status == READ_STATUS_FAILED) {
4844  // Might have collided, fall back to getdata now :(
4845  std::vector<CInv> invs;
4846  invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
4847  m_connman.PushMessage(&pfrom,
4848  msgMaker.Make(NetMsgType::GETDATA, invs));
4849  } else {
4850  // Block is either okay, or possibly we received
4851  // READ_STATUS_CHECKBLOCK_FAILED.
4852  // Note that CheckBlock can only fail for one of a few reasons:
4853  // 1. bad-proof-of-work (impossible here, because we've already
4854  // accepted the header)
4855  // 2. merkleroot doesn't match the transactions given (already
4856  // caught in FillBlock with READ_STATUS_FAILED, so
4857  // impossible here)
4858  // 3. the block is otherwise invalid (eg invalid coinbase,
4859  // block is too big, too many sigChecks, etc).
4860  // So if CheckBlock failed, #3 is the only possibility.
4861  // Under BIP 152, we don't DoS-ban unless proof of work is
4862  // invalid (we don't require all the stateless checks to have
4863  // been run). This is handled below, so just treat this as
4864  // though the block was successfully read, and rely on the
4865  // handling in ProcessNewBlock to ensure the block index is
4866  // updated, etc.
4867 
4868  // it is now an empty pointer
4869  RemoveBlockRequest(resp.blockhash);
4870  fBlockRead = true;
4871  // mapBlockSource is used for potentially punishing peers and
4872  // updating which peers send us compact blocks, so the race
4873  // between here and cs_main in ProcessNewBlock is fine.
4874  // BIP 152 permits peers to relay compact blocks after
4875  // validating the header only; we should not punish peers
4876  // if the block turns out to be invalid.
4877  mapBlockSource.emplace(resp.blockhash,
4878  std::make_pair(pfrom.GetId(), false));
4879  }
4880  } // Don't hold cs_main when we call into ProcessNewBlock
4881  if (fBlockRead) {
4882  // Since we requested this block (it was in mapBlocksInFlight),
4883  // force it to be processed, even if it would not be a candidate for
4884  // new tip (missing previous block, chain not long enough, etc)
4885  // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
4886  // disk-space attacks), but this should be safe due to the
4887  // protections in the compact block handler -- see related comment
4888  // in compact block optimistic reconstruction handling.
4889  ProcessBlock(config, pfrom, pblock, /*force_processing=*/true);
4890  }
4891  return;
4892  }
4893 
4894  if (msg_type == NetMsgType::HEADERS) {
4895  // Ignore headers received while importing
4896  if (fImporting || fReindex) {
4897  LogPrint(BCLog::NET,
4898  "Unexpected headers message received from peer %d\n",
4899  pfrom.GetId());
4900  return;
4901  }
4902 
4903  std::vector<CBlockHeader> headers;
4904 
4905  // Bypass the normal CBlock deserialization, as we don't want to risk
4906  // deserializing 2000 full blocks.
4907  unsigned int nCount = ReadCompactSize(vRecv);
4908  if (nCount > MAX_HEADERS_RESULTS) {
4909  Misbehaving(pfrom, 20,
4910  strprintf("too-many-headers: headers message size = %u",
4911  nCount));
4912  return;
4913  }
4914  headers.resize(nCount);
4915  for (unsigned int n = 0; n < nCount; n++) {
4916  vRecv >> headers[n];
4917  // Ignore tx count; assume it is 0.
4918  ReadCompactSize(vRecv);
4919  }
4920 
4921  return ProcessHeadersMessage(config, pfrom, *peer, headers,
4922  /*via_compact_block=*/false);
4923  }
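// Serialization note for the loop above: each entry of a headers message
// is an 80-byte block header followed by a 0x00 byte where a full CBlock
// would place its transaction count, hence the extra ReadCompactSize()
// per header to skip that byte.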
4924 
4925  if (msg_type == NetMsgType::BLOCK) {
4926  // Ignore block received while importing
4927  if (fImporting || fReindex) {
4928  LogPrint(BCLog::NET,
4929  "Unexpected block message received from peer %d\n",
4930  pfrom.GetId());
4931  return;
4932  }
4933 
4934  std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
4935  vRecv >> *pblock;
4936 
4937  LogPrint(BCLog::NET, "received block %s peer=%d\n",
4938  pblock->GetHash().ToString(), pfrom.GetId());
4939 
4940  // Process all blocks from whitelisted peers, even if not requested,
4941  // unless we're still syncing with the network. Such an unrequested
4942  // block may still be processed, subject to the conditions in
4943  // AcceptBlock().
4944  bool forceProcessing =
4945  pfrom.HasPermission(PF_NOBAN) &&
4946  !m_chainman.ActiveChainstate().IsInitialBlockDownload();
4947  const BlockHash hash = pblock->GetHash();
4948  {
4949  LOCK(cs_main);
4950  // Always process the block if we requested it, since we may
4951  // need it even when it's not a candidate for a new best tip.
4952  forceProcessing = IsBlockRequested(hash);
4953  RemoveBlockRequest(hash);
4954  // mapBlockSource is only used for punishing peers and setting
4955  // which peers send us compact blocks, so the race between here and
4956  // cs_main in ProcessNewBlock is fine.
4957  mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
4958  }
4959  ProcessBlock(config, pfrom, pblock, forceProcessing);
4960  return;
4961  }
4962 
4963  if (msg_type == NetMsgType::AVAHELLO) {
4964  {
4965  bool expected = false;
4966  if (!pfrom.m_avalanche_enabled.compare_exchange_strong(expected,
4967  true)) {
4968  LogPrint(
4969  BCLog::AVALANCHE,
4970  "Ignoring avahello from peer %d: already in our node set\n",
4971  pfrom.GetId());
4972  return;
4973  }
4974  }
4975 
4976  avalanche::Delegation delegation;
4977  vRecv >> delegation;
4978 
4979  // A delegation with an all-zero limited id indicates that the peer has
4980  // no proof, so we're done.
4981  if (delegation.getLimitedProofId() != uint256::ZERO) {
4982  avalanche::DelegationState state;
4983  CPubKey pubkey;
4984  if (!delegation.verify(state, pubkey)) {
4985  Misbehaving(pfrom, 100, "invalid-delegation");
4986  return;
4987  }
4988  pfrom.m_avalanche_pubkey = std::move(pubkey);
4989 
4990  CHashWriter sighasher(SER_GETHASH, 0);
4991  sighasher << delegation.getId();
4992  sighasher << pfrom.nRemoteHostNonce;
4993  sighasher << pfrom.GetLocalNonce();
4994  sighasher << pfrom.nRemoteExtraEntropy;
4995  sighasher << pfrom.GetLocalExtraEntropy();
4996 
4997  SchnorrSig sig;
4998  vRecv >> sig;
4999  if (!(*pfrom.m_avalanche_pubkey)
5000  .VerifySchnorr(sighasher.GetHash(), sig)) {
5001  Misbehaving(pfrom, 100, "invalid-avahello-signature");
5002  return;
5003  }
5004 
5005  // If we don't know this proof already, add it to the tracker so it
5006  // can be requested.
5007  const avalanche::ProofId proofid(delegation.getProofId());
5008  if (!AlreadyHaveProof(proofid)) {
5009  const bool preferred = isPreferredDownloadPeer(pfrom);
5010  LOCK(cs_proofrequest);
5011  AddProofAnnouncement(pfrom, proofid,
5012  GetTime<std::chrono::microseconds>(),
5013  preferred);
5014  }
5015 
5016  // Don't check the return value. If it fails we probably don't
5017  // know about the proof yet.
5018  g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
5019  return pm.addNode(pfrom.GetId(), proofid);
5020  });
5021  }
5022 
5023  // Send getavaaddr and getavaproofs to our avalanche outbound or
5024  // manual connections
5025  if (!pfrom.IsInboundConn()) {
5026  m_connman.PushMessage(&pfrom,
5027  msgMaker.Make(NetMsgType::GETAVAADDR));
5028  WITH_LOCK(peer->m_addr_token_bucket_mutex,
5029  peer->m_addr_token_bucket += GetMaxAddrToSend());
5030 
5031  if (pfrom.m_proof_relay &&
5032  !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
5033  m_connman.PushMessage(&pfrom,
5034  msgMaker.Make(NetMsgType::GETAVAPROOFS));
5035  pfrom.m_proof_relay->compactproofs_requested = true;
5036  }
5037  }
5038 
5039  return;
5040  }
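// The signature check above ties the avahello to this particular
// connection: the digest commits to the delegation id together with both
// sides' nonces and extra entropy, roughly (variable names illustrative):
//
//   CHashWriter h(SER_GETHASH, 0);
//   h << delegation.getId() << remote_nonce << local_nonce
//     << remote_extra_entropy << local_extra_entropy;
//   bool ok = pubkey.VerifySchnorr(h.GetHash(), sig);
//
// so a captured avahello cannot be replayed on a different connection,
// where the nonces would differ.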
5041 
5042  if (msg_type == NetMsgType::AVAPOLL) {
5043  auto now = std::chrono::steady_clock::now();
5044  int64_t cooldown =
5045  gArgs.GetIntArg("-avacooldown", AVALANCHE_DEFAULT_COOLDOWN);
5046 
5047  {
5048  LOCK(cs_main);
5049  auto &node_state = State(pfrom.GetId())->m_avalanche_state;
5050 
5051  if (now <
5052  node_state.last_poll + std::chrono::milliseconds(cooldown)) {
5053  Misbehaving(pfrom, 20, "avapool-cooldown");
5054  }
5055 
5056  node_state.last_poll = now;
5057  }
5058 
5059  const bool quorum_established =
5060  g_avalanche && g_avalanche->isQuorumEstablished();
5061 
5062  uint64_t round;
5063  Unserialize(vRecv, round);
5064 
5065  unsigned int nCount = ReadCompactSize(vRecv);
5066  if (nCount > AVALANCHE_MAX_ELEMENT_POLL) {
5067  Misbehaving(
5068  pfrom, 20,
5069  strprintf("too-many-ava-poll: poll message size = %u", nCount));
5070  return;
5071  }
5072 
5073  std::vector<avalanche::Vote> votes;
5074  votes.reserve(nCount);
5075 
5076  for (unsigned int n = 0; n < nCount; n++) {
5077  CInv inv;
5078  vRecv >> inv;
5079 
5080  // Default vote for unknown inv type
5081  uint32_t vote = -1;
5082 
5083  // We don't vote definitively until we have an established quorum
5084  if (!quorum_established) {
5085  votes.emplace_back(vote, inv.hash);
5086  continue;
5087  }
5088 
5089  // If inv's type is known, get a vote for its hash
5090  switch (inv.type) {
5091  case MSG_TX: {
5092  vote = getAvalancheVoteForTx(m_mempool, TxId(inv.hash));
5093  } break;
5094  case MSG_BLOCK: {
5095  vote = WITH_LOCK(cs_main, return GetAvalancheVoteForBlock(
5096  BlockHash(inv.hash)));
5097  } break;
5098  case MSG_AVA_PROOF: {
5099  vote =
5100  getAvalancheVoteForProof(avalanche::ProofId(inv.hash));
5101  } break;
5102  default: {
5103  LogPrint(BCLog::AVALANCHE,
5104  "poll inv type unknown from peer=%d\n", inv.type);
5105  }
5106  }
5107 
5108  votes.emplace_back(vote, inv.hash);
5109  }
5110 
5111  // Send the query to the node.
5112  g_avalanche->sendResponse(
5113  &pfrom, avalanche::Response(round, cooldown, std::move(votes)));
5114  return;
5115  }
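// Vote encoding note: `uint32_t vote = -1` wraps around to 0xFFFFFFFF,
// the neutral answer used above both for unrecognized inv types and,
// until a quorum is established, for every polled item; definitive values
// come from the getAvalancheVote* helpers once a quorum exists.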
5116 
5117  if (msg_type == NetMsgType::AVARESPONSE) {
5118  // As long as QUIC is not implemented, we need to sign responses and
5119  // verify their signatures in order to avoid any manipulation of
5120  // messages at the transport level.
5121  CHashVerifier<CDataStream> verifier(&vRecv);
5122  avalanche::Response response;
5123  verifier >> response;
5124 
5125  SchnorrSig sig;
5126  vRecv >> sig;
5127  if (!pfrom.m_avalanche_pubkey.has_value() ||
5128  !(*pfrom.m_avalanche_pubkey)
5129  .VerifySchnorr(verifier.GetHash(), sig)) {
5130  Misbehaving(pfrom, 100, "invalid-ava-response-signature");
5131  return;
5132  }
5133 
5134  std::vector<avalanche::BlockUpdate> blockUpdates;
5135  std::vector<avalanche::ProofUpdate> proofUpdates;
5136  int banscore;
5137  std::string error;
5138  if (!g_avalanche->registerVotes(pfrom.GetId(), response, blockUpdates,
5139  proofUpdates, banscore, error)) {
5140  Misbehaving(pfrom, banscore, error);
5141  return;
5142  }
5143 
5144  pfrom.invsVoted(response.GetVotes().size());
5145 
5146  auto logVoteUpdate = [](const auto &voteUpdate,
5147  const std::string &voteItemTypeStr,
5148  const auto &voteItemId) {
5149  std::string voteOutcome;
5150  switch (voteUpdate.getStatus()) {
5151  case avalanche::VoteStatus::Invalid:
5152  voteOutcome = "invalidated";
5153  break;
5154  case avalanche::VoteStatus::Rejected:
5155  voteOutcome = "rejected";
5156  break;
5157  case avalanche::VoteStatus::Accepted:
5158  voteOutcome = "accepted";
5159  break;
5160  case avalanche::VoteStatus::Finalized:
5161  voteOutcome = "finalized";
5162  break;
5163  case avalanche::VoteStatus::Stalled:
5164  voteOutcome = "stalled";
5165  break;
5166 
5167  // No default case, so the compiler can warn about missing
5168  // cases
5169  }
5170 
5171  LogPrint(BCLog::AVALANCHE, "Avalanche %s %s %s\n", voteOutcome,
5172  voteItemTypeStr, voteItemId.ToString());
5173  };
5174 
5175  for (avalanche::ProofUpdate &u : proofUpdates) {
5176  avalanche::ProofRef proof = u.getVoteItem();
5177  const avalanche::ProofId &proofid = proof->getId();
5178 
5179  logVoteUpdate(u, "proof", proofid);
5180 
5181  auto rejectionMode = avalanche::PeerManager::RejectionMode::DEFAULT;
5182  auto nextCooldownTimePoint = GetTime<std::chrono::seconds>();
5183  switch (u.getStatus()) {
5184  case avalanche::VoteStatus::Invalid:
5185  WITH_LOCK(cs_invalidProofs, invalidProofs->insert(proofid));
5186  // Fallthrough
5187  case avalanche::VoteStatus::Stalled:
5188  // Invalidate mode removes the proof from all proof pools
5189  rejectionMode =
5190  avalanche::PeerManager::RejectionMode::INVALIDATE;
5191  // Fallthrough
5192  case avalanche::VoteStatus::Rejected:
5193  if (!g_avalanche->withPeerManager(
5194  [&](avalanche::PeerManager &pm) {
5195  return pm.rejectProof(proofid, rejectionMode);
5196  })) {
5197  LogPrint(BCLog::AVALANCHE,
5198  "ERROR: Failed to reject proof: %s\n",
5199  proofid.GetHex());
5200  }
5201  break;
5202  case avalanche::VoteStatus::Accepted: case avalanche::VoteStatus::Finalized:
5203  nextCooldownTimePoint +=
5204  std::chrono::seconds(gArgs.GetIntArg(
5205  "-avalanchepeerreplacementcooldown",
5206  AVALANCHE_DEFAULT_PEER_REPLACEMENT_COOLDOWN));
5207 
5208  if (!g_avalanche->withPeerManager(
5209  [&](avalanche::PeerManager &pm) {
5210  pm.registerProof(
5211  proof, avalanche::PeerManager::
5212  RegistrationMode::FORCE_ACCEPT);
5213  return pm.forPeer(
5214  proofid, [&](const avalanche::Peer &peer) {
5215  pm.updateNextPossibleConflictTime(
5216  peer.peerid, nextCooldownTimePoint);
5217  if (u.getStatus() ==
5218  avalanche::VoteStatus::Finalized) {
5219  pm.setFinalized(peer.peerid);
5220  }
5221  // Only fail if the peer was not
5222  // created
5223  return true;
5224  });
5225  })) {
5226  LogPrint(BCLog::AVALANCHE,
5227  "ERROR: Failed to accept proof: %s\n",
5228  proofid.GetHex());
5229  }
5230  break;
5231  }
5232  }
5233 
5234  if (blockUpdates.size()) {
5235  for (avalanche::BlockUpdate &u : blockUpdates) {
5236  CBlockIndex *pindex = u.getVoteItem();
5237 
5238  logVoteUpdate(u, "block", pindex->GetBlockHash());
5239 
5240  switch (u.getStatus()) {
5241  case avalanche::VoteStatus::Invalid:
5242  case avalanche::VoteStatus::Rejected: {
5243  BlockValidationState state;
5244  m_chainman.ActiveChainstate().ParkBlock(config, state,
5245  pindex);
5246  if (!state.IsValid()) {
5247  LogPrintf("ERROR: Database error: %s\n",
5248  state.GetRejectReason());
5249  return;
5250  }
5251  } break;
5252  case avalanche::VoteStatus::Accepted: {
5253  LOCK(cs_main);
5254  m_chainman.ActiveChainstate().UnparkBlock(pindex);
5255  } break;
5256  case avalanche::VoteStatus::Finalized: {
5257  {
5258  LOCK(cs_main);
5259  m_chainman.ActiveChainstate().UnparkBlock(pindex);
5260  }
5261  m_chainman.ActiveChainstate().AvalancheFinalizeBlock(
5262  pindex);
5263  } break;
5264  case avalanche::VoteStatus::Stalled:
5265  // Fall back on Nakamoto consensus in the absence of
5266  // Avalanche votes for other competing or descendant
5267  // blocks.
5268  break;
5269  }
5270  }
5271 
5272  BlockValidationState state;
5273  if (!m_chainman.ActiveChainstate().ActivateBestChain(config,
5274  state)) {
5275  LogPrintf("failed to activate chain (%s)\n", state.ToString());
5276  }
5277  }
5278 
5279  return;
5280  }
5281 
5282  if (msg_type == NetMsgType::AVAPROOF) {
5283  auto proof = RCUPtr<avalanche::Proof>::make();
5284  vRecv >> *proof;
5285 
5286  ReceivedAvalancheProof(pfrom, proof);
5287 
5288  return;
5289  }
5290 
5291  if (msg_type == NetMsgType::GETAVAPROOFS) {
5292  if (pfrom.m_proof_relay == nullptr) {
5293  return;
5294  }
5295 
5296  pfrom.m_proof_relay->lastSharedProofsUpdate =
5297  GetTime<std::chrono::seconds>();
5298 
5299  pfrom.m_proof_relay->sharedProofs =
5300  g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
5301  return pm.getShareableProofsSnapshot();
5302  });
5303 
5304  avalanche::CompactProofs compactProofs(
5305  pfrom.m_proof_relay->sharedProofs);
5306  m_connman.PushMessage(
5307  &pfrom, msgMaker.Make(NetMsgType::AVAPROOFS, compactProofs));
5308 
5309  return;
5310  }
5311 
5312  if (msg_type == NetMsgType::AVAPROOFS) {
5313  if (pfrom.m_proof_relay == nullptr) {
5314  return;
5315  }
5316 
5317  // Only process the compact proofs if we requested them
5318  if (!pfrom.m_proof_relay->compactproofs_requested) {
5319  LogPrint(BCLog::AVALANCHE, "Ignoring unsolicited avaproofs\n");
5320  return;
5321  }
5322  pfrom.m_proof_relay->compactproofs_requested = false;
5323 
5324  avalanche::CompactProofs compactProofs;
5325  try {
5326  vRecv >> compactProofs;
5327  } catch (std::ios_base::failure &e) {
5328  // These compact proofs have non-contiguous or overflowing indexes
5329  Misbehaving(pfrom, 100, "avaproofs-bad-indexes");
5330  return;
5331  }
5332 
5333  // If there are prefilled proofs, process them first
5334  std::set<uint32_t> prefilledIndexes;
5335  for (const auto &prefilledProof : compactProofs.getPrefilledProofs()) {
5336  if (!ReceivedAvalancheProof(pfrom, prefilledProof.proof)) {
5337  // If we got an invalid proof, the peer is getting banned and we
5338  // can bail out.
5339  return;
5340  }
5341  }
5342 
5343  // If there is no shortid, avoid parsing/responding/accounting for the
5344  // message.
5345  if (compactProofs.getShortIDs().size() == 0) {
5346  LogPrint(BCLog::AVALANCHE,
5347  "Got an avaproofs message with no shortid (peer %d)\n",
5348  pfrom.GetId());
5349  return;
5350  }
5351 
5352  // To determine the chance that the number of entries in a bucket
5353  // exceeds N, we use the fact that the number of elements in a single
5354  // bucket is binomially distributed (with n = the number of shorttxids
5355  // S, and p = 1 / the number of buckets), that in the worst case the
5356  // number of buckets is equal to S (due to std::unordered_map having a
5357  // default load factor of 1.0), and that the chance for any bucket to
5358  // exceed N elements is at most buckets * (the chance that any given
5359  // bucket is above N elements). Thus:
5360  // P(max_elements_per_bucket > N) <=
5361  // S * (1 - cdf(binomial(n=S,p=1/S), N))
5362  // If we assume up to 21000000 shortids, allowing 15 elements per
5363  // bucket should only fail once per ~2.5 million avaproofs transfers
5364  // (per peer and connection).
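// Worked instance of the bound above: with S = 21000000 shortids and a
// load factor of 1 there are S buckets, so each bucket size is
// Binomial(n=S, p=1/S), approximately Poisson(1). Then
// P(any bucket > 15) <= S * P(Poisson(1) >= 16) ~= 2.1e7 * 1.9e-14
// ~= 4e-7, i.e. roughly once per 2.5 million transfers, matching the
// figure quoted above.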
5365  // TODO re-evaluate the bucket count to a more realistic value.
5366  // TODO: In the case of a shortid-collision, we should request all the
5367  // proofs which collided. For now, we only request one, which is not
5368  // that bad considering this event is expected to be very rare.
5369  auto shortIdProcessor =
5370  avalanche::ProofShortIdProcessor(compactProofs.getPrefilledProofs(),
5371  compactProofs.getShortIDs(), 15);
5372 
5373  if (shortIdProcessor.hasOutOfBoundIndex()) {
5374  // This should be caught by deserialization, but check it here as
5375  // well for good measure.
5376  Misbehaving(pfrom, 100, "avaproofs-bad-indexes");
5377  return;
5378  }
5379  if (!shortIdProcessor.isEvenlyDistributed()) {
5380  // This is suspicious, don't ban but bail out
5381  return;
5382  }
5383 
5384  const auto &proofs =
5385  g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
5386  return pm.getShareableProofsSnapshot();
5387  });
5388 
5389  size_t proofCount = 0;
5390  proofs.forEachLeaf([&](const avalanche::ProofRef &proof) {
5391  uint64_t shortid = compactProofs.getShortID(proof->getId());
5392 
5393  proofCount += shortIdProcessor.matchKnownItem(shortid, proof);
5394 
5395  // Though ideally we'd continue scanning for the
5396  // two-proofs-match-shortid case, the performance win of an early
5397  // exit here is too good to pass up and worth the extra risk.
5398  return proofCount != shortIdProcessor.getShortIdCount();
5399  });
5400 
5401  avalanche::ProofsRequest req;
5402  for (size_t i = 0; i < compactProofs.size(); i++) {
5403  if (shortIdProcessor.getItem(i) == nullptr) {
5404  req.indices.push_back(i);
5405  }
5406  }
5407 
5408  m_connman.PushMessage(&pfrom,
5409  msgMaker.Make(NetMsgType::AVAPROOFSREQ, req));
5410 
5411  // We want to keep a count of how many nodes we successfully requested
5412  // avaproofs from as this is used to determine when we are confident our
5413  // quorum is close enough to the other participants.
5414  g_avalanche->avaproofsSent(pfrom.GetId());
5415 
5416  return;
5417  }
5418 
5419  if (msg_type == NetMsgType::AVAPROOFSREQ) {
5420  if (pfrom.m_proof_relay == nullptr) {
5421  return;
5422  }
5423 
5424  avalanche::ProofsRequest proofreq;
5425  vRecv >> proofreq;
5426 
5427  auto requestedIndiceIt = proofreq.indices.begin();
5428  uint32_t treeIndice = 0;
5429  pfrom.m_proof_relay->sharedProofs.forEachLeaf([&](const auto &proof) {
5430  if (requestedIndiceIt == proofreq.indices.end()) {
5431  // No more indices to process
5432  return false;
5433  }
5434 
5435  if (treeIndice++ == *requestedIndiceIt) {
5436  m_connman.PushMessage(
5437  &pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
5438  requestedIndiceIt++;
5439  }
5440 
5441  return true;
5442  });
5443 
5444  pfrom.m_proof_relay->sharedProofs = {};
5445  return;
5446  }
5447 
5448  if (msg_type == NetMsgType::GETADDR) {
5449  // This asymmetric behavior for inbound and outbound connections was
5450  // introduced to prevent a fingerprinting attack: an attacker can send
5451  // specific fake addresses to users' AddrMan and later request them by
5452  // sending getaddr messages. Making nodes which are behind NAT and can
5453  // only make outgoing connections ignore the getaddr message mitigates
5454  // the attack.
5455  if (!pfrom.IsInboundConn()) {
5456  LogPrint(BCLog::NET,
5457  "Ignoring \"getaddr\" from %s connection. peer=%d\n",
5458  pfrom.ConnectionTypeAsString(), pfrom.GetId());
5459  return;
5460  }
5461 
5462  // Since this must be an inbound connection, SetupAddressRelay will
5463  // never fail.
5464  Assume(SetupAddressRelay(pfrom, *peer));
5465 
5466  // Only send one GetAddr response per connection to reduce resource
5467  // waste and discourage addr stamping of INV announcements.
5468  if (peer->m_getaddr_recvd) {
5469  LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
5470  pfrom.GetId());
5471  return;
5472  }
5473  peer->m_getaddr_recvd = true;
5474 
5475  peer->m_addrs_to_send.clear();
5476  std::vector<CAddress> vAddr;
5477  const size_t maxAddrToSend = GetMaxAddrToSend();
5478  if (pfrom.HasPermission(PF_ADDR)) {
5479  vAddr = m_connman.GetAddresses(maxAddrToSend, MAX_PCT_ADDR_TO_SEND,
5480  /* network */ std::nullopt);
5481  } else {
5482  vAddr = m_connman.GetAddresses(pfrom, maxAddrToSend,
5483  MAX_PCT_ADDR_TO_SEND);
5484  }
5485  FastRandomContext insecure_rand;
5486  for (const CAddress &addr : vAddr) {
5487  PushAddress(*peer, addr, insecure_rand);
5488  }
5489  return;
5490  }
5491 
5492  if (msg_type == NetMsgType::GETAVAADDR) {
5493  auto now = GetTime<std::chrono::seconds>();
5494  if (now < pfrom.m_nextGetAvaAddr) {
5495  // Prevent a peer from exhausting our resources by spamming
5496  // getavaaddr messages.
5497  LogPrint(BCLog::AVALANCHE,
5498  "Ignoring repeated getavaaddr from peer %d\n",
5499  pfrom.GetId());
5500  return;
5501  }
5502 
5503  // Only accept a getavaaddr every GETAVAADDR_INTERVAL at most
5504  pfrom.m_nextGetAvaAddr = now + GETAVAADDR_INTERVAL;
5505 
5506  if (!SetupAddressRelay(pfrom, *peer)) {
5507  LogPrint(BCLog::AVALANCHE,
5508  "Ignoring getavaaddr message from %s peer=%d\n",
5509  pfrom.ConnectionTypeAsString(), pfrom.GetId());
5510  return;
5511  }
5512 
5513  auto availabilityScoreComparator = [](const CNode *lhs,
5514  const CNode *rhs) {
5515  double scoreLhs = lhs->getAvailabilityScore();
5516  double scoreRhs = rhs->getAvailabilityScore();
5517 
5518  if (scoreLhs != scoreRhs) {
5519  return scoreLhs > scoreRhs;
5520  }
5521 
5522  return lhs < rhs;
5523  };
5524 
5525  // Get up to MAX_ADDR_TO_SEND addresses of the nodes which are the
5526  // most active in the avalanche network. Account for 0 availability as
5527  // well so we can send addresses even if we did not start polling yet.
5528  std::set<const CNode *, decltype(availabilityScoreComparator)> avaNodes(
5529  availabilityScoreComparator);
5530  m_connman.ForEachNode([&](const CNode *pnode) {
5531  if (!pnode->m_avalanche_enabled ||
5532  pnode->getAvailabilityScore() < 0.) {
5533  return;
5534  }
5535 
5536  avaNodes.insert(pnode);
5537  if (avaNodes.size() > GetMaxAddrToSend()) {
5538  avaNodes.erase(std::prev(avaNodes.end()));
5539  }
5540  });
5541 
5542  peer->m_addrs_to_send.clear();
5543  FastRandomContext insecure_rand;
5544  for (const CNode *pnode : avaNodes) {
5545  PushAddress(*peer, pnode->addr, insecure_rand);
5546  }
5547 
5548  return;
5549  }
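// The ForEachNode pass above is a bounded top-N selection: the std::set
// is ordered by availability score (ties broken by pointer value so that
// distinct nodes never compare equal), and erasing std::prev(end()) after
// each insertion keeps only the best GetMaxAddrToSend() candidates.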
5550 
5551  if (msg_type == NetMsgType::MEMPOOL) {
5552  if (!(pfrom.GetLocalServices() & NODE_BLOOM) &&
5553  !pfrom.HasPermission(PF_MEMPOOL)) {
5554  if (!pfrom.HasPermission(PF_NOBAN)) {
5555  LogPrint(BCLog::NET,
5556  "mempool request with bloom filters disabled, "
5557  "disconnect peer=%d\n",
5558  pfrom.GetId());
5559  pfrom.fDisconnect = true;
5560  }
5561  return;
5562  }
5563 
5564  if (m_connman.OutboundTargetReached(false) &&
5565  !pfrom.HasPermission(PF_MEMPOOL)) {
5566  if (!pfrom.HasPermission(PF_NOBAN)) {
5567  LogPrint(BCLog::NET,
5568  "mempool request with bandwidth limit reached, "
5569  "disconnect peer=%d\n",
5570  pfrom.GetId());
5571  pfrom.fDisconnect = true;
5572  }
5573  return;
5574  }
5575 
5576  if (pfrom.m_tx_relay != nullptr) {
5577  LOCK(pfrom.m_tx_relay->cs_tx_inventory);
5578  pfrom.m_tx_relay->fSendMempool = true;
5579  }
5580  return;
5581  }
5582 
5583  if (msg_type == NetMsgType::PING) {
5584  if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
5585  uint64_t nonce = 0;
5586  vRecv >> nonce;
5587  // Echo the message back with the nonce. This allows for two useful
5588  // features:
5589  //
5590  // 1) A remote node can quickly check if the connection is
5591  // operational.
5592  // 2) Remote nodes can measure the latency of the network thread. If
5593  // this node is overloaded it won't respond to pings quickly and the
5594  // remote node can avoid sending us more work, like chain download
5595  // requests.
5596  //
5597  // The nonce stops the remote getting confused between different
5598  // pings: without it, if the remote node sends a ping once per
5599  // second and this node takes 5 seconds to respond to each, the 5th
5600  // ping the remote sends would appear to return very quickly.
5601  m_connman.PushMessage(&pfrom,
5602  msgMaker.Make(NetMsgType::PONG, nonce));
5603  }
5604  return;
5605  }
5606 
5607  if (msg_type == NetMsgType::PONG) {
5608  const auto ping_end = time_received;
5609  uint64_t nonce = 0;
5610  size_t nAvail = vRecv.in_avail();
5611  bool bPingFinished = false;
5612  std::string sProblem;
5613 
5614  if (nAvail >= sizeof(nonce)) {
5615  vRecv >> nonce;
5616 
5617  // Only process pong message if there is an outstanding ping (old
5618  // ping without nonce should never pong)
5619  if (peer->m_ping_nonce_sent != 0) {
5620  if (nonce == peer->m_ping_nonce_sent) {
5621  // Matching pong received, this ping is no longer
5622  // outstanding
5623  bPingFinished = true;
5624  const auto ping_time = ping_end - peer->m_ping_start.load();
5625  if (ping_time.count() >= 0) {
5626  // Let connman know about this successful ping-pong
5627  pfrom.PongReceived(ping_time);
5628  } else {
5629  // This should never happen
5630  sProblem = "Timing mishap";
5631  }
5632  } else {
5633  // Nonce mismatches are normal when pings are overlapping
5634  sProblem = "Nonce mismatch";
5635  if (nonce == 0) {
5636  // This is most likely a bug in another implementation
5637  // somewhere; cancel this ping
5638  bPingFinished = true;
5639  sProblem = "Nonce zero";
5640  }
5641  }
5642  } else {
5643  sProblem = "Unsolicited pong without ping";
5644  }
5645  } else {
5646  // This is most likely a bug in another implementation somewhere;
5647  // cancel this ping
5648  bPingFinished = true;
5649  sProblem = "Short payload";
5650  }
5651 
5652  if (!(sProblem.empty())) {
5653  LogPrint(BCLog::NET,
5654  "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
5655  pfrom.GetId(), sProblem, peer->m_ping_nonce_sent, nonce,
5656  nAvail);
5657  }
5658  if (bPingFinished) {
5659  peer->m_ping_nonce_sent = 0;
5660  }
5661  return;
5662  }
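
// Illustrative sketch (not part of this file): the pong handler is a small
// state machine keyed on the outstanding nonce. PingState and OnPong are
// hypothetical names mirroring the logic above.
#include <chrono>
#include <cstdint>
#include <string>

struct PingState {
    uint64_t nonce_sent{0}; // 0 means no ping is outstanding
    std::chrono::microseconds ping_start{0};
};

// Returns a non-empty problem string on any mismatch; clears the
// outstanding nonce once the ping is considered finished.
std::string OnPong(PingState &state, uint64_t nonce,
                   std::chrono::microseconds now) {
    if (state.nonce_sent == 0) {
        return "Unsolicited pong without ping";
    }
    if (nonce == state.nonce_sent) {
        state.nonce_sent = 0; // matching pong: ping no longer outstanding
        return now >= state.ping_start ? "" : "Timing mishap";
    }
    if (nonce == 0) {
        state.nonce_sent = 0; // likely a remote bug: cancel this ping
        return "Nonce zero";
    }
    return "Nonce mismatch"; // normal when pings overlap
}
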
5663 
5664  if (msg_type == NetMsgType::FILTERLOAD) {
5665  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
5666  LogPrint(BCLog::NET,
5667  "filterload received despite not offering bloom services "
5668  "from peer=%d; disconnecting\n",
5669  pfrom.GetId());
5670  pfrom.fDisconnect = true;
5671  return;
5672  }
5673  CBloomFilter filter;
5674  vRecv >> filter;
5675 
5676  if (!filter.IsWithinSizeConstraints()) {
5677  // There is no excuse for sending a too-large filter
5678  Misbehaving(pfrom, 100, "too-large bloom filter");
5679  } else if (pfrom.m_tx_relay != nullptr) {
5680  LOCK(pfrom.m_tx_relay->cs_filter);
5681  pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter));
5682  pfrom.m_tx_relay->fRelayTxes = true;
5683  }
5684  return;
5685  }
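
// Illustrative sketch (not part of this file): the IsWithinSizeConstraints
// check boils down to the BIP 37 limits of 36,000 filter bytes and 50 hash
// functions. The FilterParams type is hypothetical.
#include <cstddef>

constexpr size_t kMaxBloomFilterSize = 36000; // bytes, per BIP 37
constexpr unsigned int kMaxHashFuncs = 50;    // per BIP 37

struct FilterParams {
    size_t data_size;
    unsigned int num_hash_funcs;
};

bool WithinSizeConstraints(const FilterParams &filter) {
    return filter.data_size <= kMaxBloomFilterSize &&
           filter.num_hash_funcs <= kMaxHashFuncs;
}
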
5686 
5687  if (msg_type == NetMsgType::FILTERADD) {
5688  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
5689  LogPrint(BCLog::NET,
5690  "filteradd received despite not offering bloom services "
5691  "from peer=%d; disconnecting\n",
5692  pfrom.GetId());
5693  pfrom.fDisconnect = true;
5694  return;
5695  }
5696  std::vector<uint8_t> vData;
5697  vRecv >> vData;
5698 
5699  // Nodes must NEVER send a data item > 520 bytes (the max size for a
5700  // script data object, and thus, the maximum size any matched object can
5701  // have) in a filteradd message.
5702  bool bad = false;
5703  if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
5704  bad = true;
5705  } else if (pfrom.m_tx_relay != nullptr) {
5706  LOCK(pfrom.m_tx_relay->cs_filter);
5707  if (pfrom.m_tx_relay->pfilter) {
5708  pfrom.m_tx_relay->pfilter->insert(vData);
5709  } else {
5710  bad = true;
5711  }
5712  }
5713  if (bad) {
5714  // The structure of this code doesn't really allow for a good error
5715  // code. We'll go generic.
5716  Misbehaving(pfrom, 100, "bad filteradd message");
5717  }
5718  return;
5719  }
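
// Illustrative sketch (not part of this file): the filteradd size rule in
// isolation. kMaxScriptElementSize mirrors MAX_SCRIPT_ELEMENT_SIZE (520
// bytes, the largest script push and hence the largest matchable object);
// CheckFilterAddPayload is hypothetical.
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kMaxScriptElementSize = 520;

// Payloads larger than a script element can never match anything, so they
// are treated as misbehavior rather than inserted into the filter.
bool CheckFilterAddPayload(const std::vector<uint8_t> &data) {
    return data.size() <= kMaxScriptElementSize;
}
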
5720 
5721  if (msg_type == NetMsgType::FILTERCLEAR) {
5722  if (!(pfrom.GetLocalServices() & NODE_BLOOM)) {
5723  LogPrint(BCLog::NET,
5724  "filterclear received despite not offering bloom services "
5725  "from peer=%d; disconnecting\n",
5726  pfrom.GetId());
5727  pfrom.fDisconnect = true;
5728  return;
5729  }
5730  if (pfrom.m_tx_relay == nullptr) {
5731  return;
5732  }
5733  LOCK(pfrom.m_tx_relay->cs_filter);
5734  pfrom.m_tx_relay->pfilter = nullptr;
5735  pfrom.m_tx_relay->fRelayTxes = true;
5736  return;
5737  }
5738 
5739  if (msg_type == NetMsgType::FEEFILTER) {
5740  Amount newFeeFilter = Amount::zero();
5741  vRecv >> newFeeFilter;
5742  if (MoneyRange(newFeeFilter)) {
5743  if (pfrom.m_tx_relay != nullptr) {
5744  LOCK(pfrom.m_tx_relay->cs_feeFilter);
5745  pfrom.m_tx_relay->minFeeFilter = newFeeFilter;
5746  }
5747  LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
5748  CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
5749  }
5750  return;
5751  }
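
// Illustrative sketch (not part of this file): a feefilter value is only
// applied when it passes a MoneyRange-style bounds check. Amount and
// kMaxMoney below are simplified stand-ins; the real types come from
// consensus/amount.h and the cap is the chain's MAX_MONEY.
#include <cstdint>
#include <optional>

namespace sketch {

using Amount = int64_t;
constexpr Amount kMaxMoney = int64_t{21000000} * 100000000; // placeholder cap

std::optional<Amount> AcceptFeeFilter(Amount new_fee_filter) {
    // Out-of-range values are silently ignored rather than penalized.
    if (new_fee_filter < 0 || new_fee_filter > kMaxMoney) {
        return std::nullopt;
    }
    return new_fee_filter;
}

} // namespace sketch
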
5752 
5753  if (msg_type == NetMsgType::GETCFILTERS) {
5754  ProcessGetCFilters(pfrom, vRecv, m_chainparams, m_connman);
5755  return;
5756  }
5757 
5758  if (msg_type == NetMsgType::GETCFHEADERS) {
5759  ProcessGetCFHeaders(pfrom, vRecv, m_chainparams, m_connman);
5760  return;
5761  }
5762 
5763  if (msg_type == NetMsgType::GETCFCHECKPT) {
5764  ProcessGetCFCheckPt(pfrom, vRecv, m_chainparams, m_connman);
5765  return;
5766  }
5767 
5768  if (msg_type == NetMsgType::NOTFOUND) {
5769  std::vector<CInv> vInv;
5770  vRecv >> vInv;
5771  // A peer might send up to 1 notfound per getdata request, but no more
5772  if (vInv.size() <= PROOF_REQUEST_PARAMS.max_peer_announcements +
5773  TX_REQUEST_PARAMS.max_peer_announcements) {
5775  for (CInv &inv : vInv) {
5776  if (inv.IsMsgTx()) {
5777  // If we receive a NOTFOUND message for a tx we requested,
5778  // mark the announcement for it as completed in
5779  // InvRequestTracker.
5780  LOCK(::cs_main);
5781  m_txrequest.ReceivedResponse(pfrom.GetId(), TxId(inv.hash));
5782  continue;
5783  }
5784  if (inv.IsMsgProof()) {
5785  LOCK(cs_proofrequest);
5786  m_proofrequest.ReceivedResponse(
5787  pfrom.GetId(), avalanche::ProofId(inv.hash));
5788  }
5789  }
5790  }
5791  return;
5792  }
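
// Illustrative sketch (not part of this file): the gate above just bounds
// the vector by the sum of the per-peer announcement limits, so a peer
// cannot make us iterate an arbitrarily long notfound list.
#include <cstddef>

bool NotfoundSizeAcceptable(size_t inv_count, size_t max_proof_announcements,
                            size_t max_tx_announcements) {
    // A peer might send up to one notfound entry per getdata request.
    return inv_count <= max_proof_announcements + max_tx_announcements;
}
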
5793 
5794  // Ignore unknown commands for extensibility
5795  LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
5796  SanitizeString(msg_type), pfrom.GetId());
5797  return;
5798 }
5799 
5800 bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer) {
5801  {
5802  LOCK(peer.m_misbehavior_mutex);
5803 
5804  // There's nothing to do if the m_should_discourage flag isn't set
5805  if (!peer.m_should_discourage) {
5806  return false;
5807  }
5808 
5809  peer.m_should_discourage = false;
5810  } // peer.m_misbehavior_mutex
5811 
5812  if (pnode.HasPermission(PF_NOBAN)) {
5813  // We never disconnect or discourage peers for bad behavior if they have
5814  // the NOBAN permission flag
5815  LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
5816  return false;
5817  }
5818 
5819  if (pnode.IsManualConn()) {
5820  // We never disconnect or discourage manual peers for bad behavior
5821  LogPrintf("Warning: not punishing manually connected peer %d!\n",
5822  peer.m_id);
5823  return false;
5824  }
5825 
5826  if (pnode.addr.IsLocal()) {
5827  // We disconnect local peers for bad behavior but don't discourage
5828  // (since that would discourage all peers on the same local address)
5829  LogPrintf(
5830  "Warning: disconnecting but not discouraging local peer %d!\n",
5831  peer.m_id);
5832  pnode.fDisconnect = true;
5833  return true;
5834  }
5835 
5836  // Normal case: Disconnect the peer and discourage all nodes sharing the
5837  // address
5838  LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n",
5839  peer.m_id);
5840  if (m_banman) {
5841  m_banman->Discourage(pnode.addr);
5842  }
5843  m_connman.DisconnectNode(pnode.addr);
5844  return true;
5845 }
5846 
5847 bool PeerManagerImpl::ProcessMessages(const Config &config, CNode *pfrom,
5848  std::atomic<bool> &interruptMsgProc) {
5849  //
5850  // Message format
5851  // (4) message start
5852  // (12) command
5853  // (4) size
5854  // (4) checksum
5855  // (x) data
5856  //
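
// As a sketch (illustrative only; the real framing lives in the
// protocol's CMessageHeader type), the 24-byte header above maps to:
//
//   struct MessageHeader {
//       uint8_t start[4];    // network magic, identifies the chain
//       char command[12];    // NUL-padded command name
//       uint32_t size;       // payload length, little-endian
//       uint8_t checksum[4]; // first 4 bytes of SHA256d(payload)
//   };
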
5857  bool fMoreWork = false;
5858 
5859  PeerRef peer = GetPeerRef(pfrom->GetId());
5860  if (peer == nullptr) {
5861  return false;
5862  }
5863 
5864  {
5865  LOCK(peer->m_getdata_requests_mutex);
5866  if (!peer->m_getdata_requests.empty()) {
5867  ProcessGetData(config, *pfrom, *peer, interruptMsgProc);
5868  }
5869  }
5870 
5871  {
5872  LOCK2(cs_main, g_cs_orphans);
5873  if (!peer->m_orphan_work_set.empty()) {
5874  ProcessOrphanTx(config, peer->m_orphan_work_set);
5875  }
5876  }
5877 
5878  if (pfrom->fDisconnect) {
5879  return false;
5880  }
5881 
5882  // this maintains the order of responses and prevents m_getdata_requests
5883  // from growing unbounded
5884  {
5885  LOCK(peer->m_getdata_requests_mutex);
5886  if (!peer->m_getdata_requests.empty()) {
5887  return true;
5888  }
5889  }
5890 
5891  {
5892  LOCK(g_cs_orphans);
5893  if (!peer->m_orphan_work_set.empty()) {
5894  return true;
5895  }
5896  }
5897 
5898  // Don't bother if send buffer is too full to respond anyway
5899  if (pfrom->fPauseSend) {
5900  return false;
5901  }
5902 
5903  std::list<CNetMessage> msgs;
5904  {
5905  LOCK(pfrom->cs_vProcessMsg);
5906  if (pfrom->vProcessMsg.empty()) {
5907  return false;
5908  }
5909  // Just take one message
5910  msgs.splice(msgs.begin(), pfrom->vProcessMsg,
5911  pfrom->vProcessMsg.begin());
5912  pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
5913  pfrom->fPauseRecv =
5914  pfrom->nProcessQueueSize > m_connman.GetReceiveFloodSize();
5915  fMoreWork = !pfrom->vProcessMsg.empty();
5916  }
5917  CNetMessage &msg(msgs.front());
5918 
5919  TRACE6(net, inbound_message, pfrom->GetId(), pfrom->m_addr_name.c_str(),
5920  pfrom->ConnectionTypeAsString().c_str(), msg.m_command.c_str(),
5921  msg.m_recv.size(), msg.m_recv.data());
5922 
5923  if (gArgs.GetBoolArg("-capturemessages", false)) {
5924  CaptureMessage(pfrom->addr, msg.m_command, MakeUCharSpan(msg.m_recv),
5925  /*is_incoming=*/true);
5926  }
5927 
5928  msg.SetVersion(pfrom->GetCommonVersion());
5929 
5930  // Check network magic
5931  if (!msg.m_valid_netmagic) {
5932  LogPrint(BCLog::NET,
5933  "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
5934  SanitizeString(msg.m_command), pfrom->GetId());
5935 
5936  // Make sure we discourage the address this came from for some time.
5937  if (m_banman) {
5938  m_banman->Discourage(pfrom->addr);
5939  }
5940  m_connman.DisconnectNode(pfrom->addr);
5941 
5942  pfrom->fDisconnect = true;
5943  return false;
5944  }
5945 
5946  // Check header
5947  if (!msg.m_valid_header) {
5948  LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
5949  SanitizeString(msg.m_command), pfrom->GetId());
5950  return fMoreWork;
5951  }
5952  const std::string &msg_type = msg.m_command;
5953 
5954  // Message size
5955  unsigned int nMessageSize = msg.m_message_size;
5956 
5957  // Checksum
5958  CDataStream &vRecv = msg.m_recv;
5959  if (!msg.m_valid_checksum) {
5960  LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n",
5961  __func__, SanitizeString(msg_type), nMessageSize,
5962  pfrom->GetId());
5963  if (m_banman) {
5964  m_banman->Discourage(pfrom->addr);
5965  }
5966  m_connman.DisconnectNode(pfrom->addr);
5967  return fMoreWork;
5968  }
5969 
5970  try {
5971  ProcessMessage(config, *pfrom, msg_type, vRecv, msg.m_time,
5972  interruptMsgProc);
5973  if (interruptMsgProc) {
5974  return false;
5975  }
5976 
5977  {
5978  LOCK(peer->m_getdata_requests_mutex);
5979  if (!peer->m_getdata_requests.empty()) {
5980  fMoreWork = true;
5981  }
5982  }
5983  } catch (const std::exception &e) {
5984  LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n",
5985  __func__, SanitizeString(msg_type), nMessageSize, e.what(),
5986  typeid(e).name());
5987  } catch (...) {
5988  LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n",
5989  __func__, SanitizeString(msg_type), nMessageSize);
5990  }
5991 
5992  return fMoreWork;
5993 }
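
// Illustrative sketch (not part of this file): ProcessMessages() takes at
// most one queued message per call and returns true while more work
// remains, so a driver can fair-queue peers by re-invoking it. DrainPeer
// is hypothetical; the real loop lives in CConnman's message handler
// thread and round-robins across all peers instead of draining one.
#include <atomic>

template <typename PeerManager, typename Config, typename Node>
void DrainPeer(PeerManager &peerman, const Config &config, Node *node,
               std::atomic<bool> &interrupt) {
    bool more = true;
    while (more && !interrupt) {
        // Each call consumes at most one message, bounding per-peer latency.
        more = peerman.ProcessMessages(config, node, interrupt);
    }
}
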
5994 
5995 void PeerManagerImpl::ConsiderEviction(CNode &pto, int64_t time_in_seconds) {
5996  AssertLockHeld(cs_main);
5997 
5998  CNodeState &state = *State(pto.GetId());
5999  const CNetMsgMaker msgMaker(pto.GetCommonVersion());
6000 
6001  if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() &&
6002  state.fSyncStarted) {
6003  // This is an outbound peer subject to disconnection if they don't
6004  // announce a block with as much work as the current tip within
6005  // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their
6006  // chain has more work than ours, we should sync to it, unless it's
6007  // invalid, in which case we should find that out and disconnect from
6008  // them elsewhere).
6009  if (state.pindexBestKnownBlock != nullptr &&
6010  state.pindexBestKnownBlock->nChainWork >=
6011  m_chainman.ActiveChain().Tip()->nChainWork) {
6012  if (state.m_chain_sync.m_timeout != 0) {
6013  state.m_chain_sync.m_timeout = 0;
6014  state.m_chain_sync.m_work_header = nullptr;
6015  state.m_chain_sync.m_sent_getheaders = false;
6016  }
6017  } else if (state.m_chain_sync.m_timeout == 0 ||
6018  (state.m_chain_sync.m_work_header != nullptr &&
6019  state.pindexBestKnownBlock != nullptr &&
6020  state.pindexBestKnownBlock->nChainWork >=
6021  state.m_chain_sync.m_work_header->nChainWork)) {
6022  // Our best block known by this peer is behind our tip, and we're
6023  // either noticing that for the first time, OR this peer was able to
6024  // catch up to some earlier point where we checked against our tip.
6025  // Either way, set a new timeout based on current tip.
6026  state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
6027  state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
6028  state.m_chain_sync.m_sent_getheaders = false;
6029  } else if (state.m_chain_sync.m_timeout > 0 &&
6030  time_in_seconds > state.m_chain_sync.m_timeout) {
6031  // No evidence yet that our peer has synced to a chain with work
6032  // equal to that of our tip, when we first detected it was behind.
6033  // Send a single getheaders message to give the peer a chance to
6034  // update us.
6035  if (state.m_chain_sync.m_sent_getheaders) {
6036  // They've run out of time to catch up!
6037  LogPrintf(
6038  "Disconnecting outbound peer %d for old chain, best known "
6039  "block = %s\n",
6040  pto.GetId(),
6041  state.pindexBestKnownBlock != nullptr
6042  ? state.pindexBestKnownBlock->GetBlockHash().ToString()
6043  : "<none>");
6044  pto.fDisconnect = true;
6045  } else {
6046  assert(state.m_chain_sync.m_work_header);
6047  LogPrint(
6048  BCLog::NET,
6049  "sending getheaders to outbound peer=%d to verify chain "
6050  "work (current best known block:%s, benchmark blockhash: "
6051  "%s)\n",
6052  pto.GetId(),
6053  state.pindexBestKnownBlock != nullptr
6054  ? state.pindexBestKnownBlock->GetBlockHash().ToString()
6055  : "<none>",
6056  state.m_chain_sync.m_work_header->GetBlockHash()
6057  .ToString());
6058  m_connman.PushMessage(
6059  &pto,
6060  msgMaker.Make(NetMsgType::GETHEADERS,
6061  m_chainman.ActiveChain().GetLocator(
6062  state.m_chain_sync.m_work_header->pprev),
6063  uint256()));
6064  state.m_chain_sync.m_sent_getheaders = true;
6065  // 2 minutes
6066  constexpr int64_t HEADERS_RESPONSE_TIME = 120;
6067  // Bump the timeout to allow a response, which could clear the
6068  // timeout (if the response shows the peer has synced), reset
6069  // the timeout (if the peer syncs to the required work but not
6070  // to our tip), or result in disconnect (if we advance to the
6071  // timeout and pindexBestKnownBlock has not sufficiently
6072  // progressed)
6073  state.m_chain_sync.m_timeout =
6074  time_in_seconds + HEADERS_RESPONSE_TIME;
6075  }
6076  }
6077  }
6078 }
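
// Illustrative sketch (not part of this file): the eviction logic above,
// compressed into a small state machine. SyncState, SyncAction and
// StepChainSync are hypothetical names; the timeout re-arming branch for a
// peer that catches up to an earlier benchmark is omitted for brevity.
#include <cstdint>

struct SyncState {
    int64_t timeout{0}; // 0 = no timeout armed
    bool sent_getheaders{false};
};

enum class SyncAction { None, SendGetheaders, Disconnect };

SyncAction StepChainSync(SyncState &s, bool peer_caught_up, int64_t now,
                         int64_t chain_sync_timeout,
                         int64_t headers_response_time) {
    if (peer_caught_up) {
        s = SyncState{}; // peer has enough work: clear any pending timeout
        return SyncAction::None;
    }
    if (s.timeout == 0) {
        s.timeout = now + chain_sync_timeout; // first time we notice the lag
        return SyncAction::None;
    }
    if (now <= s.timeout) {
        return SyncAction::None; // still within the grace period
    }
    if (s.sent_getheaders) {
        return SyncAction::Disconnect; // ran out of time to catch up
    }
    s.sent_getheaders = true;
    s.timeout = now + headers_response_time; // allow one last response
    return SyncAction::SendGetheaders;
}
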
6079 
6080 void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) {
6081  // If we have any extra block-relay-only peers, disconnect the youngest
6082  // unless it's given us a block -- in which case, compare with the
6083  // second-youngest, and out of those two, disconnect the peer who least
6084  // recently gave us a block.
6085  // The youngest block-relay-only peer would be the extra peer we connected
6086  // to temporarily in order to sync our tip; see net.cpp.
6087  // Note that we use higher nodeid as a measure for most recent connection.
6088  if (m_connman.GetExtraBlockRelayCount() > 0) {
6089  std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0},
6090  next_youngest_peer{-1, 0};
6091 
6092  m_connman.ForEachNode([&](CNode *pnode) {
6093  if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) {
6094  return;
6095  }
6096  if (pnode->GetId() > youngest_peer.first) {
6097  next_youngest_peer = youngest_peer;
6098  youngest_peer.first = pnode->GetId();
6099  youngest_peer.second = pnode->m_last_block_time;
6100  }
6101  });
6102 
6103  NodeId to_disconnect = youngest_peer.first;
6104  if (youngest_peer.second > next_youngest_peer.second) {
6105  // Our newest block-relay-only peer gave us a block more recently;
6106  // disconnect our second youngest.
6107  to_disconnect = next_youngest_peer.first;
6108  }
6109 
6110  m_connman.ForNode(
6111  to_disconnect,
6112  [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
6113  AssertLockHeld(::cs_main);
6114  // Make sure we're not getting a block right now, and that we've
6115  // been connected long enough for this eviction to happen at
6116  // all. Note that we only request blocks from a peer if we learn
6117  // of a valid headers chain with at least as much work as our
6118  // tip.
6119  CNodeState *node_state = State(pnode->GetId());
6120  if (node_state == nullptr ||
6121  (now - pnode->m_connected >= MINIMUM_CONNECT_TIME &&
6122  node_state->nBlocksInFlight == 0)) {
6123  pnode->fDisconnect = true;
6124  LogPrint(BCLog::NET,
6125  "disconnecting extra block-relay-only peer=%d "
6126  "(last block received at time %d)\n",
6127  pnode->GetId(),
6128  count_seconds(pnode->m_last_block_time));
6129  return true;
6130  } else {
6131  LogPrint(
6132  BCLog::NET,
6133  "keeping block-relay-only peer=%d chosen for eviction "
6134  "(connect time: %d, blocks_in_flight: %d)\n",
6135  pnode->GetId(), count_seconds(pnode->m_connected),
6136  node_state->nBlocksInFlight);
6137  }
6138  return false;
6139  });
6140  }
6141 
6142  // Check whether we have too many OUTBOUND_FULL_RELAY peers
6143  if (m_connman.GetExtraFullOutboundCount() <= 0) {
6144  return;
6145  }
6146 
6147  // If we have more OUTBOUND_FULL_RELAY peers than we target, disconnect one.
6148  // Pick the OUTBOUND_FULL_RELAY peer that least recently announced us a new
6149  // block, with ties broken by choosing the more recent connection (higher
6150  // node id)
6151  NodeId worst_peer = -1;
6152  int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
6153 
6154  m_connman.ForEachNode([&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(
6155  ::cs_main) {
6156  AssertLockHeld(::cs_main);
6157 
6158  // Only consider OUTBOUND_FULL_RELAY peers that are not already marked
6159  // for disconnection
6160  if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) {
6161  return;
6162  }
6163  CNodeState *state = State(pnode->GetId());
6164  if (state == nullptr) {
6165  // shouldn't be possible, but just in case
6166  return;
6167  }
6168  // Don't evict our protected peers
6169  if (state->m_chain_sync.m_protect) {
6170  return;
6171  }
6172  if (state->m_last_block_announcement < oldest_block_announcement ||
6173  (state->m_last_block_announcement == oldest_block_announcement &&
6174  pnode->GetId() > worst_peer)) {
6175  worst_peer = pnode->GetId();
6176  oldest_block_announcement = state->m_last_block_announcement;
6177  }
6178  });
6179 
6180  if (worst_peer == -1) {
6181  return;
6182  }
6183 
6184  bool disconnected = m_connman.ForNode(
6185  worst_peer, [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
6186  AssertLockHeld(::cs_main);
6187 
6188  // Only disconnect a peer that has been connected to us for some
6189  // reasonable fraction of our check-frequency, to give it time for
6190  // new information to have arrived. Also don't disconnect any peer
6191  // we're trying to download a block from.
6192  CNodeState &state = *State(pnode->GetId());
6193  if (now - pnode->m_connected > MINIMUM_CONNECT_TIME &&
6194  state.nBlocksInFlight == 0) {
6195  LogPrint(BCLog::NET,
6196  "disconnecting extra outbound peer=%d (last block "
6197  "announcement received at time %d)\n",
6198  pnode->GetId(), oldest_block_announcement);
6199  pnode->fDisconnect = true;
6200  return true;
6201  } else {
6202  LogPrint(BCLog::NET,
6203  "keeping outbound peer=%d chosen for eviction "
6204  "(connect time: %d, blocks_in_flight: %d)\n",
6205  pnode->GetId(), count_seconds(pnode->m_connected),
6206  state.nBlocksInFlight);
6207  return false;
6208  }
6209  });
6210 
6211  if (disconnected) {
6212  // If we disconnected an extra peer, that means we successfully
6213  // connected to at least one peer after the last time we detected a
6214  // stale tip. Don't try any more extra peers until we next detect a
6215  // stale tip, to limit the load we put on the network from these extra
6216  // connections.
6217  m_connman.SetTryNewOutboundPeer(false);
6218  }
6219 }
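
// Illustrative sketch (not part of this file): the youngest / second
// youngest scan in miniature. PeerInfo and PickBlockRelayEvictee are
// hypothetical; like the loop above, the scan assumes peers are visited
// in increasing id order (ids grow with connection time).
#include <chrono>
#include <utility>
#include <vector>

struct PeerInfo {
    int id;
    std::chrono::seconds last_block_time;
};

// Returns the id of the block-relay-only peer to drop: the youngest,
// unless it delivered a block more recently than the second youngest.
int PickBlockRelayEvictee(const std::vector<PeerInfo> &peers) {
    std::pair<int, std::chrono::seconds> youngest{-1, std::chrono::seconds{0}};
    std::pair<int, std::chrono::seconds> next_youngest = youngest;
    for (const PeerInfo &peer : peers) {
        if (peer.id > youngest.first) {
            next_youngest = youngest;
            youngest = {peer.id, peer.last_block_time};
        }
    }
    return youngest.second > next_youngest.second ? next_youngest.first
                                                  : youngest.first;
}
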
6220 
6221 void PeerManagerImpl::CheckForStaleTipAndEvictPeers() {
6222  LOCK(cs_main);
6223 
6224  int64_t time_in_seconds = GetTime();
6225 
6226  EvictExtraOutboundPeers(std::chrono::seconds{time_in_seconds});
6227 
6228  if (time_in_seconds > m_stale_tip_check_time) {
6229  // Check whether our tip is stale, and if so, allow using an extra
6230  // outbound peer.
6231  if (!fImporting && !fReindex && m_connman.GetNetworkActive() &&
6232  m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
6233  LogPrintf("Potential stale tip detected, will try using extra "
6234  "outbound peer (last tip update: %d seconds ago)\n",
6235  time_in_seconds - m_last_tip_update);
6236  m_connman.SetTryNewOutboundPeer(true);
6237  } else if (m_connman.GetTryNewOutboundPeer()) {
6238  m_connman.SetTryNewOutboundPeer(false);
6239  }
6240  m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
6241  }
6242 
6243  if (!m_initial_sync_finished &&
6244  CanDirectFetch(m_chainparams.GetConsensus())) {
6245  m_connman.StartExtraBlockRelayPeers();
6246  m_initial_sync_finished = true;
6247  }
6248 }
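
// Illustrative sketch (not part of this file): the stale-tip check runs at
// most once per STALE_CHECK_INTERVAL by booking the next slot up front.
// StaleTipScheduler is a hypothetical name for that pattern.
#include <cstdint>

struct StaleTipScheduler {
    int64_t next_check{0};

    // Returns true when a stale-tip evaluation should run now, and books
    // the next evaluation `interval` seconds ahead.
    bool Due(int64_t now, int64_t interval) {
        if (now <= next_check) {
            return false;
        }
        next_check = now + interval;
        return true;
    }
};
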
6249 
6250 void PeerManagerImpl::MaybeSendPing(CNode &node_to, Peer &peer,
6251  std::chrono::microseconds now) {
6252  if (m_connman.ShouldRunInactivityChecks(
6253  node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
6254  peer.m_ping_nonce_sent &&
6255  now > peer.m_ping_start.load() + TIMEOUT_INTERVAL) {
6256  // The ping timeout is using mocktime. To disable the check during
6257  // testing, increase -peertimeout.
6258  LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n",
6259  0.000001 * count_microseconds(now - peer.m_ping_start.load()),
6260  peer.m_id);
6261  node_to.fDisconnect = true;
6262  return;
6263  }
6264 
6265  const CNetMsgMaker msgMaker(node_to.GetCommonVersion());
6266  bool pingSend = false;
6267 
6268  if (peer.m_ping_queued) {
6269  // RPC ping request by user
6270  pingSend = true;
6271  }
6272 
6273  if (peer.m_ping_nonce_sent == 0 &&
6274  now > peer.m_ping_start.load() + PING_INTERVAL) {
6275  // Ping automatically sent as a latency probe & keepalive.
6276  pingSend = true;
6277  }
6278 
6279  if (pingSend) {
6280  uint64_t nonce = 0;
6281  while (nonce == 0) {
6282  GetRandBytes((uint8_t *)&nonce, sizeof(nonce));
6283  }
6284  peer.m_ping_queued = false;
6285  peer.m_ping_start = now;
6286  if (node_to.GetCommonVersion() > BIP0031_VERSION) {
6287  peer.m_ping_nonce_sent = nonce;
6288  m_connman.PushMessage(&node_to,
6289  msgMaker.Make(NetMsgType::PING, nonce));
6290  } else {
6291  // Peer is too old to support ping command with nonce, pong will
6292  // never arrive.
6293  peer.m_ping_nonce_sent = 0;
6294  m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING));
6295  }
6296  }
6297 }
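
// Illustrative sketch (not part of this file): ping nonces must be
// non-zero because 0 means "no ping outstanding", so generation simply
// retries until a non-zero value comes out. MakePingNonce is hypothetical
// and uses <random> in place of the node's GetRandBytes.
#include <cstdint>
#include <random>

uint64_t MakePingNonce() {
    std::mt19937_64 gen{std::random_device{}()};
    uint64_t nonce = 0;
    while (nonce == 0) {
        nonce = gen(); // a zero draw is astronomically rare, but possible
    }
    return nonce;
}
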
6298 
6299 void PeerManagerImpl::MaybeSendAddr(CNode &node, Peer &peer,
6300  std::chrono::microseconds current_time) {
6301  // Nothing to do for non-address-relay peers
6302  if (!peer.m_addr_relay_enabled) {
6303  return;
6304  }
6305 
6306  LOCK(peer.m_addr_send_times_mutex);
6307  if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
6308  peer.m_next_local_addr_send < current_time) {
6309  // If we've sent before, clear the bloom filter for the peer, so
6310  // that our self-announcement will actually go out. This might
6311  // be unnecessary if the bloom filter has already rolled over
6312  // since our last self-announcement, but there is only a small
6313  // bandwidth cost that we can incur by doing this (which happens
6314  // once a day on average).
6315  if (peer.m_next_local_addr_send != 0us) {
6316  peer.m_addr_known->reset();
6317  }
6318  if (std::optional<CAddress> local_addr = GetLocalAddrForPeer(&node)) {
6319  FastRandomContext insecure_rand;
6320  PushAddress(peer, *local_addr, insecure_rand);
6321  }
6322  peer.m_next_local_addr_send =
6323  PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
6324  }
6325 
6326  // We sent an `addr` message to this peer recently. Nothing more to do.
6327  if (current_time <= peer.m_next_addr_send) {
6328  return;
6329  }
6330 
6331  peer.m_next_addr_send =
6332  PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
6333 
6334  const size_t max_addr_to_send = GetMaxAddrToSend();
6335  if (!Assume(peer.m_addrs_to_send.size() <= max_addr_to_send)) {
6336  // Should be impossible since we always check size before adding to
6337  // m_addrs_to_send. Recover by trimming the vector.
6338  peer.m_addrs_to_send.resize(max_addr_to_send);
6339  }
6340 
6341  // Remove addr records that the peer already knows about, and add new
6342  // addrs to the m_addr_known filter on the same pass.
6343  auto addr_already_known = [&peer](const CAddress &addr) {
6344  bool ret = peer.m_addr_known->contains(addr.GetKey());
6345  if (!ret) {
6346  peer.m_addr_known->insert(addr.GetKey());
6347  }
6348  return ret;
6349  };
6350  peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(),
6351  peer.m_addrs_to_send.end(),
6352  addr_already_known),
6353  peer.m_addrs_to_send.end());
6354 
6355  // No addr messages to send
6356  if (peer.m_addrs_to_send.empty()) {
6357  return;
6358  }
6359 
6360  const char *msg_type;
6361  int make_flags;
6362  if (peer.m_wants_addrv2) {
6363  msg_type = NetMsgType::ADDRV2;
6364  make_flags = ADDRV2_FORMAT;
6365  } else {
6366  msg_type = NetMsgType::ADDR;
6367  make_flags = 0;
6368  }
6369  m_connman.PushMessage(
6370  &node, CNetMsgMaker(node.GetCommonVersion())
6371  .Make(make_flags, msg_type, peer.m_addrs_to_send));
6372  peer.m_addrs_to_send.clear();
6373 
6374  // we only send the big addr message once
6375  if (peer.m_addrs_to_send.capacity() > 40) {
6376  peer.m_addrs_to_send.shrink_to_fit();
6377  }
6378 }
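
// Illustrative sketch (not part of this file): the erase/remove pass above
// drops addresses the peer already knows and registers the new ones in the
// same sweep. A std::unordered_set stands in for the rolling bloom filter
// behind peer.m_addr_known; TrimKnownAddrs is hypothetical.
#include <algorithm>
#include <string>
#include <unordered_set>
#include <vector>

void TrimKnownAddrs(std::vector<std::string> &to_send,
                    std::unordered_set<std::string> &known) {
    auto already_known = [&known](const std::string &addr) {
        // insert() reports whether the element was new; a failed insert
        // means the address was already known and should be dropped.
        return !known.insert(addr).second;
    };
    to_send.erase(
        std::remove_if(to_send.begin(), to_send.end(), already_known),
        to_send.end());
}
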
6379 
6380 void PeerManagerImpl::MaybeSendFeefilter(
6381  CNode &pto, std::chrono::microseconds current_time) {
6382  if (m_ignore_incoming_txs) {
6383  return;
6384  }
6385  if (!pto.m_tx_relay) {
6386  return;
6387  }
6388  if (pto.GetCommonVersion() < FEEFILTER_VERSION) {
6389  return;
6390  }
6391  // peers with the forcerelay permission should not filter txs to us
6392  if (pto.HasPermission(PF_FORCERELAY)) {
6393  return;
6394  }
6395 
6396  Amount currentFilter =
6397  m_mempool
6398  .GetMinFee(
6399  gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
6400  1000000)
6401  .GetFeePerK();
6402  static FeeFilterRounder g_filter_rounder{
6403  CFeeRate{DEFAULT_MIN_RELAY_TX_FEE_PER_KB}};
6404 
6405  if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
6406  // Received tx-inv messages are discarded when the active
6407  // chainstate is in IBD, so tell the peer to not send them.
6408  currentFilter = MAX_MONEY;
6409  } else {
6410  static const Amount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)};
6411  if (pto.m_tx_relay->lastSentFeeFilter == MAX_FILTER) {
6412  // Send the current filter if we sent MAX_FILTER previously
6413  // and made it out of IBD.
6414  pto.m_tx_relay->m_next_send_feefilter = 0us;
6415  }
6416  }
6417  if (current_time > pto.m_tx_relay->m_next_send_feefilter) {
6418  Amount filterToSend = g_filter_rounder.round(currentFilter);
6419  // We always have a fee filter of at least minRelayTxFee
6420  filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
6421  if (filterToSend != pto.m_tx_relay->lastSentFeeFilter) {
6422  m_connman.PushMessage(
6423  &pto, CNetMsgMaker(pto.GetCommonVersion())
6424  .Make(NetMsgType::FEEFILTER, filterToSend));
6425  pto.m_tx_relay->lastSentFeeFilter = filterToSend;
6426  }
6427  pto.m_tx_relay->m_next_send_feefilter =
6428  PoissonNextSend(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
6429  }
6430  // If the fee filter has changed substantially and it's still more than
6431  // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the