// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_NET_PROCESSING_H
#define BITCOIN_NET_PROCESSING_H

#include <consensus/params.h>
#include <net.h>
#include <sync.h>
#include <txrequest.h>
#include <validationinterface.h>

class BlockTransactionsRequest;
class BlockValidationState;
class CBlockHeader;
class CChainParams;
class CTxMemPool;
class ChainstateManager;
class TxValidationState;

extern RecursiveMutex cs_main;
extern RecursiveMutex g_cs_orphans;

/** Default for -maxorphantx, maximum number of orphan transactions kept in memory */
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100;
/** Default number of orphan+recently-replaced txn to keep around for block reconstruction */
static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN = 100;
static const bool DEFAULT_PEERBLOOMFILTERS = false;
static const bool DEFAULT_PEERBLOCKFILTERS = false;
/** Threshold for marking a node to be discouraged, e.g. disconnected and added to the discouragement filter. */
static const int DISCOURAGEMENT_THRESHOLD{100};

struct CNodeStateStats {
    int m_misbehavior_score = 0;
    int nSyncHeight = -1;
    int nCommonHeight = -1;
    std::vector<int> vHeightInFlight;
};

/**
 * Data structure for an individual peer. This struct is not protected by
 * cs_main since it does not contain validation-critical data.
 *
 * Memory is owned by shared pointers and this object is destructed when
 * the refcount drops to zero.
 *
 * Mutexes inside this struct must not be held when locking m_peer_mutex.
 * (See the usage sketch following the PeerRef alias below.)
 *
 * TODO: move most members from CNodeState to this structure.
 * TODO: move remaining application-layer data members from CNode to this structure.
 */
struct Peer {
    /** Same id as the CNode object for this peer */
    const NodeId m_id{0};

    /** Protects misbehavior data members */
    Mutex m_misbehavior_mutex;
    /** Accumulated misbehavior score for this peer */
    int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
    /** Whether this peer should be disconnected and marked as discouraged (unless it has the noban permission). */
    bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};

    /** Set of txids to reconsider once their parent transactions have been accepted */
    std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);

    /** Protects m_getdata_requests */
    Mutex m_getdata_requests_mutex;
    /** Work queue of items requested by this peer */
    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);

    explicit Peer(NodeId id) : m_id(id) {}
};

using PeerRef = std::shared_ptr<Peer>;
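
// Usage sketch for the locking rules documented above (illustrative only; the
// real call sites live in net_processing.cpp). PeerManager::GetPeerRef() below
// is expected to copy the shared_ptr out of the peer map under m_peer_mutex and
// release that mutex before the caller touches any per-Peer mutex:
//
//     PeerRef peer = GetPeerRef(nodeid);  // m_peer_mutex taken and released inside
//     if (peer == nullptr) return;        // the peer may already have been removed
//     LOCK(peer->m_misbehavior_mutex);    // per-Peer locks only after m_peer_mutex is released
//     peer->m_misbehavior_score += howmuch;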

class PeerManager final : public CValidationInterface, public NetEventsInterface {
public:
    PeerManager(const CChainParams& chainparams, CConnman& connman, BanMan* banman,
                CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool,
                bool ignore_incoming_txs);

    /**
     * Overridden from CValidationInterface.
     */
    void BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override;
    void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override;
    /**
     * Overridden from CValidationInterface.
     */
    void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override;
    /**
     * Overridden from CValidationInterface.
     */
    void BlockChecked(const CBlock& block, const BlockValidationState& state) override;
    /**
     * Overridden from CValidationInterface.
     */
    void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override;

    /** Initialize a peer by adding it to mapNodeState and pushing a message requesting its version */
    void InitializeNode(CNode* pnode) override;
    /** Handle removal of a peer by updating various state and removing it from mapNodeState */
    void FinalizeNode(const CNode& node, bool& fUpdateConnectionTime) override;
    /**
     * Process protocol messages received from a given node
     *
     * @param[in]   pfrom           The node which we have received messages from.
     * @param[in]   interrupt       Interrupt condition for processing threads
     */
    bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override;
    /**
     * Send queued protocol messages to be sent to a given node.
     *
     * @param[in]   pto             The node which we are sending messages to.
     * @return                      True if there is more work to be done
     */
    bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing);

    /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
    void ConsiderEviction(CNode& pto, int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    /** Evict extra outbound peers. If we think our tip may be stale, connect to an extra outbound */
    void CheckForStaleTipAndEvictPeers();
    /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */
    void EvictExtraOutboundPeers(int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
    /** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */
    void ReattemptInitialBroadcast(CScheduler& scheduler) const;

    /** Process a single message from a peer. Public for fuzz testing */
    void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
                        const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc);

    /**
     * Increment peer's misbehavior score. If the new value >= DISCOURAGEMENT_THRESHOLD, mark the node
     * to be discouraged, meaning the peer might be disconnected and added to the discouragement filter.
     * Public for unit testing.
     */
    void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message);
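
    // Illustration of the threshold arithmetic described above (a sketch, not a
    // prescription of the scores net_processing actually assigns): with
    // DISCOURAGEMENT_THRESHOLD == 100, penalties accumulate until the peer is
    // marked for discouragement.
    //
    //     Misbehaving(nodeid, 20, "non-continuous headers"); // score: 20
    //     Misbehaving(nodeid, 20, "non-continuous headers"); // score: 40
    //     Misbehaving(nodeid, 100, "invalid block");         // score: 140 >= 100 -> discourage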

    /** Get statistics from node state */
    bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats);
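
    // Typical caller-side use of the stats snapshot (an illustrative sketch; RPC
    // code such as getpeerinfo is the intended consumer, and the exact call site
    // is outside this header):
    //
    //     CNodeStateStats stats;
    //     if (GetNodeStateStats(nodeid, stats)) {
    //         // stats.nSyncHeight, stats.nCommonHeight and stats.vHeightInFlight
    //         // now hold a snapshot for this peer; -1 means "unknown".
    //     }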

    /** Whether this node ignores txs received over p2p. */
    bool IgnoresIncomingTxs() { return m_ignore_incoming_txs; }

private:
    /** Get a shared pointer to the Peer object.
     *  May return an empty shared_ptr if the Peer object can't be found. */
    PeerRef GetPeerRef(NodeId id) const;

    /** Get a shared pointer to the Peer object and remove it from m_peer_map.
     *  May return an empty shared_ptr if the Peer object can't be found. */
    PeerRef RemovePeer(NodeId id);

    /**
     * Potentially mark a node discouraged based on the contents of a BlockValidationState object
     *
     * @param[in] via_compact_block this bool is passed in because net_processing should
     * punish peers differently depending on whether the data was provided in a compact
     * block message or not. If the compact block had a valid header, but contained invalid
     * txs, the peer should not be punished. See BIP 152.
     *
     * @return Returns true if the peer was punished (probably disconnected)
     */
    bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
                                 bool via_compact_block, const std::string& message = "");

    /**
     * Potentially disconnect and discourage a node based on the contents of a TxValidationState object
     *
     * @return Returns true if the peer was punished (probably disconnected)
     */
    bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, const std::string& message = "");

    /** Maybe disconnect a peer and discourage future connections from its address.
     *
     * @param[in]   pnode     The node to check.
     * @return                True if the peer was marked for disconnection in this function
     */
    bool MaybeDiscourageAndDisconnect(CNode& pnode);

    void ProcessOrphanTx(std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans);
    /** Process a single headers message from a peer. */
    void ProcessHeadersMessage(CNode& pfrom, const std::vector<CBlockHeader>& headers, bool via_compact_block);

    void SendBlockTransactions(CNode& pfrom, const CBlock& block, const BlockTransactionsRequest& req);

    /** Register with TxRequestTracker that an INV has been received from a
     *  peer. The announcement parameters are decided in PeerManager and then
     *  passed to TxRequestTracker. */
    void AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
        EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
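
    // Conceptually, AddTxAnnouncement decides whether the announcing peer is
    // preferred and how long to delay a request, then hands the announcement to
    // m_txrequest. A rough sketch, assuming TxRequestTracker::ReceivedInv takes
    // (peer, gtxid, preferred, reqtime) -- see txrequest.h for the authoritative
    // signature and net_processing.cpp for the exact delay/preference policy:
    //
    //     const bool preferred = /* e.g. outbound or whitelisted peers */;
    //     auto delay = std::chrono::microseconds{0};
    //     if (!preferred) delay += std::chrono::seconds{2}; // inbound announcements wait briefly
    //     m_txrequest.ReceivedInv(node.GetId(), gtxid, preferred, current_time + delay);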

    /** Send a version message to a peer */
    void PushNodeVersion(CNode& pnode, int64_t nTime);

    const CChainParams& m_chainparams;
    CConnman& m_connman;
    /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
    BanMan* const m_banman;
    ChainstateManager& m_chainman;
    CTxMemPool& m_mempool;
    TxRequestTracker m_txrequest GUARDED_BY(::cs_main);

    int64_t m_stale_tip_check_time; //!< Next time to check for stale tip

    /** Whether this node is running in blocks-only mode */
    const bool m_ignore_incoming_txs;

    /** Whether we've completed initial sync yet, for determining when to turn
     *  on extra block-relay-only peers. */
    bool m_initial_sync_finished{false};

    /** Protects m_peer_map. This mutex must not be locked while holding a lock
     *  on any of the mutexes inside a Peer object. */
    mutable Mutex m_peer_mutex;
    /**
     * Map of all Peer objects, keyed by peer id. This map is protected
     * by the m_peer_mutex. Once a shared pointer reference is
     * taken, the lock may be released. Individual fields are protected by
     * their own locks.
     */
    std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
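
    // Access pattern implied by the comment above (an illustrative sketch;
    // GetPeerRef() is the helper expected to do this internally):
    //
    //     PeerRef ref;
    //     {
    //         LOCK(m_peer_mutex);               // held only long enough to copy the pointer
    //         auto it = m_peer_map.find(id);
    //         if (it != m_peer_map.end()) ref = it->second;
    //     }
    //     // m_peer_mutex released; ref keeps the Peer alive even if RemovePeer()
    //     // erases the map entry, and per-field mutexes can be taken safely now.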
};

/** Relay transaction to every node */
void RelayTransaction(const uint256& txid, const uint256& wtxid, const CConnman& connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main);

#endif // BITCOIN_NET_PROCESSING_H