@@ -1205,6 +1205,213 @@ inline void static SendBlockTransactions(const CBlock& block, const BlockTransac
    connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
}
bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool punish_duplicate_invalid)
{
    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
    size_t nCount = headers.size();
    if (nCount == 0) {
        // Nothing interesting. Stop asking this peer for more headers.
        return true;
    }
    const CBlockIndex *pindexLast = nullptr;
    {
        LOCK(cs_main);
        CNodeState *nodestate = State(pfrom->GetId());
        // If this looks like it could be a block announcement (nCount <
        // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
        // don't connect:
        // - Send a getheaders message in response to try to connect the chain.
        // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
        //   don't connect before giving DoS points
        // - Once a headers message is received that is valid and does connect,
        //   nUnconnectingHeaders gets reset back to 0.
        if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
            nodestate->nUnconnectingHeaders++;
            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
            LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                    headers[0].GetHash().ToString(),
                    headers[0].hashPrevBlock.ToString(),
                    pindexBestHeader->nHeight,
                    pfrom->GetId(), nodestate->nUnconnectingHeaders);
            // Set hashLastUnknownBlock for this peer, so that if we
            // eventually get the headers - even from a different peer -
            // we can use this peer to download.
            UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
            if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
                Misbehaving(pfrom->GetId(), 20);
            }
            return true;
        }
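        // Illustrative note (not part of the original code): this accounting is
        // deliberately lenient. A non-connecting announcement only bumps
        // nUnconnectingHeaders and triggers a getheaders retry; the
        // Misbehaving(20) penalty above fires only on every
        // MAX_UNCONNECTING_HEADERS-th occurrence (assumed to be 10 here, its
        // usual value in this file), so an occasional out-of-order announcement
        // never costs a well-behaved peer anything.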
        uint256 hashLastBlock;
        for (const CBlockHeader& header : headers) {
            if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
                Misbehaving(pfrom->GetId(), 20);
                return error("non-continuous headers sequence");
            }
            hashLastBlock = header.GetHash();
        }
    }
    CValidationState state;
    CBlockHeader first_invalid_header;
    if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast, &first_invalid_header)) {
        int nDoS;
        if (state.IsInvalid(nDoS)) {
            if (nDoS > 0) {
                LOCK(cs_main);
                Misbehaving(pfrom->GetId(), nDoS);
            }
            if (punish_duplicate_invalid && mapBlockIndex.find(first_invalid_header.GetHash()) != mapBlockIndex.end()) {
                // Goal: don't allow outbound peers to use up our outbound
                // connection slots if they are on incompatible chains.
                //
                // We ask the caller to set punish_duplicate_invalid appropriately based
                // on the peer and the method of header delivery (compact
                // blocks are allowed to be invalid in some circumstances,
                // under BIP 152).
                // Here, we try to detect the narrow situation that we have a
                // valid block header (ie it was valid at the time the header
                // was received, and hence stored in mapBlockIndex) but know the
                // block is invalid, and that a peer has announced that same
                // block as being on its active chain.
                // Disconnect the peer in such a situation.
                //
                // Note: if the header that is invalid was not accepted to our
                // mapBlockIndex at all, that may also be grounds for
                // disconnecting the peer, as the chain they are on is likely
                // to be incompatible. However, there is a circumstance where
                // that does not hold: if the header's timestamp is more than
                // 2 hours ahead of our current time. In that case, the header
                // may become valid in the future, and we don't want to
                // disconnect a peer merely for serving us one too-far-ahead
                // block header, to prevent an attacker from splitting the
                // network by mining a block right at the 2 hour boundary.
                //
                // TODO: update the DoS logic (or, rather, rewrite the
                // DoS-interface between validation and net_processing) so that
                // the interface is cleaner, and so that we disconnect on all the
                // reasons that a peer's headers chain is incompatible
                // with ours (eg block->nVersion softforks, MTP violations,
                // etc), and not just the duplicate-invalid case.
                pfrom->fDisconnect = true;
            }
            return error("invalid header received");
        }
    }
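    // Illustrative note (not part of the original code): first_invalid_header is
    // the extra out-parameter taken by this call to ProcessNewBlockHeaders (the
    // pre-existing call site further down passes only four arguments). It is only
    // meaningful when the call returns false; the check above looks its hash up in
    // mapBlockIndex to distinguish "a header we have already stored but whose block
    // we know to be invalid" from "a header that was never accepted at all", e.g.
    // one with a timestamp more than 2 hours in the future.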
    {
        LOCK(cs_main);
        CNodeState *nodestate = State(pfrom->GetId());
        if (nodestate->nUnconnectingHeaders > 0) {
            LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
        }
        nodestate->nUnconnectingHeaders = 0;
        assert(pindexLast);
        UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
        // From here, pindexBestKnownBlock should be guaranteed to be non-null,
        // because it is set in UpdateBlockAvailability. Some nullptr checks
        // are still present, however, as belt-and-suspenders.
        if (nCount == MAX_HEADERS_RESULTS) {
            // Headers message had its maximum size; the peer may have more headers.
            // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
            // from there instead.
            LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
            connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
        }
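        // Illustrative note (not part of the original code): MAX_HEADERS_RESULTS
        // is the per-message headers cap (assumed 2000 here, its usual value), so
        // a maximally full message is treated as "the peer probably has more" and
        // the sync is continued from pindexLast.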
        bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
        // If this set of headers is valid and ends in a block with at least as
        // much work as our tip, download as much as possible.
        if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
            std::vector<const CBlockIndex*> vToFetch;
            const CBlockIndex *pindexWalk = pindexLast;
            // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
            while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                        !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
                        (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
                    // We don't have this block, and it's not yet in flight.
                    vToFetch.push_back(pindexWalk);
                }
                pindexWalk = pindexWalk->pprev;
            }
            // If pindexWalk still isn't on our main chain, we're looking at a
            // very large reorg at a time we think we're close to caught up to
            // the main chain -- this shouldn't really happen. Bail out on the
            // direct fetch and rely on parallel download instead.
            if (!chainActive.Contains(pindexWalk)) {
                LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                        pindexLast->GetBlockHash().ToString(),
                        pindexLast->nHeight);
            } else {
                std::vector<CInv> vGetData;
                // Download as much as possible, from earliest to latest.
                for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
                    if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                        // Can't download any more from this peer
                        break;
                    }
                    uint32_t nFetchFlags = GetFetchFlags(pfrom);
                    vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
                    MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex);
                    LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
                            pindex->GetBlockHash().ToString(), pfrom->GetId());
                }
                if (vGetData.size() > 1) {
                    LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
                            pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
                }
                if (vGetData.size() > 0) {
                    if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
                        // In any case, we want to download using a compact block, not a regular one
                        vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                    }
                    connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                }
            }
        }
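        // Illustrative note (not part of the original code): the walk above stops
        // at MAX_BLOCKS_IN_TRANSIT_PER_PEER blocks (assumed 16 here, its usual
        // value), and the getdata is upgraded to MSG_CMPCT_BLOCK only when this
        // would be the lone block in flight and its parent is already valid --
        // the case where compact block reconstruction from our mempool is most
        // likely to succeed.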
        // If we're in IBD, we want outbound peers that will serve us a useful
        // chain. Disconnect peers that are on chains with insufficient work.
        if (IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
            // When nCount < MAX_HEADERS_RESULTS, we know we have no more
            // headers to fetch from this peer.
            if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
                // This peer has too little work on their headers chain to help
                // us sync -- disconnect if using an outbound slot (unless
                // whitelisted or addnode).
                // Note: We compare their tip to nMinimumChainWork (rather than
                // chainActive.Tip()) because we won't start block download
                // until we have a headers chain that has at least
                // nMinimumChainWork, even if a peer has a chain past our tip,
                // as an anti-DoS measure.
                if (IsOutboundDisconnectionCandidate(pfrom)) {
                    LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom->GetId());
                    pfrom->fDisconnect = true;
                }
            }
        }
        if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr) {
            // If this is an outbound peer, check to see if we should protect
            // it from the bad/lagging chain logic.
            if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
                nodestate->m_chain_sync.m_protect = true;
                ++g_outbound_peers_with_protect_from_disconnect;
            }
        }
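        // Illustrative note (not part of the original code): m_chain_sync.m_protect
        // exempts the peer from the bad/lagging-chain eviction mentioned above, and
        // the global counter caps how many outbound peers may be protected at once
        // (MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT, assumed to be 4 here).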
    }
    return true;
}
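// Illustrative usage sketch (not part of the change itself), showing how the two
// call sites further down pick punish_duplicate_invalid. Headers that arrive via
// a compact block announcement are never punished, since BIP 152 lets
// high-bandwidth peers relay before fully validating:
//
//     return ProcessHeadersMessage(pfrom, connman, {cmpctblock.header}, chainparams, /*punish_duplicate_invalid=*/false);
//
// Headers that arrive via a HEADERS message are punished only when the peer
// occupies an outbound, non-manual connection slot:
//
//     bool should_punish = !pfrom->fInbound && !pfrom->m_manual_connection;
//     return ProcessHeadersMessage(pfrom, connman, headers, chainparams, should_punish);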
bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
{
    LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
@@ -2047,7 +2254,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
        // If we end up treating this as a plain headers message, call that as well
        // without cs_main.
        bool fRevertToHeaderProcessing = false;
        CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION);
        // Keep a CBlock for "optimistic" compactblock reconstructions (see
        // below)
@@ -2164,10 +2370,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                return true;
            } else {
                // If this was an announce-cmpctblock, we want the same treatment as a header message
                // Dirty hack to process as if it were just a headers message (TODO: move message handling into their own functions)
                std::vector<CBlock> headers;
                headers.push_back(cmpctblock.header);
                vHeadersMsg << headers;
                fRevertToHeaderProcessing = true;
            }
        }
@@ -2176,8 +2378,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
        if (fProcessBLOCKTXN)
            return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman, interruptMsgProc);
        if (fRevertToHeaderProcessing)
            return ProcessMessage(pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman, interruptMsgProc);
        if (fRevertToHeaderProcessing) {
            // Headers received from HB compact block peers are permitted to be
            // relayed before full validation (see BIP 152), so we don't want to disconnect
            // the peer if the header turns out to be for an invalid block.
            // Note that if a peer tries to build on an invalid chain, that
            // will be detected and the peer will be banned.
            return ProcessHeadersMessage(pfrom, connman, {cmpctblock.header}, chainparams, /*punish_duplicate_invalid=*/false);
        }
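        // Illustrative note (not part of the original code): this direct call
        // replaces the old approach -- removed in the hunk above -- of
        // re-serializing cmpctblock.header into a synthetic HEADERS message
        // (vHeadersMsg) and recursing into ProcessMessage, which the old code
        // itself called a "dirty hack".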
        if (fBlockReconstructed) {
            // If we got here, we were able to optimistically reconstruct a
@@ -2308,169 +2516,12 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
            ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
        }
        if (nCount == 0) {
            // Nothing interesting. Stop asking this peer for more headers.
            return true;
        }
        const CBlockIndex *pindexLast = nullptr;
        // Headers received via a HEADERS message should be valid, and reflect
        // the chain the peer is on. If we receive a known-invalid header,
        // disconnect the peer if it is using one of our outbound connection
        // slots.
        bool should_punish = !pfrom->fInbound && !pfrom->m_manual_connection;
        return ProcessHeadersMessage(pfrom, connman, headers, chainparams, should_punish);
        {
            LOCK(cs_main);
            CNodeState *nodestate = State(pfrom->GetId());
            // If this looks like it could be a block announcement (nCount <
            // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
            // don't connect:
            // - Send a getheaders message in response to try to connect the chain.
            // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
            //   don't connect before giving DoS points
            // - Once a headers message is received that is valid and does connect,
            //   nUnconnectingHeaders gets reset back to 0.
            if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
                nodestate->nUnconnectingHeaders++;
                connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
                LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                        headers[0].GetHash().ToString(),
                        headers[0].hashPrevBlock.ToString(),
                        pindexBestHeader->nHeight,
                        pfrom->GetId(), nodestate->nUnconnectingHeaders);
                // Set hashLastUnknownBlock for this peer, so that if we
                // eventually get the headers - even from a different peer -
                // we can use this peer to download.
                UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
                if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
                    Misbehaving(pfrom->GetId(), 20);
                }
                return true;
            }
            uint256 hashLastBlock;
            for (const CBlockHeader& header : headers) {
                if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
                    Misbehaving(pfrom->GetId(), 20);
                    return error("non-continuous headers sequence");
                }
                hashLastBlock = header.GetHash();
            }
        }
        CValidationState state;
        if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast)) {
            int nDoS;
            if (state.IsInvalid(nDoS)) {
                if (nDoS > 0) {
                    LOCK(cs_main);
                    Misbehaving(pfrom->GetId(), nDoS);
                }
                return error("invalid header received");
            }
        }
        {
            LOCK(cs_main);
            CNodeState *nodestate = State(pfrom->GetId());
            if (nodestate->nUnconnectingHeaders > 0) {
                LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
            }
            nodestate->nUnconnectingHeaders = 0;
            assert(pindexLast);
            UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
            // From here, pindexBestKnownBlock should be guaranteed to be non-null,
            // because it is set in UpdateBlockAvailability. Some nullptr checks
            // are still present, however, as belt-and-suspenders.
            if (nCount == MAX_HEADERS_RESULTS) {
                // Headers message had its maximum size; the peer may have more headers.
                // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
                // from there instead.
                LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
                connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
            }
            bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
            // If this set of headers is valid and ends in a block with at least as
            // much work as our tip, download as much as possible.
            if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
                std::vector<const CBlockIndex*> vToFetch;
                const CBlockIndex *pindexWalk = pindexLast;
                // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
                while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                    if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                            !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
                            (!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
                        // We don't have this block, and it's not yet in flight.
                        vToFetch.push_back(pindexWalk);
                    }
                    pindexWalk = pindexWalk->pprev;
                }
                // If pindexWalk still isn't on our main chain, we're looking at a
                // very large reorg at a time we think we're close to caught up to
                // the main chain -- this shouldn't really happen. Bail out on the
                // direct fetch and rely on parallel download instead.
                if (!chainActive.Contains(pindexWalk)) {
                    LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                            pindexLast->GetBlockHash().ToString(),
                            pindexLast->nHeight);
                } else {
                    std::vector<CInv> vGetData;
                    // Download as much as possible, from earliest to latest.
                    for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
                        if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                            // Can't download any more from this peer
                            break;
                        }
                        uint32_t nFetchFlags = GetFetchFlags(pfrom);
                        vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
                        MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex);
                        LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
                                pindex->GetBlockHash().ToString(), pfrom->GetId());
                    }
                    if (vGetData.size() > 1) {
                        LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
                                pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
                    }
                    if (vGetData.size() > 0) {
                        if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
                            // In any case, we want to download using a compact block, not a regular one
                            vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                        }
                        connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                    }
                }
            }
            // If we're in IBD, we want outbound peers that will serve us a useful
            // chain. Disconnect peers that are on chains with insufficient work.
            if (IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
                // When nCount < MAX_HEADERS_RESULTS, we know we have no more
                // headers to fetch from this peer.
                if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
                    // This peer has too little work on their headers chain to help
                    // us sync -- disconnect if using an outbound slot (unless
                    // whitelisted or addnode).
                    // Note: We compare their tip to nMinimumChainWork (rather than
                    // chainActive.Tip()) because we won't start block download
                    // until we have a headers chain that has at least
                    // nMinimumChainWork, even if a peer has a chain past our tip,
                    // as an anti-DoS measure.
                    if (IsOutboundDisconnectionCandidate(pfrom)) {
                        LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom->GetId());
                        pfrom->fDisconnect = true;
                    }
                }
            }
            if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr) {
                // If this is an outbound peer, check to see if we should protect
                // it from the bad/lagging chain logic.
                if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
                    nodestate->m_chain_sync.m_protect = true;
                    ++g_outbound_peers_with_protect_from_disconnect;
                }
            }
        }
    }
    else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing