From 9f66ac7cf1931c4d7c36abbb000b7de306d83a4c Mon Sep 17 00:00:00 2001
From: Pieter Wuille
Date: Tue, 12 Mar 2024 13:18:15 -0400
Subject: [PATCH] net_processing: do not treat non-connecting headers as
 response

Since https://github.com/bitcoin/bitcoin/pull/25454 we keep track of the
last GETHEADERS request that was sent and wasn't responded to. So far,
every incoming HEADERS message is treated as a response to whatever
GETHEADERS was last sent, regardless of its contents.

This commit makes this tracking more accurate, by only treating HEADERS
messages which (1) are empty, (2) connect to our existing block header
tree, or (3) are a continuation of a low-work headers sync as responses
that clear the "outstanding GETHEADERS" state (m_last_getheaders_timestamp).

That means that HEADERS messages which do not satisfy any of the above
criteria will be ignored, not triggering a GETHEADERS, and potentially
(for now, but see later commit) increase misbehavior score.
---
 src/net_processing.cpp             | 32 +++++++++++++++---------------
 test/functional/p2p_sendheaders.py |  9 ++++++++-
 2 files changed, 24 insertions(+), 17 deletions(-)

diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 6374cb52c18..d78fbffc14d 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -2768,25 +2768,21 @@ bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfro
 {
     if (peer.m_headers_sync) {
         auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == MAX_HEADERS_RESULTS);
+        // If it is a valid continuation, we should treat the existing getheaders request as responded to.
+        if (result.success) peer.m_last_getheaders_timestamp = {};
         if (result.request_more) {
             auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
             // If we were instructed to ask for a locator, it should not be empty.
             Assume(!locator.vHave.empty());
+            // We can only be instructed to request more if processing was successful.
+            Assume(result.success);
             if (!locator.vHave.empty()) {
                 // It should be impossible for the getheaders request to fail,
-                // because we should have cleared the last getheaders timestamp
-                // when processing the headers that triggered this call. But
-                // it may be possible to bypass this via compactblock
-                // processing, so check the result before logging just to be
-                // safe.
+                // because we just cleared the last getheaders timestamp.
                 bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer);
-                if (sent_getheaders) {
-                    LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
-                            locator.vHave.front().ToString(), pfrom.GetId());
-                } else {
-                    LogPrint(BCLog::NET, "error sending next getheaders (from %s) to continue sync with peer=%d\n",
-                            locator.vHave.front().ToString(), pfrom.GetId());
-                }
+                Assume(sent_getheaders);
+                LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
+                        locator.vHave.front().ToString(), pfrom.GetId());
             }
         }
 
@@ -3065,6 +3061,9 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
             LOCK(m_headers_presync_mutex);
             m_headers_presync_stats.erase(pfrom.GetId());
         }
+        // A headers message with no headers cannot be an announcement, so assume
+        // it is a response to our last getheaders request, if there is one.
+        peer.m_last_getheaders_timestamp = {};
         return;
     }
 
@@ -3129,6 +3128,11 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
         return;
     }
 
+    // If headers connect, assume that this is in response to any outstanding getheaders
+    // request we may have sent, and clear out the time of our last request. Non-connecting
+    // headers cannot be a response to a getheaders request.
+    peer.m_last_getheaders_timestamp = {};
+
     // If the headers we received are already in memory and an ancestor of
     // m_best_header or our tip, skip anti-DoS checks. These headers will not
     // use any more memory (and we are not leaking information that could be
@@ -4984,10 +4988,6 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
             return;
         }
 
-        // Assume that this is in response to any outstanding getheaders
-        // request we may have sent, and clear out the time of our last request
-        peer->m_last_getheaders_timestamp = {};
-
         std::vector<CBlockHeader> headers;
 
         // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py
index 27a3aa8fb9d..755e96d4a3d 100755
--- a/test/functional/p2p_sendheaders.py
+++ b/test/functional/p2p_sendheaders.py
@@ -559,7 +559,13 @@ class SendHeadersTest(BitcoinTestFramework):
             height += 1
 
         for i in range(1, MAX_NUM_UNCONNECTING_HEADERS_MSGS):
-            # Send a header that doesn't connect, check that we get a getheaders.
+            with p2p_lock:
+                test_node.last_message.pop("getheaders", None)
+            # Send an empty header as a failed response to the received getheaders
+            # (from the previous iteration). Otherwise, the new headers will be
+            # treated as a response instead of as an announcement.
+            test_node.send_header_for_blocks([])
+            # Send the actual unconnecting header, which should trigger a new getheaders.
             test_node.send_header_for_blocks([blocks[i]])
             test_node.wait_for_getheaders(block_hash=expected_hash)
 
@@ -574,6 +580,7 @@ class SendHeadersTest(BitcoinTestFramework):
         # before we get disconnected. Should be 5*MAX_NUM_UNCONNECTING_HEADERS_MSGS
         for i in range(5 * MAX_NUM_UNCONNECTING_HEADERS_MSGS - 1):
             # Send a header that doesn't connect, check that we get a getheaders.
+            test_node.send_header_for_blocks([])
             test_node.send_header_for_blocks([blocks[i % len(blocks)]])
             test_node.wait_for_getheaders(block_hash=expected_hash)
 
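
Note (not part of the patch): the three conditions in the commit message can be read as
a single predicate over an incoming HEADERS message. The sketch below is an illustrative
Python model only, under the assumption that the two boolean inputs
(connects_to_block_index, continues_low_work_sync) stand in for the checks net_processing
actually performs; none of these names exist in the repository.

    def clears_outstanding_getheaders(headers, connects_to_block_index, continues_low_work_sync):
        """Would this HEADERS message reset m_last_getheaders_timestamp? (illustrative model)"""
        if len(headers) == 0:
            return True   # (1) an empty HEADERS message cannot be an announcement
        if connects_to_block_index:
            return True   # (2) headers connect to our existing block header tree
        if continues_low_work_sync:
            return True   # (3) valid continuation of a low-work headers sync
        return False      # anything else keeps the getheaders request outstanding

    # A non-connecting, non-sync HEADERS message keeps the request outstanding; an empty one clears it.
    assert clears_outstanding_getheaders(["fake_header"], False, False) is False
    assert clears_outstanding_getheaders([], False, False) is True

This is also why the test changes above first send send_header_for_blocks([]): the empty
message acts as condition (1) and clears the outstanding request, so the unconnecting
header that follows is processed as a fresh announcement rather than as a response.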