|
|
|
@@ -105,6 +105,10 @@ class TestP2PConn(P2PInterface):
|
|
|
|
|
self.last_message.pop("headers", None)
|
|
|
|
|
self.last_message.pop("cmpctblock", None)
|
|
|
|
|
|
|
|
|
|
def clear_getblocktxn(self):
    """Forget any previously recorded "getblocktxn" message.

    Lets a later check under p2p_lock distinguish a freshly received
    "getblocktxn" from one left over by an earlier exchange.
    """
    with p2p_lock:
        try:
            del self.last_message["getblocktxn"]
        except KeyError:
            # Nothing recorded yet — same as pop(..., None).
            pass
|
|
|
|
|
|
|
|
|
|
def get_headers(self, locator, hashstop):
|
|
|
|
|
msg = msg_getheaders()
|
|
|
|
|
msg.locator.vHave = locator
|
|
|
|
@@ -745,7 +749,7 @@ class CompactBlocksTest(BitcoinTestFramework):
|
|
|
|
|
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
|
|
|
|
|
peer.send_and_ping(msg_sendcmpct(announce=True, version=2))
|
|
|
|
|
|
|
|
|
|
def test_compactblock_reconstruction_multiple_peers(self, stalling_peer, delivery_peer):
|
|
|
|
|
def test_compactblock_reconstruction_stalling_peer(self, stalling_peer, delivery_peer):
|
|
|
|
|
node = self.nodes[0]
|
|
|
|
|
assert len(self.utxos)
|
|
|
|
|
|
|
|
|
@@ -823,12 +827,85 @@ class CompactBlocksTest(BitcoinTestFramework):
|
|
|
|
|
hb_test_node.send_and_ping(msg_sendcmpct(announce=False, version=2))
|
|
|
|
|
assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=False)
|
|
|
|
|
|
|
|
|
|
def test_compactblock_reconstruction_parallel_reconstruction(self, stalling_peer, delivery_peer, inbound_peer, outbound_peer):
    """ All p2p connections are inbound except outbound_peer. We test that the ultimate parallel
    compact-block download slot can only be taken by an outbound node unless prior attempts
    were done by an outbound.
    """
    node = self.nodes[0]
    # make_utxos() must have run first; every announced block spends one of these.
    assert len(self.utxos)

    def announce_cmpct_block(node, peer, txn_count):
        # Build a block with txn_count transactions the node has not seen,
        # announce it via cmpctblock, and check the node asks for the
        # missing transactions (getblocktxn) rather than reconstructing.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, txn_count)

        cmpct_block = HeaderAndShortIDs()
        cmpct_block.initialize_from_block(block)
        msg = msg_cmpctblock(cmpct_block.to_p2p())
        peer.send_and_ping(msg)
        with p2p_lock:
            assert "getblocktxn" in peer.last_message
        return block, cmpct_block

    # Promote delivery_peer, inbound_peer and outbound_peer to high-bandwidth
    # mode by having each successfully deliver one compact block; the
    # bip152_hb_to states are asserted below. stalling_peer stays low-bandwidth.
    for name, peer in [("delivery", delivery_peer), ("inbound", inbound_peer), ("outbound", outbound_peer)]:
        self.log.info(f"Setting {name} as high bandwidth peer")
        block, cmpct_block = announce_cmpct_block(node, peer, 1)
        msg = msg_blocktxn()
        msg.block_transactions.blockhash = block.sha256
        msg.block_transactions.transactions = block.vtx[1:]
        peer.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
        peer.clear_getblocktxn()

    # Test the simple parallel download case...
    for num_missing in [1, 5, 20]:

        # Remaining low-bandwidth peer is stalling_peer, who announces first
        # (peer order in getpeerinfo presumably matches connection order — the
        # False entry is stalling_peer).
        assert_equal([peer['bip152_hb_to'] for peer in node.getpeerinfo()], [False, True, True, True])

        # First announcement: stalling_peer gets the first getblocktxn slot
        # but never answers it (hence "stalling").
        block, cmpct_block = announce_cmpct_block(node, stalling_peer, num_missing)

        delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
        with p2p_lock:
            # The second peer to announce should still get a getblocktxn
            assert "getblocktxn" in delivery_peer.last_message
        # No peer has supplied the missing transactions yet, so the tip
        # must not have advanced to the new block.
        assert int(node.getbestblockhash(), 16) != block.sha256

        inbound_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
        with p2p_lock:
            # The third inbound peer to announce should *not* get a getblocktxn
            assert "getblocktxn" not in inbound_peer.last_message
        assert int(node.getbestblockhash(), 16) != block.sha256

        outbound_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
        with p2p_lock:
            # The third peer to announce should get a getblocktxn if outbound
            assert "getblocktxn" in outbound_peer.last_message
        assert int(node.getbestblockhash(), 16) != block.sha256

        # Second peer completes the compact block first
        msg = msg_blocktxn()
        msg.block_transactions.blockhash = block.sha256
        msg.block_transactions.transactions = block.vtx[1:]
        delivery_peer.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)

        # Nothing bad should happen if we get a late fill from the first peer...
        stalling_peer.send_and_ping(msg)
        # Recycle the last tx's output so later iterations/tests have a UTXO.
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])

        # Reset recorded getblocktxn messages so the next iteration's
        # "in last_message" / "not in last_message" checks start clean.
        delivery_peer.clear_getblocktxn()
        inbound_peer.clear_getblocktxn()
        outbound_peer.clear_getblocktxn()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def run_test(self):
|
|
|
|
|
self.wallet = MiniWallet(self.nodes[0])
|
|
|
|
|
|
|
|
|
|
# Setup the p2p connections
|
|
|
|
|
self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn())
|
|
|
|
|
self.additional_segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn())
|
|
|
|
|
self.onemore_inbound_node = self.nodes[0].add_p2p_connection(TestP2PConn())
|
|
|
|
|
self.outbound_node = self.nodes[0].add_outbound_p2p_connection(TestP2PConn(), p2p_idx=3, connection_type="outbound-full-relay")
|
|
|
|
|
|
|
|
|
|
# We will need UTXOs to construct transactions in later tests.
|
|
|
|
|
self.make_utxos()
|
|
|
|
@ -838,6 +915,8 @@ class CompactBlocksTest(BitcoinTestFramework):
|
|
|
|
|
self.log.info("Testing SENDCMPCT p2p message... ")
|
|
|
|
|
self.test_sendcmpct(self.segwit_node)
|
|
|
|
|
self.test_sendcmpct(self.additional_segwit_node)
|
|
|
|
|
self.test_sendcmpct(self.onemore_inbound_node)
|
|
|
|
|
self.test_sendcmpct(self.outbound_node)
|
|
|
|
|
|
|
|
|
|
self.log.info("Testing compactblock construction...")
|
|
|
|
|
self.test_compactblock_construction(self.segwit_node)
|
|
|
|
@@ -860,8 +939,11 @@ class CompactBlocksTest(BitcoinTestFramework):
|
|
|
|
|
self.log.info("Testing handling of incorrect blocktxn responses...")
|
|
|
|
|
self.test_incorrect_blocktxn_response(self.segwit_node)
|
|
|
|
|
|
|
|
|
|
self.log.info("Testing reconstructing compact blocks from all peers...")
|
|
|
|
|
self.test_compactblock_reconstruction_multiple_peers(self.segwit_node, self.additional_segwit_node)
|
|
|
|
|
self.log.info("Testing reconstructing compact blocks with a stalling peer...")
|
|
|
|
|
self.test_compactblock_reconstruction_stalling_peer(self.segwit_node, self.additional_segwit_node)
|
|
|
|
|
|
|
|
|
|
self.log.info("Testing reconstructing compact blocks from multiple peers...")
|
|
|
|
|
self.test_compactblock_reconstruction_parallel_reconstruction(stalling_peer=self.segwit_node, inbound_peer=self.onemore_inbound_node, delivery_peer=self.additional_segwit_node, outbound_peer=self.outbound_node)
|
|
|
|
|
|
|
|
|
|
# Test that if we submitblock to node1, we'll get a compact block
|
|
|
|
|
# announcement to all peers.
|
|
|
|
|