
Commit 550096e

committed Feb 2, 2023
test: Functional test for opportunistic encryption
1 parent bab691d commit 550096e

File tree

3 files changed: +123 -10 lines changed

3 files changed

+123
-10
lines changed
 

test/functional/p2p_v2.py (new file, +91 lines)
#!/usr/bin/env python3
# Copyright (c) 2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test BIP324 v2 transport
"""

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal

class P2PV2Test(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 5
        self.extra_args = [["-v2transport=1"], ["-v2transport=1"], ["-v2transport=0"], ["-v2transport=0"], ["-v2transport=0"]]

    def run_test(self):
        sending_ellswift = "sending 64 byte v2 p2p ellswift key to peer"
        downgrading_to_v1 = "downgrading to v1 transport protocol for peer"
        self.disconnect_nodes(0, 1)
        self.disconnect_nodes(1, 2)
        self.disconnect_nodes(2, 3)
        self.disconnect_nodes(3, 4)

        # V2 nodes can sync with V2 nodes
        assert_equal(self.nodes[0].getblockcount(), 0)
        assert_equal(self.nodes[1].getblockcount(), 0)
        self.nodes[0].generatetoaddress(5, "bcrt1q0yq2azut8gn2xu3y2g0xucf8pny6w8uxmyf220", invalid_call=False)
        assert_equal(self.nodes[0].getblockcount(), 5)
        assert_equal(self.nodes[1].getblockcount(), 0)
        with self.nodes[0].assert_debug_log(expected_msgs=[sending_ellswift],
                                            unexpected_msgs=[downgrading_to_v1]):
            self.connect_nodes(0, 1, True)
        # sync_all() verifies that the block tips match
        self.sync_all(self.nodes[0:2])
        assert_equal(self.nodes[1].getblockcount(), 5)

        # V1 nodes can sync with each other
        assert_equal(self.nodes[2].getblockcount(), 0)
        assert_equal(self.nodes[3].getblockcount(), 0)
        self.nodes[2].generatetoaddress(8, "bcrt1qyr5lnc2g8aa3qa9c4th9d46n5uu4y0m9nvq2cv", invalid_call=False)
        assert_equal(self.nodes[2].getblockcount(), 8)
        assert_equal(self.nodes[3].getblockcount(), 0)
        with self.nodes[2].assert_debug_log(expected_msgs=[],
                                            unexpected_msgs=[sending_ellswift, downgrading_to_v1]):
            self.connect_nodes(2, 3, False)
        self.sync_all(self.nodes[2:4])
        assert_equal(self.nodes[3].getblockcount(), 8)
        assert self.nodes[0].getbestblockhash() != self.nodes[2].getbestblockhash()

        # V1 nodes can sync with V2 nodes
        self.disconnect_nodes(0, 1)
        self.disconnect_nodes(2, 3)
        with self.nodes[2].assert_debug_log(expected_msgs=[],
                                            unexpected_msgs=[sending_ellswift, downgrading_to_v1]):
            self.connect_nodes(2, 1, True)
        self.sync_all(self.nodes[1:3])
        assert_equal(self.nodes[1].getblockcount(), 8)
        assert self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash()

        # V2 nodes can sync with V1 nodes
        self.disconnect_nodes(1, 2)
        with self.nodes[0].assert_debug_log(expected_msgs=[],
                                            unexpected_msgs=[sending_ellswift, downgrading_to_v1]):
            self.connect_nodes(0, 3, False)
        self.sync_all([self.nodes[0], self.nodes[3]])
        assert_equal(self.nodes[0].getblockcount(), 8)

        # V2 node mines another block and everyone gets it
        self.connect_nodes(0, 1, True)
        self.connect_nodes(1, 2, False)
        self.nodes[1].generatetoaddress(1, "bcrt1q3zsxn3qx0cqyyxgv90k7j6786mpe543wc4vy2v", invalid_call=False)
        self.sync_all(self.nodes[0:4])
        assert_equal(self.nodes[0].getblockcount(), 9) # sync_all() verifies tip hashes match

        # V1 node mines another block and everyone gets it
        self.nodes[3].generatetoaddress(2, "bcrt1q3zsxn3qx0cqyyxgv90k7j6786mpe543wc4vy2v", invalid_call=False)
        self.sync_all(self.nodes[0:4])
        assert_equal(self.nodes[2].getblockcount(), 11) # sync_all() verifies tip hashes match

        assert_equal(self.nodes[4].getblockcount(), 0)
        # Peer 4 is v1 p2p, but is falsely advertised as v2.
        with self.nodes[1].assert_debug_log(expected_msgs=[sending_ellswift, downgrading_to_v1]):
            self.connect_nodes(1, 4, True)
        self.sync_all()
        assert_equal(self.nodes[4].getblockcount(), 11)


if __name__ == '__main__':
    P2PV2Test().main()
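
The final case above exercises the opportunistic fallback: node 4 only speaks v1 but is advertised to node 1 as v2, so node 1 is expected to first send its ellswift key and then downgrade the connection. A minimal, hypothetical sketch isolating just that fallback check (the class name, node count, and indices are illustrative and not part of this commit; the log fragments and helper calls are the ones used in p2p_v2.py above):

#!/usr/bin/env python3
# Hypothetical sketch, not part of this commit: a v2 node connects to a
# v1-only peer that was (falsely) advertised as v2, and both the ellswift key
# send and the downgrade message are expected in the initiator's debug log.
from test_framework.test_framework import BitcoinTestFramework

class V2DowngradeSketch(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 2
        # node0 enables the v2 transport, node1 does not
        self.extra_args = [["-v2transport=1"], ["-v2transport=0"]]

    def run_test(self):
        # the default setup connects the nodes in a chain, so start disconnected
        self.disconnect_nodes(0, 1)
        with self.nodes[0].assert_debug_log(
                expected_msgs=["sending 64 byte v2 p2p ellswift key to peer",
                               "downgrading to v1 transport protocol for peer"]):
            self.connect_nodes(0, 1, True)  # third arg: peer advertised as v2
        # after the downgrade the v1 handshake completes and the nodes can sync
        self.sync_all()

if __name__ == '__main__':
    V2DowngradeSketch().main()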

test/functional/test_framework/test_framework.py (+31 -10 lines)
@@ -588,26 +588,47 @@ def restart_node(self, i, extra_args=None):
     def wait_for_node_exit(self, i, timeout):
         self.nodes[i].process.wait(timeout)
 
-    def connect_nodes(self, a, b):
+    def connect_nodes(self, a, b, peer_advertises_v2=False):
         from_connection = self.nodes[a]
         to_connection = self.nodes[b]
         from_num_peers = 1 + len(from_connection.getpeerinfo())
         to_num_peers = 1 + len(to_connection.getpeerinfo())
         ip_port = "127.0.0.1:" + str(p2p_port(b))
-        from_connection.addnode(ip_port, "onetry")
+
+        if peer_advertises_v2:
+            from_connection.addnode(ip_port, "onetry", True)
+        else:
+            # skip the optional third argument (default false) for
+            # compatibility with older clients
+            from_connection.addnode(ip_port, "onetry")
+
+        min_verack_msg_bytes = 21 if peer_advertises_v2 else 24
         # poll until version handshake complete to avoid race conditions
         # with transaction relaying
         # See comments in net_processing:
         # * Must have a version message before anything else
         # * Must have a verack message before anything else
-        self.wait_until(lambda: sum(peer['version'] != 0 for peer in from_connection.getpeerinfo()) == from_num_peers)
-        self.wait_until(lambda: sum(peer['version'] != 0 for peer in to_connection.getpeerinfo()) == to_num_peers)
-        self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()) == from_num_peers)
-        self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in to_connection.getpeerinfo()) == to_num_peers)
-        # The message bytes are counted before processing the message, so make
-        # sure it was fully processed by waiting for a ping.
-        self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 32 for peer in from_connection.getpeerinfo()) == from_num_peers)
-        self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 32 for peer in to_connection.getpeerinfo()) == to_num_peers)
+        # * The message bytes are counted before processing the message, so make
+        #   sure it was fully processed by waiting for a ping.
+
+        def check_initiator_handshake():
+            peerinfo = from_connection.getpeerinfo()
+            if len(peerinfo) != from_num_peers:
+                return False
+
+            responder = list(filter(lambda x: x["addr"] == ip_port, peerinfo))[0]
+            return responder['version'] != 0 and responder['bytesrecv_per_msg'].pop('verack', 0) >= min_verack_msg_bytes and responder['bytesrecv_per_msg'].pop("pong", 0) >= 29
+
+        def check_responder_handshake():
+            peerinfo = to_connection.getpeerinfo()
+            if len(peerinfo) != to_num_peers:
+                return False
+
+            initiator = list(filter(lambda x: x["addrbind"] == ip_port, peerinfo))[0]
+            return initiator['version'] != 0 and initiator['bytesrecv_per_msg'].pop('verack', 0) >= min_verack_msg_bytes and initiator['bytesrecv_per_msg'].pop("pong", 0) >= 29
+
+        self.wait_until(check_initiator_handshake)
+        self.wait_until(check_responder_handshake)
 
     def disconnect_nodes(self, a, b):
         def disconnect_nodes_helper(node_a, node_b):
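
For callers, connect_nodes() keeps its old behaviour by default and only advertises the peer as v2 when the new third argument is passed. The 24/21-byte verack thresholds and the 29-byte pong floor correspond to the per-message framing overhead under the v1 and draft BIP324 v2 transports; the arithmetic below is a back-of-the-envelope check reflecting my reading of that framing, not something stated in the commit:

# Assumed framing breakdown behind the thresholds used above (hypothetical check).
V1_HEADER_BYTES = 4 + 12 + 4 + 4     # magic + command + length + checksum = 24
V2_OVERHEAD_BYTES = 3 + 1 + 1 + 16   # length + header + short msg type + tag = 21

assert V1_HEADER_BYTES == 24         # v1 verack: empty payload, header only
assert V2_OVERHEAD_BYTES == 21       # v2 verack: empty payload
assert V2_OVERHEAD_BYTES + 8 == 29   # pong carries an 8-byte nonce
assert V1_HEADER_BYTES + 8 >= 29     # a v1 pong (32 bytes) also clears the floor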

test/functional/test_runner.py (+1 line)
@@ -232,6 +232,7 @@
     'p2p_invalid_locator.py',
     'p2p_invalid_block.py',
     'p2p_invalid_tx.py',
+    'p2p_v2.py',
     'p2p_v2_transport.py',
     'example_test.py',
     'wallet_txn_doublespend.py --legacy-wallet',
