diff --git a/.github/workflows/cache-factory.yml b/.github/workflows/cache-factory.yml index 798483bce37..36b294e40e1 100644 --- a/.github/workflows/cache-factory.yml +++ b/.github/workflows/cache-factory.yml @@ -22,7 +22,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: shared-key: stable-cache diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 97ae879f654..3c607a04fb4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,12 +18,7 @@ env: jobs: test: name: Test ${{ matrix.crate }} - runs-on: ${{ fromJSON( - github.repository == 'libp2p/rust-libp2p' && ( - (contains(fromJSON('["libp2p-webrtc", "libp2p"]'), matrix.crate) && '["self-hosted", "linux", "x64", "2xlarge"]') || - (contains(fromJSON('["libp2p-quic", "libp2p-perf"]'), matrix.crate) && '["self-hosted", "linux", "x64", "xlarge"]') || - '["self-hosted", "linux", "x64", "large"]' - ) || '"ubuntu-latest"') }} + runs-on: ubuntu-latest timeout-minutes: 10 needs: gather_published_crates strategy: @@ -41,7 +36,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: shared-key: stable-cache save-if: false @@ -150,7 +145,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: key: ${{ matrix.target }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -175,7 +170,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -195,7 +190,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: key: ${{ matrix.features }} save-if: ${{ github.ref == 'refs/heads/master' }} @@ -212,7 +207,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -238,7 +233,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -254,7 +249,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: save-if: ${{ github.ref == 'refs/heads/master' }} @@ -273,7 +268,7 @@ jobs: - uses: 
r7kamura/rust-problem-matchers@9fe7ca9f6550e5d6358e179d451cc25ea6b54f98 #v1.5.0 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 with: shared-key: stable-cache save-if: false @@ -364,7 +359,7 @@ jobs: steps: - uses: actions/checkout@v5 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - run: cargo install --version 0.10.0 pb-rs --locked @@ -390,7 +385,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - - uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0 + - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - run: cargo metadata --locked --format-version=1 > /dev/null cargo-deny: diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml index 0c0a90043f4..c5dbe2323ac 100644 --- a/.github/workflows/interop-test.yml +++ b/.github/workflows/interop-test.yml @@ -13,7 +13,7 @@ jobs: run-transport-interop: name: Run transport interoperability tests if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository - runs-on: ${{ fromJSON(github.repository == 'libp2p/rust-libp2p' && '["self-hosted", "linux", "x64", "4xlarge"]' || '"ubuntu-latest"') }} + runs-on: ubuntu-latest strategy: matrix: flavour: [chromium, native] diff --git a/Cargo.lock b/Cargo.lock index 7fa7072c51c..421ae65f53f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -61,12 +61,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "allocator-api2" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" - [[package]] name = "anes" version = "0.1.6" @@ -1598,8 +1592,8 @@ dependencies = [ "aho-corasick", "bstr", "log", - "regex-automata 0.4.9", - "regex-syntax 0.8.5", + "regex-automata", + "regex-syntax", ] [[package]] @@ -1666,8 +1660,6 @@ version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ - "allocator-api2", - "equivalent", "foldhash", ] @@ -2422,7 +2414,7 @@ checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libp2p" -version = "0.56.0" +version = "0.56.1" dependencies = [ "bytes", "either", @@ -2432,7 +2424,7 @@ dependencies = [ "libp2p-allow-block-list", "libp2p-autonat", "libp2p-connection-limits", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-dcutr", "libp2p-dns", "libp2p-floodsub", @@ -2446,14 +2438,14 @@ dependencies = [ "libp2p-mplex", "libp2p-noise", "libp2p-ping", - "libp2p-plaintext 0.43.0", + "libp2p-plaintext", "libp2p-pnet", "libp2p-quic", "libp2p-relay", "libp2p-rendezvous", "libp2p-request-response", - "libp2p-swarm 0.47.0", - "libp2p-tcp 0.44.0", + "libp2p-swarm", + "libp2p-tcp", "libp2p-tls", "libp2p-uds", "libp2p-upnp", @@ -2461,10 +2453,10 @@ dependencies = [ "libp2p-websocket", "libp2p-websocket-websys", "libp2p-webtransport-websys", - "libp2p-yamux 0.47.0", + "libp2p-yamux", "multiaddr", "pin-project", - "rw-stream-sink 0.4.0", + "rw-stream-sink", "thiserror 2.0.12", "tokio", "tracing-subscriber", @@ -2474,11 +2466,11 @@ dependencies = [ name = "libp2p-allow-block-list" version = "0.6.0" dependencies = [ - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 
0.47.0", + "libp2p-swarm", "libp2p-swarm-derive", - "libp2p-swarm-test 0.6.0", + "libp2p-swarm-test", "tokio", ] @@ -2492,14 +2484,14 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identify", "libp2p-identity", "libp2p-request-response", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", + "libp2p-swarm", + "libp2p-swarm-test", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand 0.8.5", "rand_core 0.6.4", "thiserror 2.0.12", @@ -2513,13 +2505,13 @@ dependencies = [ name = "libp2p-connection-limits" version = "0.6.0" dependencies = [ - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identify", "libp2p-identity", "libp2p-ping", - "libp2p-swarm 0.47.0", + "libp2p-swarm", "libp2p-swarm-derive", - "libp2p-swarm-test 0.6.0", + "libp2p-swarm-test", "quickcheck-ext", "rand 0.8.5", "tokio", @@ -2538,41 +2530,16 @@ dependencies = [ "libp2p-noise", "multiaddr", "multihash", - "multistream-select 0.13.0", + "multistream-select", "parking_lot", "pin-project", "quick-protobuf", "rand 0.8.5", - "rw-stream-sink 0.4.0", + "rw-stream-sink", "thiserror 2.0.12", "tokio", "tracing", - "unsigned-varint 0.8.0", - "web-time 1.1.0", -] - -[[package]] -name = "libp2p-core" -version = "0.43.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d28e2d2def7c344170f5c6450c0dbe3dfef655610dbfde2f6ac28a527abbe36" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - "libp2p-identity", - "multiaddr", - "multihash", - "multistream-select 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot", - "pin-project", - "quick-protobuf", - "rand 0.8.5", - "rw-stream-sink 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "thiserror 2.0.12", - "tracing", - "unsigned-varint 0.8.0", + "unsigned-varint", "web-time 1.1.0", ] @@ -2586,17 +2553,17 @@ dependencies = [ "futures-bounded", "futures-timer", "hashlink", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identify", "libp2p-identity", - "libp2p-plaintext 0.43.0", + "libp2p-plaintext", "libp2p-relay", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", - "libp2p-tcp 0.44.0", - "libp2p-yamux 0.47.0", + "libp2p-swarm", + "libp2p-swarm-test", + "libp2p-tcp", + "libp2p-yamux", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "thiserror 2.0.12", "tokio", "tracing", @@ -2611,7 +2578,7 @@ dependencies = [ "async-trait", "futures", "hickory-resolver", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "parking_lot", "smallvec", @@ -2629,11 +2596,11 @@ dependencies = [ "cuckoofilter", "fnv", "futures", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.47.0", + "libp2p-swarm", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand 0.8.5", "smallvec", "thiserror 2.0.12", @@ -2656,13 +2623,13 @@ dependencies = [ "getrandom 0.2.15", "hashlink", "hex_fmt", - "libp2p-core 0.43.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.47.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-swarm-test 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-swarm", + "libp2p-swarm-test", "prometheus-client", "quick-protobuf", - "quick-protobuf-codec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", "regex", @@ -2683,12 +2650,12 @@ dependencies = [ "futures", 
"futures-bounded", "futures-timer", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", + "libp2p-swarm", + "libp2p-swarm-test", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "smallvec", "thiserror 2.0.12", "tokio", @@ -2734,15 +2701,15 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identify", "libp2p-identity", "libp2p-noise", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", - "libp2p-yamux 0.47.0", + "libp2p-swarm", + "libp2p-swarm-test", + "libp2p-yamux", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", "serde", @@ -2763,10 +2730,10 @@ dependencies = [ "futures", "hickory-proto", "if-watch", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", + "libp2p-swarm", + "libp2p-swarm-test", "rand 0.8.5", "smallvec", "socket2 0.6.0", @@ -2779,12 +2746,12 @@ dependencies = [ name = "libp2p-memory-connection-limits" version = "0.5.0" dependencies = [ - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identify", "libp2p-identity", - "libp2p-swarm 0.47.0", + "libp2p-swarm", "libp2p-swarm-derive", - "libp2p-swarm-test 0.6.0", + "libp2p-swarm-test", "memory-stats", "sysinfo", "tokio", @@ -2796,7 +2763,7 @@ name = "libp2p-metrics" version = "0.17.1" dependencies = [ "futures", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-dcutr", "libp2p-gossipsub", "libp2p-identify", @@ -2804,7 +2771,7 @@ dependencies = [ "libp2p-kad", "libp2p-ping", "libp2p-relay", - "libp2p-swarm 0.47.0", + "libp2p-swarm", "pin-project", "prometheus-client", "web-time 1.1.0", @@ -2818,11 +2785,11 @@ dependencies = [ "bytes", "criterion", "futures", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "libp2p-muxer-test-harness", - "libp2p-plaintext 0.43.0", - "libp2p-tcp 0.44.0", + "libp2p-plaintext", + "libp2p-tcp", "nohash-hasher", "parking_lot", "quickcheck-ext", @@ -2831,7 +2798,7 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "unsigned-varint 0.8.0", + "unsigned-varint", ] [[package]] @@ -2841,7 +2808,7 @@ dependencies = [ "futures", "futures-timer", "futures_ringbuf", - "libp2p-core 0.43.1", + "libp2p-core", "tracing", ] @@ -2853,7 +2820,7 @@ dependencies = [ "bytes", "futures", "futures_ringbuf", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "multiaddr", "multihash", @@ -2875,10 +2842,10 @@ version = "0.1.0" dependencies = [ "hashlink", "libp2p", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", + "libp2p-swarm", + "libp2p-swarm-test", "serde_json", "tokio", ] @@ -2893,13 +2860,13 @@ dependencies = [ "futures-bounded", "futures-timer", "libp2p", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", - "libp2p-tcp 0.44.0", + "libp2p-swarm", + "libp2p-swarm-test", + "libp2p-tcp", "libp2p-tls", - "libp2p-yamux 0.47.0", + "libp2p-yamux", "serde", "serde_json", "thiserror 2.0.12", @@ -2915,10 +2882,10 @@ version = "0.47.0" dependencies = [ "futures", "futures-timer", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", + "libp2p-swarm", + "libp2p-swarm-test", "quickcheck-ext", "rand 0.8.5", "tokio", @@ -2934,43 +2901,27 @@ dependencies = [ "bytes", "futures", "futures_ringbuf", - "libp2p-core 0.43.1", + "libp2p-core", 
"libp2p-identity", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "quickcheck-ext", "tracing", "tracing-subscriber", ] -[[package]] -name = "libp2p-plaintext" -version = "0.43.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e659439578fc6d305da8303834beb9d62f155f40e7f5b9d81c9f2b2c69d1926" -dependencies = [ - "asynchronous-codec", - "bytes", - "futures", - "libp2p-core 0.43.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-identity", - "quick-protobuf", - "quick-protobuf-codec 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tracing", -] - [[package]] name = "libp2p-pnet" version = "0.26.0" dependencies = [ "futures", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "libp2p-noise", - "libp2p-swarm 0.47.0", - "libp2p-tcp 0.44.0", + "libp2p-swarm", + "libp2p-tcp", "libp2p-websocket", - "libp2p-yamux 0.47.0", + "libp2p-yamux", "pin-project", "quickcheck-ext", "rand 0.8.5", @@ -2987,13 +2938,13 @@ dependencies = [ "futures", "futures-timer", "if-watch", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "libp2p-muxer-test-harness", "libp2p-noise", - "libp2p-tcp 0.44.0", + "libp2p-tcp", "libp2p-tls", - "libp2p-yamux 0.47.0", + "libp2p-yamux", "quickcheck", "quinn", "rand 0.8.5", @@ -3016,15 +2967,15 @@ dependencies = [ "futures", "futures-bounded", "futures-timer", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "libp2p-ping", - "libp2p-plaintext 0.43.0", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", - "libp2p-yamux 0.47.0", + "libp2p-plaintext", + "libp2p-swarm", + "libp2p-swarm-test", + "libp2p-yamux", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", "static_assertions", @@ -3044,13 +2995,13 @@ dependencies = [ "bimap", "futures", "futures-timer", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "libp2p-request-response", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", + "libp2p-swarm", + "libp2p-swarm-test", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand 0.8.5", "thiserror 2.0.12", "tokio", @@ -3069,10 +3020,10 @@ dependencies = [ "futures", "futures-bounded", "futures_ringbuf", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", + "libp2p-swarm", + "libp2p-swarm-test", "rand 0.8.5", "serde", "serde_json", @@ -3105,10 +3056,10 @@ name = "libp2p-stream" version = "0.4.0-alpha" dependencies = [ "futures", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.47.0", - "libp2p-swarm-test 0.6.0", + "libp2p-swarm", + "libp2p-swarm-test", "rand 0.8.5", "tokio", "tracing", @@ -3126,16 +3077,16 @@ dependencies = [ "futures-timer", "getrandom 0.2.15", "hashlink", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identify", "libp2p-identity", "libp2p-kad", "libp2p-ping", - "libp2p-plaintext 0.43.0", + "libp2p-plaintext", "libp2p-swarm-derive", - "libp2p-swarm-test 0.6.0", - "libp2p-yamux 0.47.0", - "multistream-select 0.13.0", + "libp2p-swarm-test", + "libp2p-yamux", + "multistream-select", "quickcheck-ext", "rand 0.8.5", "smallvec", @@ -3147,27 +3098,6 @@ dependencies = [ "web-time 1.1.0", ] -[[package]] -name = "libp2p-swarm" -version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aa762e5215919a34e31c35d4b18bf2e18566ecab7f8a3d39535f4a3068f8b62" -dependencies = [ - "either", - "fnv", - "futures", - "futures-timer", - 
"libp2p-core 0.43.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-identity", - "lru", - "multistream-select 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.8.5", - "smallvec", - "tokio", - "tracing", - "web-time 1.1.0", -] - [[package]] name = "libp2p-swarm-derive" version = "0.35.1" @@ -3184,30 +3114,12 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "libp2p-core 0.43.1", - "libp2p-identity", - "libp2p-plaintext 0.43.0", - "libp2p-swarm 0.47.0", - "libp2p-tcp 0.44.0", - "libp2p-yamux 0.47.0", - "tracing", -] - -[[package]] -name = "libp2p-swarm-test" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b149112570d507efe305838c7130835955a0b1147aa8051c1c3867a83175cf6" -dependencies = [ - "async-trait", - "futures", - "futures-timer", - "libp2p-core 0.43.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-core", "libp2p-identity", - "libp2p-plaintext 0.43.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-swarm 0.47.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-tcp 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libp2p-yamux 0.47.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-plaintext", + "libp2p-swarm", + "libp2p-tcp", + "libp2p-yamux", "tracing", ] @@ -3219,29 +3131,13 @@ dependencies = [ "futures-timer", "if-watch", "libc", - "libp2p-core 0.43.1", + "libp2p-core", "socket2 0.6.0", "tokio", "tracing", "tracing-subscriber", ] -[[package]] -name = "libp2p-tcp" -version = "0.44.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65b4e030c52c46c8d01559b2b8ca9b7c4185f10576016853129ca1fe5cd1a644" -dependencies = [ - "futures", - "futures-timer", - "if-watch", - "libc", - "libp2p-core 0.43.1 (registry+https://github.com/rust-lang/crates.io-index)", - "socket2 0.5.9", - "tokio", - "tracing", -] - [[package]] name = "libp2p-tls" version = "0.6.2" @@ -3249,10 +3145,10 @@ dependencies = [ "futures", "futures-rustls", "hex-literal", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", - "libp2p-swarm 0.47.0", - "libp2p-yamux 0.47.0", + "libp2p-swarm", + "libp2p-yamux", "rcgen", "ring", "rustls", @@ -3268,7 +3164,7 @@ name = "libp2p-uds" version = "0.43.0" dependencies = [ "futures", - "libp2p-core 0.43.1", + "libp2p-core", "tempfile", "tokio", "tracing", @@ -3281,8 +3177,8 @@ dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core 0.43.1", - "libp2p-swarm 0.47.0", + "libp2p-core", + "libp2p-swarm", "tokio", "tracing", ] @@ -3296,7 +3192,7 @@ dependencies = [ "futures-timer", "hex", "if-watch", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "libp2p-noise", "libp2p-webrtc-utils", @@ -3322,11 +3218,11 @@ dependencies = [ "futures", "hex", "hex-literal", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "libp2p-noise", "quick-protobuf", - "quick-protobuf-codec 0.3.1", + "quick-protobuf-codec", "rand 0.8.5", "serde", "sha2", @@ -3343,7 +3239,7 @@ dependencies = [ "getrandom 0.2.15", "hex", "js-sys", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "libp2p-webrtc-utils", "send_wrapper 0.6.0", @@ -3361,14 +3257,14 @@ dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-dns", "libp2p-identity", - "libp2p-tcp 0.44.0", + "libp2p-tcp", "parking_lot", "pin-project-lite", "rcgen", - "rw-stream-sink 0.4.0", + "rw-stream-sink", "soketto", "thiserror 2.0.12", "tokio", 
@@ -3384,10 +3280,10 @@ dependencies = [ "bytes", "futures", "js-sys", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "libp2p-noise", - "libp2p-yamux 0.47.0", + "libp2p-yamux", "send_wrapper 0.6.0", "thiserror 2.0.12", "tracing", @@ -3401,7 +3297,7 @@ version = "0.5.1" dependencies = [ "futures", "js-sys", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "libp2p-noise", "multiaddr", @@ -3421,7 +3317,7 @@ version = "0.47.0" dependencies = [ "either", "futures", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-muxer-test-harness", "thiserror 2.0.12", "tokio", @@ -3430,21 +3326,6 @@ dependencies = [ "yamux 0.13.4", ] -[[package]] -name = "libp2p-yamux" -version = "0.47.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f15df094914eb4af272acf9adaa9e287baa269943f32ea348ba29cfb9bfc60d8" -dependencies = [ - "either", - "futures", - "libp2p-core 0.43.1 (registry+https://github.com/rust-lang/crates.io-index)", - "thiserror 2.0.12", - "tracing", - "yamux 0.12.1", - "yamux 0.13.4", -] - [[package]] name = "libredox" version = "0.1.3" @@ -3502,22 +3383,13 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "lru" -version = "0.12.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" -dependencies = [ - "hashbrown 0.15.2", -] - [[package]] name = "matchers" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" dependencies = [ - "regex-automata 0.1.10", + "regex-automata", ] [[package]] @@ -3664,7 +3536,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.8.0", + "unsigned-varint", "url", ] @@ -3690,7 +3562,7 @@ dependencies = [ "quickcheck", "rand 0.8.5", "serde", - "unsigned-varint 0.8.0", + "unsigned-varint", ] [[package]] @@ -3702,27 +3574,13 @@ dependencies = [ "futures_ringbuf", "pin-project", "quickcheck-ext", - "rw-stream-sink 0.4.0", + "rw-stream-sink", "smallvec", "tokio", "tokio-util", "tracing", "tracing-subscriber", - "unsigned-varint 0.8.0", -] - -[[package]] -name = "multistream-select" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" -dependencies = [ - "bytes", - "futures", - "log", - "pin-project", - "smallvec", - "unsigned-varint 0.7.2", + "unsigned-varint", ] [[package]] @@ -3846,12 +3704,11 @@ dependencies = [ [[package]] name = "nu-ansi-term" -version = "0.46.0" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" dependencies = [ - "overload", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -4144,12 +4001,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "p256" version = "0.13.2" @@ -4408,9 +4259,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.23.1" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cf41c1a7c32ed72abe5082fb19505b969095c12da9f5732a4bc9878757fd087c" +checksum = "e4500adecd7af8e0e9f4dbce15cfee07ce913fbf6ad605cc468b83f2d531ee94" dependencies = [ "dtoa", "itoa", @@ -4420,9 +4271,9 @@ dependencies = [ [[package]] name = "prometheus-client-derive-encode" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" +checksum = "9adf1691c04c0a5ff46ff8f262b58beb07b0dbb61f96f9f54f6cbd82106ed87f" dependencies = [ "proc-macro2", "quote", @@ -4472,20 +4323,7 @@ dependencies = [ "quick-protobuf", "quickcheck-ext", "thiserror 2.0.12", - "unsigned-varint 0.8.0", -] - -[[package]] -name = "quick-protobuf-codec" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" -dependencies = [ - "asynchronous-codec", - "bytes", - "quick-protobuf", - "thiserror 1.0.69", - "unsigned-varint 0.8.0", + "unsigned-varint", ] [[package]] @@ -4759,17 +4597,8 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.9", - "regex-syntax 0.8.5", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", + "regex-automata", + "regex-syntax", ] [[package]] @@ -4780,15 +4609,9 @@ checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.5", + "regex-syntax", ] -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - [[package]] name = "regex-syntax" version = "0.8.5" @@ -5125,17 +4948,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "rw-stream-sink" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" -dependencies = [ - "futures", - "pin-project", - "static_assertions", -] - [[package]] name = "ryu" version = "1.0.20" @@ -6152,14 +5964,14 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "matchers", "nu-ansi-term", "once_cell", - "regex", + "regex-automata", "sharded-slab", "smallvec", "thread_local", @@ -6261,12 +6073,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "unsigned-varint" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" - [[package]] name = "unsigned-varint" version = "0.8.0" @@ -6766,7 +6572,7 @@ version = "0.1.0" dependencies = [ "futures", "getrandom 0.2.15", - "libp2p-core 0.43.1", + "libp2p-core", "libp2p-identity", "libp2p-noise", "libp2p-webtransport-websys", diff --git a/Cargo.toml b/Cargo.toml index e51faa159be..36c148cffee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,7 +74,7 @@ rust-version = "1.83.0" edition = "2021" [workspace.dependencies] 
-libp2p = { version = "0.56.0", path = "libp2p" } +libp2p = { version = "0.56.1", path = "libp2p" } libp2p-allow-block-list = { version = "0.6.0", path = "misc/allow-block-list" } libp2p-autonat = { version = "0.15.0", path = "protocols/autonat" } libp2p-connection-limits = { version = "0.6.0", path = "misc/connection-limits" } @@ -130,7 +130,7 @@ hickory-resolver = { version = "0.25.2", default-features = false } multiaddr = "0.18.1" multihash = "0.19.1" multistream-select = { version = "0.13.0", path = "misc/multistream-select" } -prometheus-client = "0.23" +prometheus-client = "0.24" quick-protobuf-codec = { version = "0.3.1", path = "misc/quick-protobuf-codec" } quickcheck = { package = "quickcheck-ext", path = "misc/quickcheck-ext" } rcgen = "0.13" diff --git a/examples/README.md b/examples/README.md index b1fb9f1f104..2e397960b76 100644 --- a/examples/README.md +++ b/examples/README.md @@ -24,19 +24,26 @@ Each example includes its own README.md file with specific instructions on how t ## Individual libp2p features -- [Chat](./chat) A basic chat application demonstrating libp2p and the mDNS and Gossipsub protocols. -- [Distributed key-value store](./distributed-key-value-store) A basic key value store demonstrating libp2p and the mDNS and Kademlia protocol. +- [Chat](./chat) A basic chat application demonstrating libp2p and the [mDNS] and [Gossipsub] protocols. +- [Distributed key-value store](./distributed-key-value-store) A basic key value store demonstrating libp2p and the [mDNS] and [Kademlia] protocol. - [File sharing application](./file-sharing) Basic file sharing application with peers either providing or locating and getting files by name. - While obviously showcasing how to build a basic file sharing application with the Kademlia and - Request-Response protocol, the actual goal of this example is **to show how to integrate + While obviously showcasing how to build a basic file sharing application with the [Kademlia] and + [Request-Response] protocol, the actual goal of this example is **to show how to integrate rust-libp2p into a larger application**. -- [IPFS Kademlia](./ipfs-kad) Demonstrates how to perform Kademlia queries on the IPFS network. +- [IPFS Kademlia](./ipfs-kad) Demonstrates how to perform [Kademlia] queries on the [IPFS] network. -- [IPFS Private](./ipfs-private) Implementation using the gossipsub, ping and identify protocols to implement the ipfs private swarms feature. +- [IPFS Private](./ipfs-private) Implementation using the [Gossipsub], ping and identify protocols to implement the IPFS private swarms feature. - [Ping](./ping) Small `ping` clone, sending a ping to a peer, expecting a pong as a response. See [tutorial](../libp2p/src/tutorials/ping.rs) for a step-by-step guide building the example. -- [Rendezvous](./rendezvous) Rendezvous Protocol. See [specs](https://github.com/libp2p/specs/blob/master/rendezvous/README.md). +- [Rendezvous](./rendezvous) [Rendezvous] Protocol. See [specs](https://github.com/libp2p/specs/blob/master/rendezvous/README.md). 
+ +[mDNS]: https://github.com/libp2p/specs/blob/master/discovery/mdns.md +[Gossipsub]: https://github.com/libp2p/specs/tree/master/pubsub/gossipsub +[Kademlia]: https://github.com/libp2p/specs/blob/master/kad-dht/README.md +[Request-Response]: https://en.wikipedia.org/wiki/Request%E2%80%93response +[IPFS]: https://ipfs.tech/ +[Rendezvous]: https://github.com/libp2p/specs/blob/master/rendezvous/README.md diff --git a/libp2p/CHANGELOG.md b/libp2p/CHANGELOG.md index cb27ebe68e5..5c6c4a6865c 100644 --- a/libp2p/CHANGELOG.md +++ b/libp2p/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.56.1 + +- Fix `metrics` delegation to gossipsub protocol. + See [PR 6180](https://github.com/libp2p/rust-libp2p/pull/6180) + ## 0.56.0 - Remove `async-std` support. diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml index 48f4c9477bd..dd1952fb93e 100644 --- a/libp2p/Cargo.toml +++ b/libp2p/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p" edition.workspace = true rust-version = { workspace = true } description = "Peer-to-peer networking library" -version = "0.56.0" +version = "0.56.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -65,7 +65,7 @@ kad = ["dep:libp2p-kad", "libp2p-metrics?/kad"] macros = ["libp2p-swarm/macros"] mdns = ["dep:libp2p-mdns"] memory-connection-limits = ["dep:libp2p-memory-connection-limits"] -metrics = ["dep:libp2p-metrics"] +metrics = ["dep:libp2p-metrics", "libp2p-gossipsub?/metrics"] noise = ["dep:libp2p-noise"] ping = ["dep:libp2p-ping", "libp2p-metrics?/ping"] plaintext = ["dep:libp2p-plaintext"] diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index fbc2e28dc83..085a7bfefa1 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,4 +1,14 @@ ## 0.50.0 +- Prevent mesh exceeding mesh_n_high. + See [PR 6184](https://github.com/libp2p/rust-libp2p/pull/6184) + +- Fix underflow when shuffling peers after pruning. + See [PR 6183](https://github.com/libp2p/rust-libp2p/pull/6183) + + +- Implement gossipsub 1.3 extensions control message. + See [PR 6119](https://github.com/libp2p/rust-libp2p/pull/6119) + - Remove peer penalty for duplicate messages. See [PR 6112](https://github.com/libp2p/rust-libp2p/pull/6112) @@ -13,6 +23,12 @@ - Fix incorrect default values in ConfigBuilder See [PR 6113](https://github.com/libp2p/rust-libp2p/pull/6113) + +- Remove duplicated config `set_topic_max_transmit_size` method, prefer `max_transmit_size_for_topic`. + See [PR 6173](https://github.com/libp2p/rust-libp2p/pull/6173). + +- Switch the internal `async-channel` used to dispatch messages from `NetworkBehaviour` to the `ConnectionHandler` + with an internal priority queue. See [PR 6175](https://github.com/libp2p/rust-libp2p/pull/6175) - Switch the internal `async-channel` used to dispatch messages from `NetworkBehaviour` to the `ConnectionHandler` with an internal priority queue.
See [PR XXXX](https://github.com/libp2p/rust-libp2p/pull/XXXX) diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 1e75a65c143..896b05acfb7 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -13,6 +13,7 @@ categories = ["network-programming", "asynchronous"] [features] wasm-bindgen = ["getrandom/js", "futures-timer/wasm-bindgen"] metrics = ["prometheus-client"] +partial_messages = [] [dependencies] async-channel = "2.3.1" @@ -28,23 +29,23 @@ getrandom = { workspace = true } hashlink = { workspace = true } hex_fmt = "0.3.0" web-time = { workspace = true } -# Libp2p crates, updated to use crates.io versions so that we can use this gossipsub fork with -# crates.io libp2p -libp2p-core = "0.43" -libp2p-identity = { version = "0.2", features = ["rand"] } -libp2p-swarm = "0.47" +libp2p-core = { workspace = true } +libp2p-identity = { workspace = true, features = ["rand"] } +libp2p-swarm = { workspace = true } quick-protobuf = "0.8" -quick-protobuf-codec = "0.3.1" +quick-protobuf-codec = { workspace = true } rand = "0.8" regex = "1.10.5" serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" tracing = { workspace = true } -prometheus-client = { version = "0.23", optional = true } +# Metrics dependencies +prometheus-client = { workspace = true, optional = true } [dev-dependencies] -libp2p-swarm-test = { version = "0.6.0", features = ["tokio"] } +libp2p-core = { workspace = true } +libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } tokio = { workspace = true, features = ["rt", "rt-multi-thread", "time", "macros"] } diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index a0a3a16f0e7..5eead334f38 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -68,12 +68,17 @@ use crate::{ topic::{Hasher, Topic, TopicHash}, transform::{DataTransform, IdentityTransform}, types::{ - ControlAction, Graft, IDontWant, IHave, IWant, Message, MessageAcceptance, MessageId, - PeerDetails, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription, + ControlAction, Extensions, Graft, IDontWant, IHave, IWant, Message, MessageAcceptance, + MessageId, PeerDetails, PeerInfo, PeerKind, Prune, RawMessage, RpcOut, Subscription, SubscriptionAction, }, FailedMessages, PublishError, SubscriptionError, TopicScoreParams, ValidationError, }; +#[cfg(feature = "partial_messages")] +use crate::{ + partial::Partial, + types::{PartialMessage, PartialSubOpts}, +}; #[cfg(test)] mod tests; @@ -141,6 +146,20 @@ pub enum Event { /// The decompressed message itself. message: Message, }, + /// A new partial message has been received. + #[cfg(feature = "partial_messages")] + Partial { + /// The topic of the partial message. + topic_id: TopicHash, + /// The peer that forwarded us this message. + propagation_source: PeerId, + /// The group ID that identifies the complete logical message. + group_id: Vec, + /// The partial message data. + message: Option>, + /// The partial message metadata, what peer has and wants. + metadata: Option>, + }, /// A remote subscribed to a topic. Subscribed { /// Remote that has subscribed. @@ -287,6 +306,14 @@ pub struct Behaviour { /// Overlay network of connected peers - Maps topics to connected gossipsub peers. mesh: HashMap>, + /// Partial options when subscribing topics. 
+ #[cfg(feature = "partial_messages")] + partial_opts: HashMap, + + /// Cached partial messages. + #[cfg(feature = "partial_messages")] + cached_partials: HashMap, Box>>, + /// Map of topics to list of peers that we publish to, but don't subscribe to. fanout: HashMap>, @@ -450,6 +477,10 @@ where data_transform, failed_messages: Default::default(), gossip_promises: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), + #[cfg(feature = "partial_messages")] + cached_partials: Default::default(), }) } @@ -513,7 +544,12 @@ where /// /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] if we were already /// subscribed. - pub fn subscribe(&mut self, topic: &Topic) -> Result { + pub fn subscribe( + &mut self, + topic: &Topic, + #[cfg(feature = "partial_messages")] requests_partial: bool, + #[cfg(feature = "partial_messages")] supports_partial: bool, + ) -> Result { let topic_hash = topic.hash(); if !self.subscription_filter.can_subscribe(&topic_hash) { return Err(SubscriptionError::NotAllowed); @@ -527,13 +563,31 @@ where // send subscription request to all peers for peer_id in self.connected_peers.keys().copied().collect::>() { tracing::debug!(%peer_id, "Sending SUBSCRIBE to peer"); - let event = RpcOut::Subscribe(topic_hash.clone()); + let event = RpcOut::Subscribe { + topic: topic_hash.clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts { + requests_partial, + supports_partial, + }, + }; self.send_message(peer_id, event); } // call JOIN(topic) // this will add new peers to the mesh for the topic self.join(&topic_hash); + #[cfg(feature = "partial_messages")] + { + self.partial_opts.insert( + topic_hash.clone(), + PartialSubOpts { + requests_partial, + supports_partial, + }, + ); + } + tracing::debug!(%topic, "Subscribed to topic"); Ok(true) } @@ -560,6 +614,10 @@ where // call LEAVE(topic) // this will remove the topic from the mesh self.leave(&topic_hash); + #[cfg(feature = "partial_messages")] + { + self.partial_opts.remove(&topic_hash.clone()); + } tracing::debug!(topic=%topic_hash, "Unsubscribed from topic"); true @@ -589,7 +647,6 @@ where return Err(PublishError::MessageTooLarge); } - let mesh_n = self.config.mesh_n_for_topic(&topic); let raw_message = self.build_raw_message(topic, transformed_data)?; // calculate the message id from the un-transformed data @@ -615,113 +672,16 @@ where let topic_hash = raw_message.topic.clone(); - let mut peers_on_topic = self - .connected_peers - .iter() - .filter(|(_, p)| p.topics.contains(&topic_hash)) - .map(|(peer_id, _)| peer_id) - .peekable(); - - if peers_on_topic.peek().is_none() { - return Err(PublishError::NoPeersSubscribedToTopic); - } - - let mut recipient_peers = HashSet::new(); - if self.config.flood_publish() { - // Forward to all peers above score and all explicit peers - recipient_peers.extend(peers_on_topic.filter(|p| { - self.explicit_peers.contains(*p) - || !self - .peer_score - .below_threshold(p, |ts| ts.publish_threshold) - .0 - })); - } else { - match self.mesh.get(&topic_hash) { - // Mesh peers - Some(mesh_peers) => { - // We have a mesh set. We want to make sure to publish to at least `mesh_n` - // peers (if possible). - let needed_extra_peers = mesh_n.saturating_sub(mesh_peers.len()); - - if needed_extra_peers > 0 { - // We don't have `mesh_n` peers in our mesh, we will randomly select extras - // and publish to them. - - // Get a random set of peers that are appropriate to send messages too. 
- let peer_list = get_random_peers( - &self.connected_peers, - &topic_hash, - needed_extra_peers, - |peer| { - !mesh_peers.contains(peer) - && !self.explicit_peers.contains(peer) - && !self - .peer_score - .below_threshold(peer, |ts| ts.publish_threshold) - .0 - }, - ); - recipient_peers.extend(peer_list); - } - - recipient_peers.extend(mesh_peers); - } - // Gossipsub peers - None => { - tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); - // `fanout_peers` is always non-empty if it's `Some`. - let fanout_peers = self - .fanout - .get(&topic_hash) - .filter(|peers| !peers.is_empty()); - // If we have fanout peers add them to the map. - if let Some(peers) = fanout_peers { - for peer in peers { - recipient_peers.insert(*peer); - } - } else { - // We have no fanout peers, select mesh_n of them and add them to the fanout - let new_peers = - get_random_peers(&self.connected_peers, &topic_hash, mesh_n, { - |p| { - !self.explicit_peers.contains(p) - && !self - .peer_score - .below_threshold(p, |ts| ts.publish_threshold) - .0 - } - }); - // Add the new peers to the fanout and recipient peers - self.fanout.insert(topic_hash.clone(), new_peers.clone()); - for peer in new_peers { - tracing::debug!(%peer, "Peer added to fanout"); - recipient_peers.insert(peer); - } - } - // We are publishing to fanout peers - update the time we published - self.fanout_last_pub - .insert(topic_hash.clone(), Instant::now()); - } - } + #[cfg(feature = "partial_messages")] + let recipient_peers = self.get_publish_peers(&topic_hash, |_, peer| { + peer.partial_opts + .get(&topic_hash) + .map(|opts| !opts.requests_partial) + .unwrap_or(true) + }); - // Explicit peers that are part of the topic - recipient_peers - .extend(peers_on_topic.filter(|peer_id| self.explicit_peers.contains(peer_id))); - - // Floodsub peers - for (peer, connections) in &self.connected_peers { - if connections.kind == PeerKind::Floodsub - && connections.topics.contains(&topic_hash) - && !self - .peer_score - .below_threshold(peer, |ts| ts.publish_threshold) - .0 - { - recipient_peers.insert(*peer); - } - } - } + #[cfg(not(feature = "partial_messages"))] + let recipient_peers = self.get_publish_peers(&topic_hash, |_, _| true); // If the message isn't a duplicate and we have sent it to some peers add it to the // duplicate cache and memcache. @@ -778,6 +738,192 @@ where Ok(msg_id) } + // Get Peers from the mesh or fanout to publish a message to + // filtered out further by the provided `f` callback. + fn get_publish_peers( + &mut self, + topic_hash: &TopicHash, + f: impl Fn(&PeerId, &PeerDetails) -> bool, + ) -> HashSet { + let peers_on_topic = self + .connected_peers + .iter() + .filter(|(_, peer)| peer.topics.contains(topic_hash)) + .filter(|(peer_id, _)| { + self.explicit_peers.contains(*peer_id) + || !self + .peer_score + .below_threshold(peer_id, |ts| ts.publish_threshold) + .0 + }) + .filter(|(peer_id, peer_details)| f(peer_id, peer_details)); + + // Forward to all peers above score and all explicit peers + if self.config.flood_publish() { + return peers_on_topic.map(|(peer_id, _)| *peer_id).collect(); + } + + let mesh_n = self.config.mesh_n_for_topic(topic_hash); + let mut recipient_peers = HashSet::new(); + // Explicit peers that are part of the topic and Floodsub peers. 
+ recipient_peers.extend( + peers_on_topic + .clone() + .filter(|(peer_id, peer)| { + self.explicit_peers.contains(peer_id) || peer.kind == PeerKind::Floodsub + }) + .map(|(peer_id, _)| *peer_id), + ); + + match self.mesh.get(topic_hash) { + // Mesh peers + Some(mesh_peers) => { + // We have a mesh set. We want to make sure to publish to at least `mesh_n` + // peers (if possible). + let mesh_peers = peers_on_topic + .clone() + .filter_map(|(peer_id, _)| mesh_peers.get(peer_id)) + .copied() + .collect::>(); + + let needed_extra_peers = mesh_n.saturating_sub(mesh_peers.len()); + if needed_extra_peers > 0 { + // We don't have `mesh_n` peers in our mesh, we will randomly select extras + // and publish to them. + + // Get a random set of peers that are appropriate to send messages to. + let peer_list = + get_random_peers(peers_on_topic, topic_hash, needed_extra_peers, |_, _| { + true + }); + recipient_peers.extend(peer_list); + } + + recipient_peers.extend(mesh_peers); + } + // Gossipsub peers + None => { + tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); + let fanout_peers = peers_on_topic + .clone() + .filter_map(|(peer_id, _)| { + self.fanout + .get(topic_hash) + .and_then(|fanout| fanout.get(peer_id)) + }) + .copied() + .collect::>(); + + // If we have fanout peers add them to the map. + if !fanout_peers.is_empty() { + recipient_peers.extend(fanout_peers); + } else { + // We have no fanout peers, select mesh_n of them and add them to the fanout + let new_peers = + get_random_peers(peers_on_topic, topic_hash, mesh_n, |_, _| true); + // Add the new peers to the fanout and recipient peers + self.fanout.insert(topic_hash.clone(), new_peers.clone()); + for peer in new_peers { + tracing::debug!(%peer, "Peer added to fanout"); + recipient_peers.insert(peer); + } + } + // We are publishing to fanout peers - update the time we published + self.fanout_last_pub + .insert(topic_hash.clone(), Instant::now()); + } + } + + recipient_peers + } + + #[cfg(feature = "partial_messages")] + pub fn publish_partial( + &mut self, + topic: impl Into, + partial_message: P, + ) -> Result<(), PublishError> { + let topic_hash = topic.into(); + + let group_id = partial_message.group_id(); + + let recipient_peers = self.get_publish_peers(&topic_hash, |_, peer| { + peer.partial_opts + .get(&topic_hash) + .map(|opts| opts.supports_partial) + .unwrap_or_default() + }); + let publish_metadata = partial_message.metadata(); + for peer_id in recipient_peers.iter() { + // TODO: this can be optimized, we are going to get the peer again on `send_message` + let Some(peer) = &mut self.connected_peers.get_mut(peer_id) else { + tracing::error!(peer = %peer_id, + "Could not get peer from connected peers, peer doesn't exist in connected peer list"); + continue; + }; + let Some(partial_opts) = peer.partial_opts.get(&topic_hash) else { + tracing::error!(peer = %peer_id, + "Could not get partial subscription options from peer which subscribed for partial messages"); + continue; + }; + + let topic_partials = peer.partial_messages.entry(topic_hash.clone()).or_default(); + let group_partials = topic_partials.entry(group_id.clone()).or_default(); + + // Peer `supports_partial` but doesn't `requests_partial`.
+ if !partial_opts.requests_partial { + self.send_message( + *peer_id, + RpcOut::PartialMessage { + message: None, + metadata: publish_metadata.clone(), + group_id: group_id.clone(), + topic_id: topic_hash.clone(), + }, + ); + continue; + } + + let Ok(action) = partial_message.partial_message_bytes_from_metadata( + group_partials.metadata.as_ref().map(|p| p.as_ref()), + ) else { + tracing::error!(peer = %peer_id, group_id = ?group_id, + "Could not reconstruct message bytes for peer metadata"); + topic_partials.remove(&group_id); + if let PeerScoreState::Active(peer_score) = &mut self.peer_score { + peer_score.reject_invalid_partial(peer_id, &topic_hash); + } + continue; + }; + + // Check if we have new data for the peer. + let Some((message, peer_updated_metadata)) = action.send else { + continue; + }; + + group_partials.metadata = + Some(crate::types::PeerMetadata::Local(peer_updated_metadata)); + + self.send_message( + *peer_id, + RpcOut::PartialMessage { + message: Some(message), + metadata: publish_metadata.clone(), + group_id: group_id.clone(), + topic_id: topic_hash.clone(), + }, + ); + } + + if recipient_peers.is_empty() { + return Err(PublishError::NoPeersSubscribedToTopic); + } + + let cached_topic = self.cached_partials.entry(topic_hash).or_default(); + cached_topic.insert(partial_message.group_id(), Box::new(partial_message)); + Ok(()) + } + /// This function should be called when [`Config::validate_messages()`] is `true` after /// the message got validated by the caller. Messages are stored in the ['Memcache'] and /// validation is expected to be fast enough that the messages should still exist in the cache. @@ -1020,11 +1166,11 @@ where &self.connected_peers, topic_hash, mesh_n - added_peers.len(), - |peer| { - !added_peers.contains(peer) - && !self.explicit_peers.contains(peer) - && !self.peer_score.below_threshold(peer, |_| 0.0).0 - && !self.backoffs.is_backoff_with_slack(topic_hash, peer) + |peer_id, _| { + !added_peers.contains(peer_id) + && !self.explicit_peers.contains(peer_id) + && !self.peer_score.below_threshold(peer_id, |_| 0.0).0 + && !self.backoffs.is_backoff_with_slack(topic_hash, peer_id) }, ); @@ -1114,7 +1260,9 @@ where &self.connected_peers, topic_hash, self.config.prune_peers(), - |p| p != peer && !self.peer_score.below_threshold(p, |_| 0.0).0, + |peer_id, _| { + peer_id != peer && !self.peer_score.below_threshold(peer_id, |_| 0.0).0 + }, ) .into_iter() .map(|p| PeerInfo { peer_id: Some(p) }) @@ -1366,8 +1514,6 @@ where tracing::error!(peer_id = %peer_id, "Peer non-existent when handling graft"); return; }; - // Needs to be here to comply with the borrow checker. - let is_outbound = connected_peer.outbound; // For each topic, if a peer has grafted us, then we necessarily must be in their mesh // and they must be subscribed to the topic. Ensure we have recorded the mapping. 
@@ -1419,8 +1565,6 @@ peer_score.add_penalty(peer_id, 1); // check the flood cutoff - // See: https://github.com/rust-lang/rust-clippy/issues/10061 - #[allow(unknown_lints, clippy::unchecked_duration_subtraction)] let flood_cutoff = (backoff_time + self.config.graft_flood_threshold()) - self.config.prune_backoff(); @@ -1455,10 +1599,9 @@ } // check mesh upper bound and only allow graft if the upper bound is not reached - // or if it is an outbound peer let mesh_n_high = self.config.mesh_n_high_for_topic(&topic_hash); - if peers.len() >= mesh_n_high && !is_outbound { + if peers.len() >= mesh_n_high { to_prune_topics.insert(topic_hash.clone()); continue; } @@ -1523,6 +1666,165 @@ tracing::debug!(peer=%peer_id, "Completed GRAFT handling for peer"); } + fn handle_extensions(&mut self, peer_id: &PeerId, extensions: Extensions) { + let Some(peer) = self.connected_peers.get_mut(peer_id) else { + tracing::error!( + peer=%peer_id, + "Extensions by unknown peer" + ); + return; + }; + + if peer.extensions.is_some() { + tracing::debug!( + peer=%peer_id, + "Peer had already sent us extensions message" + ); + return; + } + + peer.extensions = Some(extensions); + + if extensions.test_extension.unwrap_or(false) + && matches!(peer.kind, PeerKind::Gossipsubv1_3) + { + self.send_message(*peer_id, RpcOut::TestExtension); + } + } + + /// Handle incoming partial message from a peer. + #[cfg(feature = "partial_messages")] + fn handle_partial_message(&mut self, peer_id: &PeerId, partial_message: PartialMessage) { + use crate::types::PeerMetadata; + + tracing::debug!( + peer=%peer_id, + topic=%partial_message.topic_id, + group_id=?partial_message.group_id, + "Received partial message" + ); + + // Check if peer exists. + let Some(peer) = self.connected_peers.get_mut(peer_id) else { + tracing::error!( + peer=%peer_id, + "Partial message from unknown peer" + ); + return; + }; + + let topic_partials = peer + .partial_messages + .entry(partial_message.topic_id.clone()) + .or_default(); + + let group_partials = topic_partials + .entry(partial_message.group_id.clone()) + .or_default(); + + // Check if the local partial data we have from the peer is outdated. + let metadata_updated = match (&mut group_partials.metadata, &partial_message.metadata) { + (None, Some(remote_metadata)) => { + group_partials.metadata = Some(PeerMetadata::Remote(remote_metadata.clone())); + true + } + (Some(PeerMetadata::Remote(ref metadata)), Some(remote_metadata)) => { + if metadata != remote_metadata { + group_partials.metadata = Some(PeerMetadata::Remote(remote_metadata.clone())); + true + } else { + false + } + } + (Some(PeerMetadata::Local(metadata)), Some(remote_metadata)) => { + match metadata.update(remote_metadata) { + Ok(updated) => updated, + Err(err) => { + tracing::debug!( + peer=%peer_id, + topic=%partial_message.topic_id, + group_id=?partial_message.group_id, + err=%err, + "Error updating Partial metadata" + ); + if let PeerScoreState::Active(peer_score) = &mut self.peer_score { + peer_score.reject_invalid_partial(peer_id, &partial_message.topic_id); + } + false + } + } + } + (Some(_), None) | (None, None) => false, + }; + + if !metadata_updated { + return; + } + + // We may have already received other partials from this and other peers, + // but haven't responded to them yet, in those situations just return + // the partial to the application layer.
+ let Some(local_partial) = self + .cached_partials + .get_mut(&partial_message.topic_id) + .and_then(|t| t.get(&partial_message.group_id)) + else { + self.events + .push_back(ToSwarm::GenerateEvent(Event::Partial { + topic_id: partial_message.topic_id, + propagation_source: *peer_id, + group_id: partial_message.group_id, + message: partial_message.message, + metadata: partial_message.metadata, + })); + return; + }; + + let action = match local_partial + .partial_message_bytes_from_metadata(partial_message.metadata.as_deref()) + { + Ok(action) => action, + Err(err) => { + tracing::debug!(peer = %peer_id, group_id = ?partial_message.group_id,err = %err, + "Could not reconstruct message bytes for peer metadata from a received partial"); + // Should we remove the partial from the peer? + topic_partials.remove(&partial_message.group_id); + if let PeerScoreState::Active(peer_score) = &mut self.peer_score { + peer_score.reject_invalid_partial(peer_id, &partial_message.topic_id); + } + return; + } + }; + + // We have new data for that peer. + if let Some((message, peer_updated_metadata)) = action.send { + group_partials.metadata = + Some(crate::types::PeerMetadata::Local(peer_updated_metadata)); + + let cached_metadata = local_partial.metadata().as_slice().to_vec(); + self.send_message( + *peer_id, + RpcOut::PartialMessage { + message: Some(message), + metadata: cached_metadata, + group_id: partial_message.group_id.clone(), + topic_id: partial_message.topic_id.clone(), + }, + ); + } + + if action.need { + self.events + .push_back(ToSwarm::GenerateEvent(Event::Partial { + topic_id: partial_message.topic_id, + propagation_source: *peer_id, + group_id: partial_message.group_id, + message: partial_message.message, + metadata: partial_message.metadata, + })); + } + } + /// Removes the specified peer from the mesh, returning true if it was present. fn remove_peer_from_mesh( &mut self, @@ -1938,6 +2240,11 @@ where match subscription.action { SubscriptionAction::Subscribe => { + #[cfg(feature = "partial_messages")] + { + peer.partial_opts + .insert(topic_hash.clone(), subscription.partial_opts); + } if peer.topics.insert(topic_hash.clone()) { tracing::debug!( peer=%propagation_source, @@ -2187,13 +2494,17 @@ where ); // not enough peers - get mesh_n - current_length more let desired_peers = mesh_n - peers.len(); - let peer_list = - get_random_peers(&self.connected_peers, topic_hash, desired_peers, |peer| { - !peers.contains(peer) - && !explicit_peers.contains(peer) - && !backoffs.is_backoff_with_slack(topic_hash, peer) - && scores.get(peer).map(|r| r.score).unwrap_or_default() >= 0.0 - }); + let peer_list = get_random_peers( + &self.connected_peers, + topic_hash, + desired_peers, + |peer_id, _| { + !peers.contains(peer_id) + && !explicit_peers.contains(peer_id) + && !backoffs.is_backoff_with_slack(topic_hash, peer_id) + && scores.get(peer_id).map(|r| r.score).unwrap_or_default() >= 0.0 + }, + ); for peer in &peer_list { let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); current_topic.push(topic_hash.clone()); @@ -2208,7 +2519,7 @@ where } // too many peers - remove some - if peers.len() > mesh_n_high { + if peers.len() >= mesh_n_high { tracing::debug!( topic=%topic_hash, "HEARTBEAT: Mesh high. 
Topic contains: {} will reduce to: {}", @@ -2228,7 +2539,9 @@ where score_p1.partial_cmp(&score_p2).unwrap_or(Ordering::Equal) }); // shuffle everything except the last retain_scores many peers (the best ones) - shuffled[..peers.len() - self.config.retain_scores()].shuffle(&mut rng); + if peers.len() > self.config.retain_scores() { + shuffled[..peers.len() - self.config.retain_scores()].shuffle(&mut rng); + } // count total number of outbound peers let mut outbound = shuffled @@ -2288,8 +2601,11 @@ where // if we have not enough outbound peers, graft to some new outbound peers if outbound < mesh_outbound_min { let needed = mesh_outbound_min - outbound; - let peer_list = - get_random_peers(&self.connected_peers, topic_hash, needed, |peer_id| { + let peer_list = get_random_peers( + &self.connected_peers, + topic_hash, + needed, + |peer_id, _| { !peers.contains(peer_id) && !explicit_peers.contains(peer_id) && !backoffs.is_backoff_with_slack(topic_hash, peer_id) @@ -2298,7 +2614,8 @@ where .connected_peers .get(peer_id) .is_some_and(|peer| peer.outbound) - }); + }, + ); for peer in &peer_list { let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); @@ -2364,7 +2681,7 @@ where &self.connected_peers, topic_hash, self.config.opportunistic_graft_peers(), - |peer_id| { + |peer_id, _| { !peers.contains(peer_id) && !explicit_peers.contains(peer_id) && !backoffs.is_backoff_with_slack(topic_hash, peer_id) @@ -2456,15 +2773,19 @@ where ); let needed_peers = mesh_n - peers.len(); let explicit_peers = &self.explicit_peers; - let new_peers = - get_random_peers(&self.connected_peers, topic_hash, needed_peers, |peer_id| { + let new_peers = get_random_peers( + &self.connected_peers, + topic_hash, + needed_peers, + |peer_id, _| { !peers.contains(peer_id) && !explicit_peers.contains(peer_id) && !self .peer_score .below_threshold(peer_id, |ts| ts.publish_threshold) .0 - }); + }, + ); peers.extend(new_peers); } } @@ -2514,7 +2835,7 @@ where } self.failed_messages.shrink_to_fit(); - // Flush stale IDONTWANTs. + // Flush stale IDONTWANTs and partial messages. for peer in self.connected_peers.values_mut() { while let Some((_front, instant)) = peer.dont_send.front() { if (*instant + IDONTWANT_TIMEOUT) >= Instant::now() { @@ -2523,6 +2844,13 @@ where peer.dont_send.pop_front(); } } + #[cfg(feature = "partial_messages")] + for topics in peer.partial_messages.values_mut() { + topics.retain(|_, partial| { + partial.ttl -= 1; + partial.ttl == 0 + }); + } } #[cfg(feature = "metrics")] @@ -2563,15 +2891,19 @@ where ) }; // get gossip_lazy random peers - let to_msg_peers = - get_random_peers_dynamic(&self.connected_peers, topic_hash, n_map, |peer| { - !peers.contains(peer) - && !self.explicit_peers.contains(peer) + let to_msg_peers = get_random_peers_dynamic( + self.connected_peers.iter(), + topic_hash, + n_map, + |peer_id, _| { + !peers.contains(peer_id) + && !self.explicit_peers.contains(peer_id) && !self .peer_score - .below_threshold(peer, |ts| ts.gossip_threshold) + .below_threshold(peer_id, |ts| ts.gossip_threshold) .0 - }); + }, + ); tracing::debug!("Gossiping IHAVE to {} peers", to_msg_peers.len()); @@ -2947,7 +3279,19 @@ where tracing::debug!(peer=%peer_id, "New peer connected"); // We need to send our subscriptions to the newly-connected node. 
for topic_hash in self.mesh.clone().into_keys() { - self.send_message(peer_id, RpcOut::Subscribe(topic_hash)); + #[cfg(feature = "partial_messages")] + let Some(partial_opts) = self.partial_opts.get(&topic_hash).copied() else { + tracing::error!("Partial subscription options should exist for subscribed topic"); + return; + }; + self.send_message( + peer_id, + RpcOut::Subscribe { + topic: topic_hash.clone(), + #[cfg(feature = "partial_messages")] + partial_opts, + }, + ); } } @@ -2988,7 +3332,7 @@ where // connection handler. if !peer.connections.is_empty() { for topic in &peer.topics { - if let Some(mesh_peers) = self.mesh.get(topic) { + if let Some(mesh_peers) = self.mesh.get(&topic) { if mesh_peers.contains(&peer_id) { self.events.push_back(ToSwarm::NotifyHandler { peer_id, @@ -3127,16 +3471,34 @@ where messages: Queue::new(self.config.connection_handler_queue_len()), topics: Default::default(), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }); // Add the new connection connected_peer.connections.push(connection_id); + let queue = connected_peer.messages.clone(); + + // If this is the first connection send extensions message. + if connected_peer.connections.len() <= 1 { + self.send_message( + peer_id, + RpcOut::Extensions(Extensions { + test_extension: Some(true), + partial_messages: if cfg!(feature = "partial_messages") { + Some(true) + } else { + None + }, + }), + ); + } // This clones a reference to the Queue so any new handlers reference the same underlying // queue. No data is actually cloned here. - Ok(Handler::new( - self.config.protocol_config(), - connected_peer.messages.clone(), - )) + Ok(Handler::new(self.config.protocol_config(), queue)) } fn handle_established_outbound_connection( @@ -3156,16 +3518,34 @@ where messages: Queue::new(self.config.connection_handler_queue_len()), topics: Default::default(), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }); // Add the new connection connected_peer.connections.push(connection_id); + let queue = connected_peer.messages.clone(); + + // If this is the first connection send extensions message. + if connected_peer.connections.len() <= 1 { + self.send_message( + peer_id, + RpcOut::Extensions(Extensions { + test_extension: Some(true), + partial_messages: if cfg!(feature = "partial_messages") { + Some(true) + } else { + None + }, + }), + ); + } // This clones a reference to the Queue so any new handlers reference the same underlying // queue. No data is actually cloned here. 
- Ok(Handler::new( - self.config.protocol_config(), - connected_peer.messages.clone(), - )) + Ok(Handler::new(self.config.protocol_config(), queue)) } fn on_connection_handler_event( @@ -3334,6 +3714,11 @@ where } } } + ControlAction::Extensions(extensions) => { + if let Some(extensions) = extensions { + self.handle_extensions(&propagation_source, extensions); + } + } } } if !ihave_msgs.is_empty() { @@ -3345,6 +3730,15 @@ where if !prune_msgs.is_empty() { self.handle_prune(&propagation_source, prune_msgs); } + + if let Some(_extension) = rpc.test_extension { + tracing::debug!("Received Test Extension"); + } + + #[cfg(feature = "partial_messages")] + if let Some(partial_message) = rpc.partial_message { + self.handle_partial_message(&propagation_source, partial_message); + } } } } @@ -3477,17 +3871,17 @@ fn peer_removed_from_mesh( /// Helper function to get a subset of random gossipsub peers for a `topic_hash` /// filtered by the function `f`. The number of peers to get equals the output of `n_map` /// that gets as input the number of filtered peers. -fn get_random_peers_dynamic( - connected_peers: &HashMap, +fn get_random_peers_dynamic<'a>( + peers: impl IntoIterator, topic_hash: &TopicHash, // maps the number of total peers to the number of selected peers n_map: impl Fn(usize) -> usize, - mut f: impl FnMut(&PeerId) -> bool, + f: impl Fn(&PeerId, &PeerDetails) -> bool, ) -> BTreeSet { - let mut gossip_peers = connected_peers - .iter() + let mut gossip_peers = peers + .into_iter() .filter(|(_, p)| p.topics.contains(topic_hash)) - .filter(|(peer_id, _)| f(peer_id)) + .filter(|(peer_id, peer_details)| f(peer_id, peer_details)) .filter(|(_, p)| p.kind.is_gossipsub()) .map(|(peer_id, _)| *peer_id) .collect::>(); @@ -3510,13 +3904,13 @@ fn get_random_peers_dynamic( /// Helper function to get a set of `n` random gossipsub peers for a `topic_hash` /// filtered by the function `f`. 
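Both selection helpers now hand the full `PeerDetails` record to the filter closure instead of only the `PeerId`; every call site in this patch ignores it with `|peer_id, _|`. Purely as a hypothetical illustration of what the richer signature allows (`needed`, `peers`, and `topic_hash` stand in for locals of an enclosing behaviour method), a filter could select only peers that negotiated the partial-messages extension:

    // Hypothetical call site: pick up to `needed` candidates that have
    // advertised partial-message support via the extensions handshake.
    let candidates = get_random_peers(
        &self.connected_peers,
        topic_hash,
        needed,
        |peer_id, details| {
            !peers.contains(peer_id)
                && details
                    .extensions
                    .as_ref()
                    .and_then(|e| e.partial_messages)
                    .unwrap_or(false)
        },
    );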
-fn get_random_peers( - connected_peers: &HashMap, +fn get_random_peers<'a>( + peers: impl IntoIterator, topic_hash: &TopicHash, n: usize, - f: impl FnMut(&PeerId) -> bool, + f: impl Fn(&PeerId, &PeerDetails) -> bool, ) -> BTreeSet { - get_random_peers_dynamic(connected_peers, topic_hash, |_| n, f) + get_random_peers_dynamic(peers, topic_hash, |_| n, f) } /// Validates the combination of signing, privacy and message validation to ensure the diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index b9c16bcbe8a..6a671e7f8d0 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -33,7 +33,7 @@ use crate::{ config::{ConfigBuilder, TopicMeshConfig}, protocol::GossipsubCodec, subscription_filter::WhitelistSubscriptionFilter, - types::RpcIn, + types::{ControlAction, Extensions, RpcIn, RpcOut}, IdentTopic as Topic, }; @@ -85,7 +85,14 @@ where // subscribe to the topics for t in self.topics { let topic = Topic::new(t); - gs.subscribe(&topic).unwrap(); + gs.subscribe( + &topic, + #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] + false, + ) + .unwrap(); topic_hashes.push(topic.hash().clone()); } @@ -173,8 +180,6 @@ fn inject_nodes1() -> InjectNodes InjectNodes::::default() } -// helper functions for testing - fn add_peer( gs: &mut Behaviour, topic_hashes: &[TopicHash], @@ -247,6 +252,11 @@ where topics: Default::default(), messages: queue, dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); @@ -275,6 +285,8 @@ where .map(|t| Subscription { action: SubscriptionAction::Subscribe, topic_hash: t, + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }) .collect::>(), &peer, @@ -414,9 +426,14 @@ fn proto_to_message(rpc: &proto::RPC) -> RpcIn { SubscriptionAction::Unsubscribe }, topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }) .collect(), control_msgs, + test_extension: None, + #[cfg(feature = "partial_messages")] + partial_message: None, } } @@ -453,7 +470,7 @@ fn test_subscribe() { .into_values() .fold(0, |mut collected_subscriptions, mut queue| { while !queue.is_empty() { - if let Some(RpcOut::Subscribe(_)) = queue.try_pop() { + if let Some(RpcOut::Subscribe { .. }) = queue.try_pop() { collected_subscriptions += 1 } } @@ -513,7 +530,7 @@ fn test_unsubscribe() { .into_values() .fold(0, |mut collected_subscriptions, mut queue| { while !queue.is_empty() { - if let Some(RpcOut::Subscribe(_)) = queue.try_pop() { + if let Some(RpcOut::Subscribe { .. 
}) = queue.try_pop() { collected_subscriptions += 1 } } @@ -571,7 +588,14 @@ fn test_join() { // re-subscribe - there should be peers associated with the topic assert!( - gs.subscribe(&topics[0]).unwrap(), + gs.subscribe( + &topics[0], + #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] + false + ) + .unwrap(), "should be able to subscribe successfully" ); @@ -633,6 +657,11 @@ fn test_join() { topics: Default::default(), messages: queue, dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); queues.insert(random_peer, receiver_queue); @@ -656,7 +685,14 @@ fn test_join() { } // subscribe to topic1 - gs.subscribe(&topics[1]).unwrap(); + gs.subscribe( + &topics[1], + #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] + false, + ) + .unwrap(); // the three new peers should have been added, along with 3 more from the pool. assert!( @@ -856,7 +892,7 @@ fn test_inject_connected() { HashMap::>::new(), |mut collected_subscriptions, (peer, mut queue)| { while !queue.is_empty() { - if let Some(RpcOut::Subscribe(topic)) = queue.try_pop() { + if let Some(RpcOut::Subscribe { topic, .. }) = queue.try_pop() { let mut peer_subs = collected_subscriptions.remove(&peer).unwrap_or_default(); peer_subs.push(topic.into_string()); collected_subscriptions.insert(peer, peer_subs); @@ -911,12 +947,16 @@ fn test_handle_received_subscriptions() { .map(|topic_hash| Subscription { action: SubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }) .collect::>(); subscriptions.push(Subscription { action: SubscriptionAction::Unsubscribe, topic_hash: topic_hashes[topic_hashes.len() - 1].clone(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }); let unknown_peer = PeerId::random(); @@ -974,6 +1014,8 @@ fn test_handle_received_subscriptions() { &[Subscription { action: SubscriptionAction::Unsubscribe, topic_hash: topic_hashes[0].clone(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }], &peers[0], ); @@ -1027,31 +1069,36 @@ fn test_get_random_peers() { topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); } - let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, |_| true); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, |_, _| true); assert_eq!(random_peers.len(), 5, "Expected 5 peers to be returned"); - let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 30, |_| true); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 30, |_, _| true); assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); assert!( random_peers == peers.iter().cloned().collect(), "Expected no shuffling" ); - let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 20, |_| true); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 20, |_, _| true); assert!(random_peers.len() == 20, "Expected 20 peers to be returned"); assert!( random_peers == peers.iter().cloned().collect(), "Expected no shuffling" ); - let random_peers = 
get_random_peers(&gs.connected_peers, &topic_hash, 0, |_| true); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 0, |_, _| true); assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); // test the filter - let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, |_| false); + let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 5, |_, _| false); assert!(random_peers.is_empty(), "Expected 0 peers to be returned"); let random_peers = get_random_peers(&gs.connected_peers, &topic_hash, 10, { - |peer| peers.contains(peer) + |peer_id, _| peers.contains(peer_id) }); assert!(random_peers.len() == 10, "Expected 10 peers to be returned"); } @@ -1229,6 +1276,9 @@ fn test_handle_iwant_msg_but_already_sent_idontwant() { control_msgs: vec![ControlAction::IDontWant(IDontWant { message_ids: vec![msg_id.clone()], })], + test_extension: None, + #[cfg(feature = "partial_messages")] + partial_message: None, }; gs.on_connection_handler_event( peers[1], @@ -1701,13 +1751,22 @@ fn explicit_peers_not_added_to_mesh_on_subscribe() { &[Subscription { action: SubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }], peer, ); } // subscribe now to topic - gs.subscribe(&topic).unwrap(); + gs.subscribe( + &topic, + #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] + false, + ) + .unwrap(); // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); @@ -1749,6 +1808,8 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { &[Subscription { action: SubscriptionAction::Subscribe, topic_hash: topic_hash.clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }], peer, ); @@ -1758,7 +1819,14 @@ fn explicit_peers_not_added_to_mesh_from_fanout_on_subscribe() { gs.publish(topic.clone(), vec![1, 2, 3]).unwrap(); // subscribe now to topic - gs.subscribe(&topic).unwrap(); + gs.subscribe( + &topic, + #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] + false, + ) + .unwrap(); // only peer 1 is in the mesh not peer 0 (which is an explicit peer) assert_eq!(gs.mesh[&topic_hash], vec![peers[1]].into_iter().collect()); @@ -2163,7 +2231,13 @@ fn test_unsubscribe_backoff() { "Peer should be pruned with `unsubscribe_backoff`." 
); - let _ = gs.subscribe(&Topic::new(topics[0].to_string())); + let _ = gs.subscribe( + &Topic::new(topics[0].to_string()), + #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] + false, + ); // forget all events until now let queues = flush_events(&mut gs, queues); @@ -2340,59 +2414,23 @@ fn test_gossip_to_at_most_gossip_factor_peers() { ); } -#[test] -fn test_accept_only_outbound_peer_grafts_when_mesh_full() { - let config: Config = Config::default(); - - // enough peers to fill the mesh - let (mut gs, peers, _, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - // graft all the peers => this will fill the mesh - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - // assert current mesh size - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - - // create an outbound and an inbound peer - let (inbound, _in_queue) = add_peer(&mut gs, &topics, false, false); - let (outbound, _out_queue) = add_peer(&mut gs, &topics, true, false); - - // send grafts - gs.handle_graft(&inbound, vec![topics[0].clone()]); - gs.handle_graft(&outbound, vec![topics[0].clone()]); - - // assert mesh size - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high() + 1); - - // inbound is not in mesh - assert!(!gs.mesh[&topics[0]].contains(&inbound)); - - // outbound is in mesh - assert!(gs.mesh[&topics[0]].contains(&outbound)); -} - #[test] fn test_do_not_remove_too_many_outbound_peers() { // use an extreme case to catch errors with high probability - let m = 50; - let n = 2 * m; + let mesh_n = 50; + let mesh_n_high = 2 * mesh_n; let config = ConfigBuilder::default() - .mesh_n_high(n) - .mesh_n(n) - .mesh_n_low(n) - .mesh_outbound_min(m) + .mesh_n_high(mesh_n_high) + .mesh_n(mesh_n) + // Irrelevant for this test. + .mesh_n_low(mesh_n) + .mesh_outbound_min(mesh_n) .build() .unwrap(); // fill the mesh with inbound connections let (mut gs, peers, _queues, topics) = inject_nodes1() - .peer_no(n) + .peer_no(mesh_n) .topics(vec!["test".into()]) .to_subscribe(true) .gs_config(config) @@ -2405,60 +2443,26 @@ fn test_do_not_remove_too_many_outbound_peers() { // create m outbound connections and graft (we will accept the graft) let mut outbound = HashSet::new(); - for _ in 0..m { + // Go from 50 (mesh_n) to 100 (mesh_n_high) to trigger prunning. 
+ for _ in 0..mesh_n { let (peer, _) = add_peer(&mut gs, &topics, true, false); outbound.insert(peer); gs.handle_graft(&peer, topics.clone()); } // mesh is overly full - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n + m); + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), mesh_n_high); // run a heartbeat gs.heartbeat(); - // Peers should be removed to reach n - assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), n); + // Peers should be removed to reach `mesh_n` + assert_eq!(gs.mesh.get(&topics[0]).unwrap().len(), mesh_n); // all outbound peers are still in the mesh assert!(outbound.iter().all(|p| gs.mesh[&topics[0]].contains(p))); } -#[test] -fn test_add_outbound_peers_if_min_is_not_satisfied() { - let config: Config = Config::default(); - - // Fill full mesh with inbound peers - let (mut gs, peers, _, topics) = inject_nodes1() - .peer_no(config.mesh_n_high()) - .topics(vec!["test".into()]) - .to_subscribe(true) - .create_network(); - - // graft all the peers - for peer in peers { - gs.handle_graft(&peer, topics.clone()); - } - - // create config.mesh_outbound_min() many outbound connections without grafting - let mut peers = vec![]; - for _ in 0..config.mesh_outbound_min() { - peers.push(add_peer(&mut gs, &topics, true, false)); - } - - // Nothing changed in the mesh yet - assert_eq!(gs.mesh[&topics[0]].len(), config.mesh_n_high()); - - // run a heartbeat - gs.heartbeat(); - - // The outbound peers got additionally added - assert_eq!( - gs.mesh[&topics[0]].len(), - config.mesh_n_high() + config.mesh_outbound_min() - ); -} - #[test] fn test_prune_negative_scored_peers() { let config = Config::default(); @@ -3073,6 +3077,8 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { let subscription = Subscription { action: SubscriptionAction::Subscribe, topic_hash: topics[0].clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }; let control_action = ControlAction::IHave(IHave { @@ -3092,6 +3098,9 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { messages: vec![raw_message1], subscriptions: vec![subscription.clone()], control_msgs: vec![control_action], + test_extension: None, + #[cfg(feature = "partial_messages")] + partial_message: None, }, invalid_messages: Vec::new(), }, @@ -3118,6 +3127,9 @@ fn test_ignore_rpc_from_peers_below_graylist_threshold() { messages: vec![raw_message3], subscriptions: vec![subscription], control_msgs: vec![control_action], + test_extension: None, + #[cfg(feature = "partial_messages")] + partial_message: None, }, invalid_messages: Vec::new(), }, @@ -3205,22 +3217,20 @@ fn test_keep_best_scoring_peers_on_oversubscription() { .build() .unwrap(); - // build mesh with more peers than mesh can hold - let n = config.mesh_n_high() + 1; + let mesh_n_high = config.mesh_n_high(); + let (mut gs, peers, _queues, topics) = inject_nodes1() - .peer_no(n) + .peer_no(mesh_n_high) .topics(vec!["test".into()]) .to_subscribe(true) .gs_config(config.clone()) .explicit(0) - .outbound(n) .scoring(Some(( PeerScoreParams::default(), PeerScoreThresholds::default(), ))) .create_network(); - // graft all, will be accepted since the are outbound for peer in &peers { gs.handle_graft(peer, topics.clone()); } @@ -3232,7 +3242,7 @@ fn test_keep_best_scoring_peers_on_oversubscription() { gs.set_application_score(peer, index as f64); } - assert_eq!(gs.mesh[&topics[0]].len(), n); + assert_eq!(gs.mesh[&topics[0]].len(), mesh_n_high); // heartbeat to prune some peers gs.heartbeat(); @@ -3241,7 +3251,7 @@ fn 
test_keep_best_scoring_peers_on_oversubscription() { // mesh contains retain_scores best peers assert!(gs.mesh[&topics[0]].is_superset( - &peers[(n - config.retain_scores())..] + &peers[(mesh_n_high - config.retain_scores())..] .iter() .cloned() .collect() @@ -3728,6 +3738,9 @@ fn test_scoring_p4_invalid_signature() { messages: vec![], subscriptions: vec![], control_msgs: vec![], + test_extension: None, + #[cfg(feature = "partial_messages")] + partial_message: None, }, invalid_messages: vec![(m, ValidationError::InvalidSignature)], }, @@ -4199,7 +4212,7 @@ fn test_scoring_p6() { // create 5 peers with the same ip let addr = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 3)); - let peers = vec![ + let peers = [ add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0, add_peer_with_addr(&mut gs, &[], false, false, addr.clone()).0, add_peer_with_addr(&mut gs, &[], true, false, addr.clone()).0, @@ -4209,7 +4222,7 @@ fn test_scoring_p6() { // create 4 other peers with other ip let addr2 = Multiaddr::from(Ipv4Addr::new(10, 1, 2, 4)); - let others = vec![ + let others = [ add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0, add_peer_with_addr(&mut gs, &[], false, false, addr2.clone()).0, add_peer_with_addr(&mut gs, &[], true, false, addr2.clone()).0, @@ -5184,8 +5197,24 @@ fn test_subscribe_to_invalid_topic() { .to_subscribe(false) .create_network(); - assert!(gs.subscribe(&t1).is_ok()); - assert!(gs.subscribe(&t2).is_err()); + assert!(gs + .subscribe( + &t1, + #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] + false + ) + .is_ok()); + assert!(gs + .subscribe( + &t2, + #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] + false + ) + .is_err()); } #[test] @@ -5214,7 +5243,14 @@ fn test_subscribe_and_graft_with_negative_score() { let original_score = gs1.as_peer_score_mut().score_report(&p2).score; // subscribe to topic in gs2 - gs2.subscribe(&topic).unwrap(); + gs2.subscribe( + &topic, + #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] + false, + ) + .unwrap(); let forward_messages_to_p1 = |gs1: &mut Behaviour<_, _>, p1: PeerId, @@ -5482,6 +5518,9 @@ fn parses_idontwant() { control_msgs: vec![ControlAction::IDontWant(IDontWant { message_ids: vec![message_id.clone()], })], + test_extension: None, + #[cfg(feature = "partial_messages")] + partial_message: None, }; gs.on_connection_handler_event( peers[1], @@ -5541,6 +5580,11 @@ fn test_all_queues_full() { topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); @@ -5578,6 +5622,11 @@ fn test_slow_peer_returns_failed_publish() { topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); let peer_id = PeerId::random(); @@ -5591,6 +5640,11 @@ fn test_slow_peer_returns_failed_publish() { topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); @@ -5643,6 +5697,11 @@ fn 
test_slow_peer_returns_failed_ihave_handling() { topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); peers.push(slow_peer_id); @@ -5660,6 +5719,11 @@ fn test_slow_peer_returns_failed_ihave_handling() { topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); @@ -5748,6 +5812,11 @@ fn test_slow_peer_returns_failed_iwant_handling() { topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); peers.push(slow_peer_id); @@ -5765,6 +5834,11 @@ fn test_slow_peer_returns_failed_iwant_handling() { topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); @@ -5833,6 +5907,11 @@ fn test_slow_peer_returns_failed_forward() { topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); peers.push(slow_peer_id); @@ -5850,6 +5929,11 @@ fn test_slow_peer_returns_failed_forward() { topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); @@ -5923,6 +6007,11 @@ fn test_slow_peer_is_downscored_on_publish() { topics: topics.clone(), messages: Queue::new(1), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); gs.as_peer_score_mut().add_peer(slow_peer_id); @@ -5937,6 +6026,11 @@ fn test_slow_peer_is_downscored_on_publish() { topics: topics.clone(), messages: Queue::new(gs.config.connection_handler_queue_len()), dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), }, ); @@ -6118,10 +6212,13 @@ fn test_mesh_subtraction_with_topic_config() { let topic = String::from("topic1"); let topic_hash = TopicHash::from_raw(topic.clone()); + let mesh_n = 5; + let mesh_n_high = 7; + let topic_config = TopicMeshConfig { - mesh_n: 5, + mesh_n, + mesh_n_high, mesh_n_low: 3, - mesh_n_high: 7, mesh_outbound_min: 2, }; @@ -6130,15 +6227,12 @@ fn test_mesh_subtraction_with_topic_config() { .build() .unwrap(); - let peer_no = 12; - - // make all outbound connections so grafting to all will be allowed let (mut gs, peers, _, topics) = inject_nodes1() - .peer_no(peer_no) + .peer_no(mesh_n_high) 
.topics(vec![topic]) .to_subscribe(true) .gs_config(config.clone()) - .outbound(peer_no) + .outbound(mesh_n_high) .create_network(); // graft all peers @@ -6148,7 +6242,7 @@ fn test_mesh_subtraction_with_topic_config() { assert_eq!( gs.mesh.get(&topics[0]).unwrap().len(), - peer_no, + mesh_n_high, "Initially all peers should be in the mesh" ); @@ -6163,6 +6257,60 @@ fn test_mesh_subtraction_with_topic_config() { ); } +/// Tests that if a mesh reaches `mesh_n_high`, +/// but is only composed of outbound peers, it is not reduced to `mesh_n`. +#[test] +fn test_mesh_subtraction_with_topic_config_min_outbound() { + let topic = String::from("topic1"); + let topic_hash = TopicHash::from_raw(topic.clone()); + + let mesh_n = 5; + let mesh_n_high = 7; + + let topic_config = TopicMeshConfig { + mesh_n, + mesh_n_high, + mesh_n_low: 3, + mesh_outbound_min: 7, + }; + + let config = ConfigBuilder::default() + .set_topic_config(topic_hash.clone(), topic_config) + .build() + .unwrap(); + + let peer_no = 12; + + // make all outbound connections. + let (mut gs, peers, _, topics) = inject_nodes1() + .peer_no(peer_no) + .topics(vec![topic]) + .to_subscribe(true) + .gs_config(config.clone()) + .outbound(peer_no) + .create_network(); + + // graft all peers + for peer in peers { + gs.handle_graft(&peer, topics.clone()); + } + + assert_eq!( + gs.mesh.get(&topics[0]).unwrap().len(), + mesh_n_high, + "Initially mesh should be {mesh_n_high}" + ); + + // run a heartbeat + gs.heartbeat(); + + assert_eq!( + gs.mesh.get(&topics[0]).unwrap().len(), + mesh_n_high, + "After heartbeat, mesh should still be {mesh_n_high} as these are all outbound peers" + ); +} + /// Test behavior with multiple topics having different configs #[test] fn test_multiple_topics_with_different_configs() { @@ -6262,8 +6410,14 @@ fn test_multiple_topics_with_different_configs() { // re-subscribe to topic1 assert!( - gs.subscribe(&Topic::new(topic_hashes[0].to_string())) - .unwrap(), + gs.subscribe( + &Topic::new(topic_hashes[0].to_string()), + #[cfg(feature = "partial_messages")] + false, + #[cfg(feature = "partial_messages")] + false + ) + .unwrap(), "Should subscribe successfully" ); @@ -6343,7 +6497,7 @@ fn test_publish_message_with_default_transmit_size_config() { let topic_hash = topic.hash(); let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), Config::default_max_transmit_size()) + .max_transmit_size_for_topic(Config::default_max_transmit_size(), topic_hash.clone()) .validation_mode(ValidationMode::Strict) .build() .unwrap(); @@ -6375,7 +6529,7 @@ fn test_publish_large_message_with_default_transmit_size_config() { let topic_hash = topic.hash(); let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), Config::default_max_transmit_size()) + .max_transmit_size_for_topic(Config::default_max_transmit_size(), topic_hash.clone()) .validation_mode(ValidationMode::Strict) .build() .unwrap(); @@ -6403,7 +6557,7 @@ fn test_publish_message_with_specific_transmit_size_config() { let max_topic_transmit_size = 2000; let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), max_topic_transmit_size) + .max_transmit_size_for_topic(max_topic_transmit_size, topic_hash.clone()) .validation_mode(ValidationMode::Strict) .build() .unwrap(); @@ -6436,7 +6590,7 @@ fn test_publish_large_message_with_specific_transmit_size_config() { let max_topic_transmit_size = 2048; let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), 
max_topic_transmit_size) + .max_transmit_size_for_topic(max_topic_transmit_size, topic_hash.clone()) .validation_mode(ValidationMode::Strict) .build() .unwrap(); @@ -6464,7 +6618,7 @@ fn test_validation_error_message_size_too_large_topic_specific() { let max_size = 2048; let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), max_size) + .max_transmit_size_for_topic(max_size, topic_hash.clone()) .validation_mode(ValidationMode::None) .build() .unwrap(); @@ -6495,6 +6649,9 @@ fn test_validation_error_message_size_too_large_topic_specific() { messages: vec![raw_message], subscriptions: vec![], control_msgs: vec![], + test_extension: None, + #[cfg(feature = "partial_messages")] + partial_message: None, }, invalid_messages: vec![], }, @@ -6539,6 +6696,8 @@ fn test_validation_error_message_size_too_large_topic_specific() { }], subscriptions: vec![], control: None, + testExtension: None, + partial: None, }; codec.encode(rpc, &mut buf).unwrap(); @@ -6568,7 +6727,7 @@ fn test_validation_message_size_within_topic_specific() { let max_size = 2048; let config = ConfigBuilder::default() - .set_topic_max_transmit_size(topic_hash.clone(), max_size) + .max_transmit_size_for_topic(max_size, topic_hash.clone()) .validation_mode(ValidationMode::None) .build() .unwrap(); @@ -6599,6 +6758,9 @@ fn test_validation_message_size_within_topic_specific() { messages: vec![raw_message], subscriptions: vec![], control_msgs: vec![], + test_extension: None, + #[cfg(feature = "partial_messages")] + partial_message: None, }, invalid_messages: vec![], }, @@ -6643,6 +6805,8 @@ fn test_validation_message_size_within_topic_specific() { }], subscriptions: vec![], control: None, + testExtension: None, + partial: None, }; codec.encode(rpc, &mut buf).unwrap(); @@ -6659,3 +6823,94 @@ fn test_validation_message_size_within_topic_specific() { _ => panic!("Unexpected event"), } } + +#[test] +fn test_extensions_message_creation() { + let extensions_rpc = RpcOut::Extensions(Extensions { + test_extension: Some(true), + partial_messages: None, + }); + let proto_rpc: proto::RPC = extensions_rpc.into(); + + assert!(proto_rpc.control.is_some()); + let control = proto_rpc.control.unwrap(); + assert!(control.extensions.is_some()); + let test_extension = control.extensions.unwrap().testExtension.unwrap(); + assert!(test_extension); + assert!(control.ihave.is_empty()); + assert!(control.iwant.is_empty()); + assert!(control.graft.is_empty()); + assert!(control.prune.is_empty()); + assert!(control.idontwant.is_empty()); +} + +#[test] +fn test_handle_extensions_message() { + let mut gs: Behaviour = Behaviour::new( + MessageAuthenticity::Anonymous, + ConfigBuilder::default() + .validation_mode(ValidationMode::None) + .build() + .unwrap(), + ) + .unwrap(); + + let peer_id = PeerId::random(); + let messages = Queue::new(gs.config.connection_handler_queue_len()); + + // Add peer without extensions + gs.connected_peers.insert( + peer_id, + PeerDetails { + kind: PeerKind::Gossipsubv1_3, + connections: vec![ConnectionId::new_unchecked(0)], + outbound: false, + topics: BTreeSet::new(), + messages, + dont_send: LinkedHashMap::new(), + extensions: None, + #[cfg(feature = "partial_messages")] + partial_messages: Default::default(), + #[cfg(feature = "partial_messages")] + partial_opts: Default::default(), + }, + ); + + // Simulate receiving extensions message + let extensions = Extensions { + test_extension: Some(false), + partial_messages: None, + }; + gs.handle_extensions(&peer_id, extensions); + + // Verify extensions were 
stored + let peer_details = gs.connected_peers.get(&peer_id).unwrap(); + assert!(peer_details.extensions.is_some()); + + // Simulate receiving duplicate extensions message from another peer + let duplicate_rpc = RpcIn { + messages: vec![], + subscriptions: vec![], + control_msgs: vec![ControlAction::Extensions(Some(Extensions { + test_extension: Some(true), + partial_messages: None, + }))], + test_extension: None, + #[cfg(feature = "partial_messages")] + partial_message: None, + }; + + gs.on_connection_handler_event( + peer_id, + ConnectionId::new_unchecked(0), + HandlerEvent::Message { + rpc: duplicate_rpc, + invalid_messages: vec![], + }, + ); + + // Extensions should still be present (not cleared or changed) + let peer_details = gs.connected_peers.get(&peer_id).unwrap(); + let test_extension = peer_details.extensions.unwrap().test_extension.unwrap(); + assert!(!test_extension); +} diff --git a/protocols/gossipsub/src/config.rs b/protocols/gossipsub/src/config.rs index fa685f3085a..615bd08da30 100644 --- a/protocols/gossipsub/src/config.rs +++ b/protocols/gossipsub/src/config.rs @@ -1075,15 +1075,6 @@ impl ConfigBuilder { self } - /// The topic max size sets message sizes for a given topic. - pub fn set_topic_max_transmit_size(&mut self, topic: TopicHash, max_size: usize) -> &mut Self { - self.config - .protocol - .max_transmit_sizes - .insert(topic, max_size); - self - } - /// Constructs a [`Config`] from the given configuration and validates the settings. pub fn build(&self) -> Result { // check all constraints on config diff --git a/protocols/gossipsub/src/error.rs b/protocols/gossipsub/src/error.rs index 7af14d84ac0..fac93210e7d 100644 --- a/protocols/gossipsub/src/error.rs +++ b/protocols/gossipsub/src/error.rs @@ -40,6 +40,9 @@ pub enum PublishError { /// Messages could not be sent because the queues for all peers were full. The usize represents /// the number of peers that were attempted. AllQueuesFull(usize), + + /// An Error while trying to publish a partial message. + Partial(PartialMessageError), } impl std::fmt::Display for PublishError { @@ -160,3 +163,70 @@ impl std::fmt::Display for ConfigBuilderError { } } } + +/// Errors that can occur during partial message processing. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum PartialMessageError { + /// The received data is too short to contain required headers/metadata. + InsufficientData { + /// Expected minimum number of bytes. + expected: usize, + /// Actual number of bytes received. + received: usize, + }, + + /// The data format is invalid or corrupted. + InvalidFormat, + + /// The partial data doesn't belong to this message group. + WrongGroup { + /// Group Id of the received message. + received: Vec, + }, + + /// The partial data is a duplicate of already received data. + DuplicateData(Vec), + + /// The partial data is out of the expected range or sequence. + OutOfRange, + + /// The message is already complete and cannot accept more data. + AlreadyComplete, + + /// Application-specific validation failed. 
+ ValidationFailed, +} + +impl std::error::Error for PartialMessageError {} + +impl std::fmt::Display for PartialMessageError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::InsufficientData { expected, received } => { + write!( + f, + "Insufficient data: expected at least {} bytes, got {}", + expected, received + ) + } + Self::InvalidFormat => { + write!(f, "Invalid data format") + } + Self::WrongGroup { received } => { + write!(f, "Wrong group ID: got {:?}", received) + } + Self::DuplicateData(part_id) => { + write!(f, "Duplicate data for part {:?}", part_id) + } + Self::OutOfRange => { + write!(f, "Data out of range") + } + Self::AlreadyComplete => { + write!(f, "Message is already complete") + } + Self::ValidationFailed => { + write!(f, "Validation failed") + } + } + } +} diff --git a/protocols/gossipsub/src/generated/gossipsub/pb.rs b/protocols/gossipsub/src/generated/gossipsub/pb.rs index 24ac80d2755..6aa7fd4eeeb 100644 --- a/protocols/gossipsub/src/generated/gossipsub/pb.rs +++ b/protocols/gossipsub/src/generated/gossipsub/pb.rs @@ -19,6 +19,8 @@ pub struct RPC { pub subscriptions: Vec, pub publish: Vec, pub control: Option, + pub testExtension: Option, + pub partial: Option, } impl<'a> MessageRead<'a> for RPC { @@ -29,6 +31,8 @@ impl<'a> MessageRead<'a> for RPC { Ok(10) => msg.subscriptions.push(r.read_message::(bytes)?), Ok(18) => msg.publish.push(r.read_message::(bytes)?), Ok(26) => msg.control = Some(r.read_message::(bytes)?), + Ok(51939474) => msg.testExtension = Some(r.read_message::(bytes)?), + Ok(131350034) => msg.partial = Some(r.read_message::(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -43,12 +47,16 @@ impl MessageWrite for RPC { + self.subscriptions.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.publish.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.control.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) + + self.testExtension.as_ref().map_or(0, |m| 4 + sizeof_len((m).get_size())) + + self.partial.as_ref().map_or(0, |m| 4 + sizeof_len((m).get_size())) } fn write_message(&self, w: &mut Writer) -> Result<()> { for s in &self.subscriptions { w.write_with_tag(10, |w| w.write_message(s))?; } for s in &self.publish { w.write_with_tag(18, |w| w.write_message(s))?; } if let Some(ref s) = self.control { w.write_with_tag(26, |w| w.write_message(s))?; } + if let Some(ref s) = self.testExtension { w.write_with_tag(51939474, |w| w.write_message(s))?; } + if let Some(ref s) = self.partial { w.write_with_tag(131350034, |w| w.write_message(s))?; } Ok(()) } } @@ -62,6 +70,8 @@ use super::*; pub struct SubOpts { pub subscribe: Option, pub topic_id: Option, + pub requestsPartial: Option, + pub supportsPartial: Option, } impl<'a> MessageRead<'a> for SubOpts { @@ -71,6 +81,8 @@ impl<'a> MessageRead<'a> for SubOpts { match r.next_tag(bytes) { Ok(8) => msg.subscribe = Some(r.read_bool(bytes)?), Ok(18) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()), + Ok(24) => msg.requestsPartial = Some(r.read_bool(bytes)?), + Ok(32) => msg.supportsPartial = Some(r.read_bool(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -84,11 +96,15 @@ impl MessageWrite for SubOpts { 0 + self.subscribe.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.requestsPartial.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64)) + + self.supportsPartial.as_ref().map_or(0, |m| 1 + 
sizeof_varint(*(m) as u64)) } fn write_message(&self, w: &mut Writer) -> Result<()> { if let Some(ref s) = self.subscribe { w.write_with_tag(8, |w| w.write_bool(*s))?; } if let Some(ref s) = self.topic_id { w.write_with_tag(18, |w| w.write_string(&**s))?; } + if let Some(ref s) = self.requestsPartial { w.write_with_tag(24, |w| w.write_bool(*s))?; } + if let Some(ref s) = self.supportsPartial { w.write_with_tag(32, |w| w.write_bool(*s))?; } Ok(()) } } @@ -155,6 +171,7 @@ pub struct ControlMessage { pub graft: Vec, pub prune: Vec, pub idontwant: Vec, + pub extensions: Option, } impl<'a> MessageRead<'a> for ControlMessage { @@ -167,6 +184,7 @@ impl<'a> MessageRead<'a> for ControlMessage { Ok(26) => msg.graft.push(r.read_message::(bytes)?), Ok(34) => msg.prune.push(r.read_message::(bytes)?), Ok(42) => msg.idontwant.push(r.read_message::(bytes)?), + Ok(50) => msg.extensions = Some(r.read_message::(bytes)?), Ok(t) => { r.read_unknown(bytes, t)?; } Err(e) => return Err(e), } @@ -183,6 +201,7 @@ impl MessageWrite for ControlMessage { + self.graft.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.prune.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + self.idontwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::() + + self.extensions.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size())) } fn write_message(&self, w: &mut Writer) -> Result<()> { @@ -191,6 +210,7 @@ impl MessageWrite for ControlMessage { for s in &self.graft { w.write_with_tag(26, |w| w.write_message(s))?; } for s in &self.prune { w.write_with_tag(34, |w| w.write_message(s))?; } for s in &self.idontwant { w.write_with_tag(42, |w| w.write_message(s))?; } + if let Some(ref s) = self.extensions { w.write_with_tag(50, |w| w.write_message(s))?; } Ok(()) } } @@ -367,6 +387,55 @@ impl MessageWrite for ControlIDontWant { } } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct ControlExtensions { + pub testExtension: Option, + pub partialMessages: Option, +} + +impl<'a> MessageRead<'a> for ControlExtensions { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(51939472) => msg.testExtension = Some(r.read_bool(bytes)?), + Ok(131350032) => msg.partialMessages = Some(r.read_bool(bytes)?), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for ControlExtensions { + fn get_size(&self) -> usize { + 0 + + self.testExtension.as_ref().map_or(0, |m| 4 + sizeof_varint(*(m) as u64)) + + self.partialMessages.as_ref().map_or(0, |m| 4 + sizeof_varint(*(m) as u64)) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.testExtension { w.write_with_tag(51939472, |w| w.write_bool(*s))?; } + if let Some(ref s) = self.partialMessages { w.write_with_tag(131350032, |w| w.write_bool(*s))?; } + Ok(()) + } +} + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct TestExtension { } + +impl<'a> MessageRead<'a> for TestExtension { + fn from_reader(r: &mut BytesReader, _: &[u8]) -> Result { + r.read_to_end(); + Ok(Self::default()) + } +} + +impl MessageWrite for TestExtension { } + #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Debug, Default, PartialEq, Clone)] pub struct PeerInfo { @@ -601,3 +670,47 @@ impl<'a> From<&'a str> for EncMode { } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] 
+pub struct PartialMessagesExtension { + pub topicID: Option>, + pub groupID: Option>, + pub partialMessage: Option>, + pub partsMetadata: Option>, +} + +impl<'a> MessageRead<'a> for PartialMessagesExtension { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.topicID = Some(r.read_bytes(bytes)?.to_owned()), + Ok(18) => msg.groupID = Some(r.read_bytes(bytes)?.to_owned()), + Ok(26) => msg.partialMessage = Some(r.read_bytes(bytes)?.to_owned()), + Ok(34) => msg.partsMetadata = Some(r.read_bytes(bytes)?.to_owned()), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for PartialMessagesExtension { + fn get_size(&self) -> usize { + 0 + + self.topicID.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.groupID.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.partialMessage.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + + self.partsMetadata.as_ref().map_or(0, |m| 1 + sizeof_len((m).len())) + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if let Some(ref s) = self.topicID { w.write_with_tag(10, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.groupID { w.write_with_tag(18, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.partialMessage { w.write_with_tag(26, |w| w.write_bytes(&**s))?; } + if let Some(ref s) = self.partsMetadata { w.write_with_tag(34, |w| w.write_bytes(&**s))?; } + Ok(()) + } +} + diff --git a/protocols/gossipsub/src/generated/rpc.proto b/protocols/gossipsub/src/generated/rpc.proto index fe4d3bc9366..664211373fb 100644 --- a/protocols/gossipsub/src/generated/rpc.proto +++ b/protocols/gossipsub/src/generated/rpc.proto @@ -9,9 +9,24 @@ message RPC { message SubOpts { optional bool subscribe = 1; // subscribe or unsubscribe optional string topic_id = 2; + // Used with Partial Messages extension. + // If set to true, signals to the receiver that the sender prefers partial + // messages. + optional bool requestsPartial = 3; + // If set to true, signals to the receiver that the sender supports sending + // partial messages on this topic. If requestsPartial is true, this is + // assumed to be true. + optional bool supportsPartial = 4; } optional ControlMessage control = 3; + // Canonical Extensions should register their messages here. + + // Experimental Extensions should register their messages here. They + // must use field numbers larger than 0x200000 to be encoded with at least 4 + // bytes + optional TestExtension testExtension = 6492434; + optional PartialMessagesExtension partial = 16418754; } message Message { @@ -29,6 +44,7 @@ message ControlMessage { repeated ControlGraft graft = 3; repeated ControlPrune prune = 4; repeated ControlIDontWant idontwant = 5; + optional ControlExtensions extensions = 6; } message ControlIHave { @@ -51,9 +67,21 @@ message ControlPrune { } message ControlIDontWant { - repeated bytes message_ids = 1; + repeated bytes message_ids = 1; } +message ControlExtensions { + // Initially empty. Future extensions will be added here along with a + // reference to their specification. 
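The experimental field numbers registered in this file are what produce the large tag constants in the generated gossipsub/pb.rs above: protobuf keys are `(field_number << 3) | wire_type`, and any field number of at least 0x200000 yields a key of at least 2^24, i.e. a varint of four or more bytes (hence the `4 + ...` terms in the generated `get_size` methods). A quick check of the arithmetic:

    // Protobuf encodes each field as a varint key: (field_number << 3) | wire_type.
    const TEST_EXTENSION: u32 = 6_492_434; // > 0x200000
    const PARTIAL: u32 = 16_418_754; // > 0x200000

    fn main() {
        // Keys matched by the generated `MessageRead` impls above.
        assert_eq!(TEST_EXTENSION << 3 | 2, 51_939_474); // length-delimited, in `RPC`
        assert_eq!(TEST_EXTENSION << 3, 51_939_472); // varint bool, in `ControlExtensions`
        assert_eq!(PARTIAL << 3 | 2, 131_350_034);
        assert_eq!(PARTIAL << 3, 131_350_032);
        // Any field number >= 0x200000 produces a key >= 2^24, i.e. a varint
        // of at least 4 bytes, which is why `get_size` adds 4 per such field.
        assert!(0x200000u32 << 3 >= 1u32 << 24);
    }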
+ + // Experimental extensions must use field numbers larger than 0x200000 to be + // encoded with at least 4 bytes + optional bool testExtension = 6492434; + optional bool partialMessages = 16418754; +} + +message TestExtension {} + message PeerInfo { optional bytes peer_id = 1; optional bytes signed_peer_record = 2; @@ -87,3 +115,14 @@ message TopicDescriptor { } } } + +message PartialMessagesExtension { + optional bytes topicID = 1; + optional bytes groupID = 2; + + // An encoded partial message + optional bytes partialMessage = 3; + + // An encoded representation of the parts a peer has and wants. + optional bytes partsMetadata = 4; +} diff --git a/protocols/gossipsub/src/lib.rs b/protocols/gossipsub/src/lib.rs index f1d42d6cddb..1d68bec7208 100644 --- a/protocols/gossipsub/src/lib.rs +++ b/protocols/gossipsub/src/lib.rs @@ -113,6 +113,9 @@ mod topic; mod transform; mod types; +#[cfg(feature = "partial_messages")] +pub mod partial; + #[cfg(feature = "metrics")] pub use metrics::Config as MetricsConfig; @@ -133,6 +136,8 @@ pub use self::{ transform::{DataTransform, IdentityTransform}, types::{FailedMessages, Message, MessageAcceptance, MessageId, RawMessage}, }; +#[cfg(feature = "partial_messages")] +pub use self::{error::PartialMessageError, partial::Partial}; pub type IdentTopic = Topic; pub type Sha256Topic = Topic; diff --git a/protocols/gossipsub/src/partial.rs b/protocols/gossipsub/src/partial.rs new file mode 100644 index 00000000000..5e6aad54648 --- /dev/null +++ b/protocols/gossipsub/src/partial.rs @@ -0,0 +1,79 @@ +// Copyright 2020 Sigma Prime Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::fmt::Debug; + +use crate::error::PartialMessageError; + +/// PartialMessage is a message that can be broken up into parts. +/// This trait allows applications to define custom strategies for splitting large messages +/// into parts and reconstructing them from received partial data. It provides the core +/// operations needed for the gossipsub partial messages extension. +/// +/// The partial message protocol works as follows: +/// 1. Applications implement this trait to define how messages are split and reconstructed +/// 2. Peers advertise available parts using `available_parts()` metadata in PartialIHAVE +/// 3. Peers request missing parts using `missing_parts()` metadata in PartialIWANT +/// 4. When requests are received, `partial_message_bytes_from_metadata()` generates the response +/// 5. 
Received partial data is integrated using `extend_from_encoded_partial_message()` +/// 6. The `group_id()` ties all parts of the same logical message together +pub trait Partial: Send + Sync { + /// Returns the unique identifier for this message group. + /// + /// All partial messages belonging to the same logical message should return + /// the same group ID. This is used to associate partial messages together + /// during reconstruction. + fn group_id(&self) -> Vec; + + /// Returns application defined metadata describing which parts of the message + /// are available and which parts we want. + /// + /// The returned bytes will be sent in partsMetadata field to advertise + /// available and wanted parts to peers. + fn metadata(&self) -> Vec; + + /// Generates partial message bytes from the given metadata. + /// + /// When a peer requests specific parts (via PartialIWANT), this method + /// generates the actual message data to send back. The `metadata` parameter + /// describes what parts are being requested. + /// + /// Returns a [`PublishAction`] for the given metadata, or an error. + fn partial_message_bytes_from_metadata( + &self, + metadata: Option<&[u8]>, + ) -> Result; +} + +pub trait Metadata: Debug + Send + Sync { + /// Return the `Metadata` as a byte slice. + fn as_slice(&self) -> &[u8]; + /// try to Update the `Metadata` with the remote data, + /// return true if it was updated. + fn update(&mut self, data: &[u8]) -> Result; +} + +/// Indicates the action to take for the given metadata. +pub struct PublishAction { + /// Indicate if we want remote data from the peer. + pub need: bool, + /// Indicate if we have data to send for that peer + pub send: Option<(Vec, Box)>, +} diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index 7a30038c48b..06d9563a850 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -532,6 +532,24 @@ impl PeerScore { } } + /// Indicate that a peer has sent us invalid partial message data. 
+ #[cfg(feature = "partial_messages")] + pub(crate) fn reject_invalid_partial(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) { + if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { + if let Some(topic_stats) = + peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params) + { + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "[Penalty] Peer delivered invalid partial data in topic and gets penalized \ + for it", + ); + topic_stats.invalid_message_deliveries += 1f64; + } + } + } + /// Removes an ip from a peer pub(crate) fn remove_ip(&mut self, peer_id: &PeerId, ip: &IpAddr) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 74dcc669f55..54fe553e7f2 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -29,20 +29,27 @@ use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::StreamProtocol; use quick_protobuf::{MessageWrite, Writer}; +#[cfg(feature = "partial_messages")] +use crate::types::{PartialMessage, PartialSubOpts}; use crate::{ config::ValidationMode, handler::HandlerEvent, rpc_proto::proto, topic::TopicHash, types::{ - ControlAction, Graft, IDontWant, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, - RawMessage, RpcIn, Subscription, SubscriptionAction, + ControlAction, Extensions, Graft, IDontWant, IHave, IWant, MessageId, PeerInfo, PeerKind, + Prune, RawMessage, RpcIn, Subscription, SubscriptionAction, TestExtension, }, ValidationError, }; pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:"; +pub(crate) const GOSSIPSUB_1_3_0_PROTOCOL: ProtocolId = ProtocolId { + protocol: StreamProtocol::new("/meshsub/1.3.0"), + kind: PeerKind::Gossipsubv1_2, +}; + pub(crate) const GOSSIPSUB_1_2_0_PROTOCOL: ProtocolId = ProtocolId { protocol: StreamProtocol::new("/meshsub/1.2.0"), kind: PeerKind::Gossipsubv1_2, @@ -79,6 +86,7 @@ impl Default for ProtocolConfig { Self { validation_mode: ValidationMode::Strict, protocol_ids: vec![ + GOSSIPSUB_1_3_0_PROTOCOL, GOSSIPSUB_1_2_0_PROTOCOL, GOSSIPSUB_1_1_0_PROTOCOL, GOSSIPSUB_1_0_0_PROTOCOL, @@ -556,13 +564,40 @@ impl Decoder for GossipsubCodec { }) .collect(); + let extensions_msg = rpc_control.extensions.map(|extensions| Extensions { + test_extension: extensions.testExtension, + partial_messages: extensions.partialMessages, + }); + control_msgs.extend(ihave_msgs); control_msgs.extend(iwant_msgs); control_msgs.extend(graft_msgs); control_msgs.extend(prune_msgs); control_msgs.extend(idontwant_msgs); + control_msgs.push(ControlAction::Extensions(extensions_msg)); } + #[cfg(feature = "partial_messages")] + let partial_message = rpc.partial.and_then(|partial_proto| { + let Some(topic_id_bytes) = partial_proto.topicID else { + tracing::debug!("Partial message without topic_id, discarding"); + return None; + }; + let topic_id = TopicHash::from_raw(String::from_utf8_lossy(&topic_id_bytes)); + + let Some(group_id) = partial_proto.groupID else { + tracing::debug!("Partial message without group_id, discarding"); + return None; + }; + + Some(PartialMessage { + topic_id, + group_id, + metadata: partial_proto.partsMetadata, + message: partial_proto.partialMessage, + }) + }); + Ok(Some(HandlerEvent::Message { rpc: RpcIn { messages, @@ -576,9 +611,17 @@ impl Decoder for GossipsubCodec { SubscriptionAction::Unsubscribe }, topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts { + requests_partial: 
sub.requestsPartial.unwrap_or_default(), + supports_partial: sub.supportsPartial.unwrap_or_default(), + }, }) .collect(), control_msgs, + test_extension: rpc.testExtension.map(|_test_extension| TestExtension {}), + #[cfg(feature = "partial_messages")] + partial_message, }, invalid_messages, })) diff --git a/protocols/gossipsub/src/queue.rs b/protocols/gossipsub/src/queue.rs index ff04392e618..24811c70242 100644 --- a/protocols/gossipsub/src/queue.rs +++ b/protocols/gossipsub/src/queue.rs @@ -62,7 +62,7 @@ impl Queue { /// which will only happen for control and non priority messages. pub(crate) fn try_push(&mut self, message: RpcOut) -> Result<(), Box> { match message { - RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) => { + RpcOut::Extensions(_) | RpcOut::Subscribe { .. } | RpcOut::Unsubscribe(_) => { self.priority .try_push(message) .expect("Shared is unbounded"); @@ -74,6 +74,8 @@ impl Queue { RpcOut::Publish { .. } | RpcOut::Forward { .. } | RpcOut::IHave(_) + | RpcOut::PartialMessage { .. } + | RpcOut::TestExtension | RpcOut::IWant(_) => self.non_priority.try_push(message), } } diff --git a/protocols/gossipsub/src/rpc.rs b/protocols/gossipsub/src/rpc.rs index 943df31f599..bc2e82d40f6 100644 --- a/protocols/gossipsub/src/rpc.rs +++ b/protocols/gossipsub/src/rpc.rs @@ -87,11 +87,15 @@ impl Sender { RpcOut::Publish { .. } | RpcOut::Graft(_) | RpcOut::Prune(_) + | RpcOut::Extensions(_) | RpcOut::Subscribe(_) | RpcOut::Unsubscribe(_) => &self.priority_sender, - RpcOut::Forward { .. } | RpcOut::IHave(_) | RpcOut::IWant(_) | RpcOut::IDontWant(_) => { - &self.non_priority_sender - } + RpcOut::Forward { .. } + | RpcOut::IHave(_) + | RpcOut::IWant(_) + | RpcOut::IDontWant(_) + | RpcOut::TestExtension + | RpcOut::PartialMessage { .. } => &self.non_priority_sender, }; sender.try_send(rpc).map_err(|err| err.into_inner()) } diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index c051b6c333b..224de864935 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -211,6 +211,8 @@ impl TopicSubscriptionFilter for RegexSubscriptionFilter { #[cfg(test)] mod test { use super::*; + #[cfg(feature = "partial_messages")] + use crate::types::PartialSubOpts; use crate::types::SubscriptionAction::*; #[test] @@ -225,22 +227,32 @@ mod test { Subscription { action: Unsubscribe, topic_hash: t1.clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Unsubscribe, topic_hash: t2.clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t2, + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t1.clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Unsubscribe, topic_hash: t1, + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, ]; @@ -262,10 +274,14 @@ mod test { Subscription { action: Subscribe, topic_hash: t1, + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t2, + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, ]; @@ -291,14 +307,20 @@ mod test { Subscription { action: Subscribe, topic_hash: t1.clone(), + #[cfg(feature = "partial_messages")] + partial_opts: 
PartialSubOpts::default(), }, Subscription { action: Unsubscribe, topic_hash: t1.clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t1, + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, ]; @@ -324,10 +346,14 @@ mod test { Subscription { action: Subscribe, topic_hash: t[2].clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t[3].clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, ]; @@ -353,22 +379,32 @@ mod test { Subscription { action: Subscribe, topic_hash: t[4].clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t[2].clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t[3].clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Unsubscribe, topic_hash: t[0].clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Unsubscribe, topic_hash: t[1].clone(), + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, ]; @@ -390,10 +426,14 @@ mod test { Subscription { action: Subscribe, topic_hash: t1, + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t2, + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, ]; @@ -416,14 +456,20 @@ mod test { Subscription { action: Subscribe, topic_hash: t1, + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t2, + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, Subscription { action: Subscribe, topic_hash: t3, + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts::default(), }, ]; diff --git a/protocols/gossipsub/src/topic.rs b/protocols/gossipsub/src/topic.rs index 53e9fe2c172..adbdf58637f 100644 --- a/protocols/gossipsub/src/topic.rs +++ b/protocols/gossipsub/src/topic.rs @@ -65,7 +65,7 @@ impl Hasher for Sha256Hash { } } -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] #[cfg_attr( feature = "metrics", derive(prometheus_client::encoding::EncodeLabelSet) diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index bea0786e060..430f0fdface 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -19,6 +19,8 @@ // DEALINGS IN THE SOFTWARE. //! A collection of types using the Gossipsub system. +#[cfg(feature = "partial_messages")] +use std::collections::HashMap; use std::{ collections::BTreeSet, fmt::{self, Debug}, @@ -91,6 +93,8 @@ impl std::fmt::Debug for MessageId { pub(crate) struct PeerDetails { /// The kind of protocol the peer supports. pub(crate) kind: PeerKind, + /// The Extensions supported by the peer if any. + pub(crate) extensions: Option, /// If the peer is an outbound connection. pub(crate) outbound: bool, /// Its current connections. @@ -99,8 +103,66 @@ pub(crate) struct PeerDetails { pub(crate) topics: BTreeSet, /// Don't send messages. 
pub(crate) dont_send: LinkedHashMap, + /// Message queue consumed by the connection handler. pub(crate) messages: Queue, + + /// Peer partial messages. + #[cfg(feature = "partial_messages")] + pub(crate) partial_messages: HashMap, PartialData>>, + + /// Partial options for subscribed topics. + #[cfg(feature = "partial_messages")] + pub(crate) partial_opts: HashMap, +} + +/// Partial options when subscribing to a topic. +#[cfg(feature = "partial_messages")] +#[derive(Debug, Clone, Copy, Default, Eq, Hash, PartialEq)] +pub struct PartialSubOpts { + pub(crate) requests_partial: bool, + pub(crate) supports_partial: bool, +} + +/// Stored `Metadata` for a peer, +/// `Remote` or `Local` depending on who last updated it. +#[cfg(feature = "partial_messages")] +#[derive(Debug)] +pub(crate) enum PeerMetadata { + /// The metadata was updated with data from a remote peer. + Remote(Vec), + /// The metadata was updated by us when publishing a partial message. + Local(Box), +} + +#[cfg(feature = "partial_messages")] +impl AsRef<[u8]> for PeerMetadata { + fn as_ref(&self) -> &[u8] { + match self { + PeerMetadata::Remote(metadata) => metadata, + PeerMetadata::Local(metadata) => metadata.as_slice(), + } + } +} + +/// The partial message data the peer has. +#[cfg(feature = "partial_messages")] +#[derive(Debug)] +pub(crate) struct PartialData { + /// The current peer partial metadata. + pub(crate) metadata: Option, + /// The remaining heartbeats for this message to be deleted. + pub(crate) ttl: usize, +} + +#[cfg(feature = "partial_messages")] +impl Default for PartialData { + fn default() -> Self { + Self { + metadata: Default::default(), + ttl: 5, + } + } } /// Describes the types of peers that can exist in the gossipsub context. @@ -110,6 +172,8 @@ pub(crate) struct PeerDetails { derive(prometheus_client::encoding::EncodeLabelValue) )] pub enum PeerKind { + /// A gossipsub 1.3 peer. + Gossipsubv1_3, /// A gossipsub 1.2 peer. Gossipsubv1_2, /// A gossipsub 1.1 peer. @@ -223,6 +287,9 @@ pub struct Subscription { pub action: SubscriptionAction, /// The topic from which to subscribe or unsubscribe. pub topic_hash: TopicHash, + /// Partial options. + #[cfg(feature = "partial_messages")] + pub partial_opts: PartialSubOpts, } /// Action that a subscription wants to perform. @@ -257,6 +324,8 @@ pub enum ControlAction { /// The node requests us to not forward message ids (peer_id + sequence _number) - IDontWant /// control message. IDontWant(IDontWant), + /// The node has sent us its supported extensions. + Extensions(Option), } /// Node broadcasts known messages per topic - IHave control message. @@ -300,6 +369,30 @@ pub struct IDontWant { pub(crate) message_ids: Vec, } +/// A received partial message. +#[cfg(feature = "partial_messages")] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct PartialMessage { + /// The topic ID this partial message belongs to. + pub topic_id: TopicHash, + /// The group ID that identifies the complete logical message. + pub group_id: Vec, + /// The partial metadata we have and want. + pub metadata: Option>, + /// The partial message itself. + pub message: Option>, +} + +/// The node has sent us the supported Gossipsub Extensions. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct Extensions { + pub(crate) test_extension: Option, + pub(crate) partial_messages: Option, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct TestExtension {} + /// A Gossipsub RPC message sent.
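The behaviour-side bookkeeping for `PartialData` is not part of this hunk; assuming `ttl` counts remaining heartbeats (it defaults to 5), a minimal model of the pruning pass could look like the sketch below, with `GroupId` standing in for the raw group-id bytes.

// Illustrative sketch (not part of this diff).
use std::collections::HashMap;

type GroupId = Vec<u8>;

struct TrackedPartial {
    // Heartbeats left before the entry is dropped (cf. `PartialData::ttl`).
    ttl: usize,
}

// Run once per heartbeat: age every tracked partial message and drop the
// entries whose ttl has run out.
fn prune_on_heartbeat(partials: &mut HashMap<GroupId, TrackedPartial>) {
    partials.retain(|_, entry| {
        entry.ttl = entry.ttl.saturating_sub(1);
        entry.ttl > 0
    });
}

// Seeing fresh data for a group would presumably reset its ttl to the default
// (an assumption, not shown in this diff).
fn refresh(partials: &mut HashMap<GroupId, TrackedPartial>, group: GroupId) {
    partials.entry(group).or_insert(TrackedPartial { ttl: 5 }).ttl = 5;
}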
#[derive(Debug)] pub enum RpcOut { @@ -318,7 +411,11 @@ pub enum RpcOut { timeout: Delay, }, /// Subscribe a topic. - Subscribe(TopicHash), + Subscribe { + topic: TopicHash, + #[cfg(feature = "partial_messages")] + partial_opts: PartialSubOpts, + }, /// Unsubscribe a topic. Unsubscribe(TopicHash), /// Send a GRAFT control message. @@ -332,6 +429,21 @@ pub enum RpcOut { /// The node requests us to not forward message ids (peer_id + sequence _number) - IDontWant /// control message. IDontWant(IDontWant), + /// Send a Extensions control message. + Extensions(Extensions), + /// Send a test extension message. + TestExtension, + /// Send a partial messages extension. + PartialMessage { + /// The group ID that identifies the complete logical message. + group_id: Vec, + /// The topic ID this partial message belongs to. + topic_id: TopicHash, + /// The partial message itself. + message: Option>, + /// The partial metadata we have and want. + metadata: Vec, + }, } impl RpcOut { @@ -345,7 +457,7 @@ impl RpcOut { pub(crate) fn priority(&self) -> bool { matches!( self, - RpcOut::Subscribe(_) + RpcOut::Subscribe { .. } | RpcOut::Unsubscribe(_) | RpcOut::Graft(_) | RpcOut::Prune(_) @@ -362,27 +474,49 @@ impl From for proto::RPC { subscriptions: Vec::new(), publish: vec![message.into()], control: None, + testExtension: None, + partial: None, }, RpcOut::Forward { message, .. } => proto::RPC { publish: vec![message.into()], subscriptions: Vec::new(), control: None, + testExtension: None, + partial: None, }, - RpcOut::Subscribe(topic) => proto::RPC { + RpcOut::Subscribe { + topic, + #[cfg(feature = "partial_messages")] + partial_opts, + } => proto::RPC { publish: Vec::new(), subscriptions: vec![proto::SubOpts { subscribe: Some(true), topic_id: Some(topic.into_string()), + #[cfg(feature = "partial_messages")] + requestsPartial: Some(partial_opts.requests_partial), + #[cfg(not(feature = "partial_messages"))] + requestsPartial: None, + #[cfg(feature = "partial_messages")] + supportsPartial: Some(partial_opts.supports_partial), + #[cfg(not(feature = "partial_messages"))] + supportsPartial: None, }], control: None, + testExtension: None, + partial: None, }, RpcOut::Unsubscribe(topic) => proto::RPC { publish: Vec::new(), subscriptions: vec![proto::SubOpts { subscribe: Some(false), topic_id: Some(topic.into_string()), + requestsPartial: None, + supportsPartial: None, }], control: None, + testExtension: None, + partial: None, }, RpcOut::IHave(IHave { topic_hash, @@ -399,7 +533,10 @@ impl From for proto::RPC { graft: vec![], prune: vec![], idontwant: vec![], + extensions: None, }), + testExtension: None, + partial: None, }, RpcOut::IWant(IWant { message_ids }) => proto::RPC { publish: Vec::new(), @@ -412,7 +549,10 @@ impl From for proto::RPC { graft: vec![], prune: vec![], idontwant: vec![], + extensions: None, }), + testExtension: None, + partial: None, }, RpcOut::Graft(Graft { topic_hash }) => proto::RPC { publish: Vec::new(), @@ -425,7 +565,10 @@ impl From for proto::RPC { }], prune: vec![], idontwant: vec![], + extensions: None, }), + testExtension: None, + partial: None, }, RpcOut::Prune(Prune { topic_hash, @@ -452,7 +595,10 @@ impl From for proto::RPC { backoff, }], idontwant: vec![], + extensions: None, }), + testExtension: None, + partial: None, } } RpcOut::IDontWant(IDontWant { message_ids }) => proto::RPC { @@ -466,6 +612,53 @@ impl From for proto::RPC { idontwant: vec![proto::ControlIDontWant { message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), }], + extensions: None, + }), + 
testExtension: None, + partial: None, + }, + RpcOut::Extensions(Extensions { + partial_messages, + test_extension, + }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: vec![], + idontwant: vec![], + extensions: Some(proto::ControlExtensions { + testExtension: test_extension, + partialMessages: partial_messages, + }), + }), + testExtension: None, + partial: None, + }, + RpcOut::TestExtension => proto::RPC { + subscriptions: vec![], + publish: vec![], + control: None, + testExtension: Some(proto::TestExtension {}), + partial: None, + }, + RpcOut::PartialMessage { + topic_id, + group_id, + metadata, + message, + } => proto::RPC { + subscriptions: vec![], + publish: vec![], + control: None, + testExtension: None, + partial: Some(proto::PartialMessagesExtension { + topicID: Some(topic_id.as_str().as_bytes().to_vec()), + groupID: Some(group_id), + partialMessage: message, + partsMetadata: Some(metadata), }), }, } @@ -481,6 +674,11 @@ pub struct RpcIn { pub subscriptions: Vec, /// List of Gossipsub control messages. pub control_msgs: Vec, + /// Gossipsub test extension. + pub test_extension: Option, + /// Partial messages extension. + #[cfg(feature = "partial_messages")] + pub partial_message: Option, } impl fmt::Debug for RpcIn { @@ -495,6 +693,9 @@ impl fmt::Debug for RpcIn { if !self.control_msgs.is_empty() { b.field("control_msgs", &self.control_msgs); } + #[cfg(feature = "partial_messages")] + b.field("partial_messages", &self.partial_message); + b.finish() } } @@ -507,6 +708,7 @@ impl PeerKind { Self::Gossipsub => "Gossipsub v1.0", Self::Gossipsubv1_1 => "Gossipsub v1.1", Self::Gossipsubv1_2 => "Gossipsub v1.2", + Self::Gossipsubv1_3 => "Gossipsub v1.3", } } } diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index d94297808ba..b474eb4f0aa 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -152,7 +152,7 @@ fn multi_hop_propagation() { // Subscribe each node to the same topic. let topic = gossipsub::IdentTopic::new("test-net"); for node in &mut graph.nodes { - node.behaviour_mut().subscribe(&topic).unwrap(); + node.behaviour_mut().subscribe(&topic, #[cfg(feature = "partial_messages")] false, #[cfg(feature = "partial_messages")] false).unwrap(); } // Wait for all nodes to be subscribed. diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 191585b2f22..f5f44baec74 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -1230,7 +1230,7 @@ where let addrs = peer.multiaddrs.iter().cloned().collect(); query.peers.addresses.insert(peer.node_id, addrs); } - query.on_success(source, others_iter.cloned().map(|kp| kp.node_id)) + query.on_success(source, others_iter.map(|kp| kp.node_id)) } } diff --git a/protocols/upnp/CHANGELOG.md b/protocols/upnp/CHANGELOG.md index 5d451d187d0..ed693bcf93f 100644 --- a/protocols/upnp/CHANGELOG.md +++ b/protocols/upnp/CHANGELOG.md @@ -1,17 +1,15 @@ ## 0.6.0 -- Change `Event::NewExternalAddr` and `Event::ExpiredExternalAddr` from tuple variants to struct variants - that include both local and external addresses. This allows users to correlate which local listen +- Change `Event::NewExternalAddr` and `Event::ExpiredExternalAddr` from tuple variants to struct variants + that include both local and external addresses. This allows users to correlate which local listen address was mapped to which external address. 
- `Event::NewExternalAddr` now contains `local_addr` and `external_addr` fields - `Event::ExpiredExternalAddr` now contains `local_addr` and `external_addr` fields See [PR 6121](https://github.com/libp2p/rust-libp2p/pull/6121). -## 0.5.1 - - Skip port mapping when an active port mapping is present. - Previously, the behavior would skip creating new mappings if any mapping - (active or inactive or pending) existed for the same port. Now it correctly only + Previously, the behavior would skip creating new mappings if any mapping + (active or inactive or pending) existed for the same port. Now it correctly only checks active mappings on the gateway. See [PR 6127](https://github.com/libp2p/rust-libp2p/pull/6127). diff --git a/types.rs b/types.rs new file mode 100644 index 00000000000..5f7c0618c33 --- /dev/null +++ b/types.rs @@ -0,0 +1,710 @@ +// Copyright 2020 Sigma Prime Pty Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! A collection of types using the Gossipsub system. +#[cfg(feature = "partial_messages")] +use std::collections::HashMap; +use std::{ + collections::BTreeSet, + fmt::{self, Debug}, +}; + +use futures_timer::Delay; +use hashlink::LinkedHashMap; +use libp2p_identity::PeerId; +use libp2p_swarm::ConnectionId; +use quick_protobuf::MessageWrite; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +use web_time::Instant; + +use crate::{queue::Queue, rpc_proto::proto, TopicHash}; + +/// Messages that have expired while attempting to be sent to a peer. +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct FailedMessages { + /// The number of messages that were failed to be sent to the priority queue as it was + /// full. + pub priority: usize, + /// The number of messages that were failed to be sent to the non priority queue as it was + /// full. + pub non_priority: usize, +} + +#[derive(Debug)] +/// Validation kinds from the application for received messages. +pub enum MessageAcceptance { + /// The message is considered valid, and it should be delivered and forwarded to the network. + Accept, + /// The message is considered invalid, and it should be rejected and trigger the P₄ penalty. + Reject, + /// The message is neither delivered nor forwarded to the network, but the router does not + /// trigger the P₄ penalty. 
+ Ignore, +} + +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct MessageId(pub Vec); + +impl MessageId { + pub fn new(value: &[u8]) -> Self { + Self(value.to_vec()) + } +} + +impl>> From for MessageId { + fn from(value: T) -> Self { + Self(value.into()) + } +} + +impl std::fmt::Display for MessageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", hex_fmt::HexFmt(&self.0)) + } +} + +impl std::fmt::Debug for MessageId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "MessageId({})", hex_fmt::HexFmt(&self.0)) + } +} + +#[derive(Debug)] +/// Connected peer details. +pub(crate) struct PeerDetails { + /// The kind of protocol the peer supports. + pub(crate) kind: PeerKind, + /// The Extensions supported by the peer if any. + pub(crate) extensions: Option, + /// If the peer is an outbound connection. + pub(crate) outbound: bool, + /// Its current connections. + pub(crate) connections: Vec, + /// Subscribed topics. + pub(crate) topics: BTreeSet, + /// Don't send messages. + pub(crate) dont_send: LinkedHashMap, + + /// Message queue consumed by the connection handler. + pub(crate) messages: Queue, + + /// Peer Partial messages. + #[cfg(feature = "partial_messages")] + pub(crate) partial_messages: HashMap, PartialData>>, + + /// Partial only subscribed topics. + #[cfg(feature = "partial_messages")] + pub(crate) partial_only_topics: BTreeSet, +} + +/// Stored `Metadata` for a peer. +#[cfg(feature = "partial_messages")] +#[derive(Debug)] +pub(crate) enum PeerMetadata { + Remote(Vec), + Local(Box), +} + +#[cfg(feature = "partial_messages")] +impl AsRef<[u8]> for PeerMetadata { + fn as_ref(&self) -> &[u8] { + match self { + PeerMetadata::Remote(metadata) => metadata, + PeerMetadata::Local(metadata) => metadata.as_slice(), + } + } +} + +/// The partial message data the peer has. +#[cfg(feature = "partial_messages")] +#[derive(Debug)] +pub(crate) struct PartialData { + /// The current peer partial metadata. + pub(crate) metadata: Option, + /// The remaining heartbeats for this message to be deleted. + pub(crate) ttl: usize, +} + +#[cfg(feature = "partial_messages")] +impl Default for PartialData { + fn default() -> Self { + Self { + metadata: Default::default(), + ttl: 5, + } + } +} + +/// Describes the types of peers that can exist in the gossipsub context. +#[derive(Debug, Clone, Copy, PartialEq, Hash, Eq)] +#[cfg_attr( + feature = "metrics", + derive(prometheus_client::encoding::EncodeLabelValue) +)] +pub enum PeerKind { + /// A gossipsub 1.3 peer. + Gossipsubv1_3, + /// A gossipsub 1.2 peer. + Gossipsubv1_2, + /// A gossipsub 1.1 peer. + Gossipsubv1_1, + /// A gossipsub 1.0 peer. + Gossipsub, + /// A floodsub peer. + Floodsub, + /// The peer doesn't support any of the protocols. + NotSupported, +} + +/// A message received by the gossipsub system and stored locally in caches.. +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct RawMessage { + /// Id of the peer that published this message. + pub source: Option, + + /// Content of the message. Its meaning is out of scope of this library. + pub data: Vec, + + /// A random sequence number. + pub sequence_number: Option, + + /// The topic this message belongs to + pub topic: TopicHash, + + /// The signature of the message if it's signed. + pub signature: Option>, + + /// The public key of the message if it is signed and the source [`PeerId`] cannot be inlined. 
+ pub key: Option>, + + /// Flag indicating if this message has been validated by the application or not. + pub validated: bool, +} + +impl PeerKind { + /// Returns true if peer speaks any gossipsub version. + pub(crate) fn is_gossipsub(&self) -> bool { + matches!( + self, + Self::Gossipsubv1_2 | Self::Gossipsubv1_1 | Self::Gossipsub + ) + } +} + +impl RawMessage { + /// Calculates the encoded length of this message (used for calculating metrics). + pub fn raw_protobuf_len(&self) -> usize { + let message = proto::Message { + from: self.source.map(|m| m.to_bytes()), + data: Some(self.data.clone()), + seqno: self.sequence_number.map(|s| s.to_be_bytes().to_vec()), + topic: TopicHash::into_string(self.topic.clone()), + signature: self.signature.clone(), + key: self.key.clone(), + }; + message.get_size() + } +} + +impl From for proto::Message { + fn from(raw: RawMessage) -> Self { + proto::Message { + from: raw.source.map(|m| m.to_bytes()), + data: Some(raw.data), + seqno: raw.sequence_number.map(|s| s.to_be_bytes().to_vec()), + topic: TopicHash::into_string(raw.topic), + signature: raw.signature, + key: raw.key, + } + } +} + +/// The message sent to the user after a [`RawMessage`] has been transformed by a +/// [`crate::DataTransform`]. +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct Message { + /// Id of the peer that published this message. + pub source: Option, + + /// Content of the message. + pub data: Vec, + + /// A random sequence number. + pub sequence_number: Option, + + /// The topic this message belongs to + pub topic: TopicHash, +} + +impl fmt::Debug for Message { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Message") + .field( + "data", + &format_args!("{:<20}", &hex_fmt::HexFmt(&self.data)), + ) + .field("source", &self.source) + .field("sequence_number", &self.sequence_number) + .field("topic", &self.topic) + .finish() + } +} + +/// A subscription received by the gossipsub system. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Subscription { + /// Action to perform. + pub action: SubscriptionAction, + /// The topic from which to subscribe or unsubscribe. + pub topic_hash: TopicHash, + /// Peer only wants to receive partial messages instead of full messages. + #[cfg(feature = "partial_messages")] + pub partial: bool, +} + +/// Action that a subscription wants to perform. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum SubscriptionAction { + /// The remote wants to subscribe to the given topic. + Subscribe, + /// The remote wants to unsubscribe from the given topic. + Unsubscribe, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub(crate) struct PeerInfo { + pub(crate) peer_id: Option, + // TODO add this when RFC: Signed Address Records got added to the spec (see pull request + // https://github.com/libp2p/specs/pull/217) + // pub signed_peer_record: ?, +} + +/// A Control message received by the gossipsub system. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum ControlAction { + /// Node broadcasts known messages per topic - IHave control message. + IHave(IHave), + /// The node requests specific message ids (peer_id + sequence _number) - IWant control + /// message. + IWant(IWant), + /// The node has been added to the mesh - Graft control message. + Graft(Graft), + /// The node has been removed from the mesh - Prune control message. + Prune(Prune), + /// The node requests us to not forward message ids (peer_id + sequence _number) - IDontWant + /// control message. 
+ IDontWant(IDontWant), + /// The Node has sent us its supported extensions. + Extensions(Option), +} + +/// Node broadcasts known messages per topic - IHave control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct IHave { + /// The topic of the messages. + pub(crate) topic_hash: TopicHash, + /// A list of known message ids (peer_id + sequence _number) as a string. + pub(crate) message_ids: Vec, +} + +/// The node requests specific message ids (peer_id + sequence _number) - IWant control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct IWant { + /// A list of known message ids (peer_id + sequence _number) as a string. + pub(crate) message_ids: Vec, +} + +/// The node has been added to the mesh - Graft control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Graft { + /// The mesh topic the peer should be added to. + pub(crate) topic_hash: TopicHash, +} + +/// The node has been removed from the mesh - Prune control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct Prune { + /// The mesh topic the peer should be removed from. + pub(crate) topic_hash: TopicHash, + /// A list of peers to be proposed to the removed peer as peer exchange + pub(crate) peers: Vec, + /// The backoff time in seconds before we allow to reconnect + pub(crate) backoff: Option, +} + +/// The node requests us to not forward message ids - IDontWant control message. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct IDontWant { + /// A list of known message ids. + pub(crate) message_ids: Vec, +} + +/// A received partial message. +#[cfg(feature = "partial_messages")] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct PartialMessage { + /// The topic ID this partial message belongs to. + pub topic_id: TopicHash, + /// The group ID that identifies the complete logical message. + pub group_id: Vec, + /// The partial metadata we have and we want. + pub metadata: Option>, + /// The partial message itself. + pub message: Option>, +} + +/// The node has sent us the supported Gossipsub Extensions. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct Extensions { + pub(crate) test_extension: Option, + pub(crate) partial_messages: Option, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct TestExtension {} + +/// A Gossipsub RPC message sent. +#[derive(Debug)] +pub enum RpcOut { + /// Publish a Gossipsub message on network.`timeout` limits the duration the message + /// can wait to be sent before it is abandoned. + Publish { + message_id: MessageId, + message: RawMessage, + timeout: Delay, + }, + /// Forward a Gossipsub message on network. `timeout` limits the duration the message + /// can wait to be sent before it is abandoned. + Forward { + message_id: MessageId, + message: RawMessage, + timeout: Delay, + }, + /// Subscribe a topic. + Subscribe { + topic: TopicHash, + #[cfg(feature = "partial_messages")] + requests_partials: bool, + }, + /// Unsubscribe a topic. + Unsubscribe(TopicHash), + /// Send a GRAFT control message. + Graft(Graft), + /// Send a PRUNE control message. + Prune(Prune), + /// Send a IHave control message. + IHave(IHave), + /// Send a IWant control message. + IWant(IWant), + /// The node requests us to not forward message ids (peer_id + sequence _number) - IDontWant + /// control message. + IDontWant(IDontWant), + /// Send a Extensions control message. + Extensions(Extensions), + /// Send a test extension message. + TestExtension, + /// Send a partial messages extension. 
+ PartialMessage { + /// The group ID that identifies the complete logical message. + group_id: Vec, + /// The topic ID this partial message belongs to. + topic_id: TopicHash, + /// The partial message itself. + message: Option>, + /// The partial metadata we have and want. + metadata: Vec, + }, +} + +impl RpcOut { + /// Converts the GossipsubRPC into its protobuf format. + // A convenience function to avoid explicitly specifying types. + pub fn into_protobuf(self) -> proto::RPC { + self.into() + } + + /// Returns true if the `RpcOut` is priority. + pub(crate) fn priority(&self) -> bool { + matches!( + self, + RpcOut::Subscribe { .. } + | RpcOut::Unsubscribe(_) + | RpcOut::Graft(_) + | RpcOut::Prune(_) + | RpcOut::IDontWant(_) + ) + } +} + +impl From for proto::RPC { + /// Converts the RPC into protobuf format. + fn from(rpc: RpcOut) -> Self { + match rpc { + RpcOut::Publish { message, .. } => proto::RPC { + subscriptions: Vec::new(), + publish: vec![message.into()], + control: None, + testExtension: None, + partial: None, + }, + RpcOut::Forward { message, .. } => proto::RPC { + publish: vec![message.into()], + subscriptions: Vec::new(), + control: None, + testExtension: None, + partial: None, + }, + RpcOut::Subscribe { + topic, + #[cfg(feature = "partial_messages")] + requests_partials: partial_only, + } => proto::RPC { + publish: Vec::new(), + subscriptions: vec![proto::SubOpts { + subscribe: Some(true), + topic_id: Some(topic.into_string()), + #[cfg(not(feature = "partial_messages"))] + partial: None, + #[cfg(feature = "partial_messages")] + requestsPartial: Some(partial_only), + }], + control: None, + testExtension: None, + partial: None, + }, + RpcOut::Unsubscribe(topic) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![proto::SubOpts { + subscribe: Some(false), + topic_id: Some(topic.into_string()), + requestsPartial: None, + }], + control: None, + testExtension: None, + partial: None, + }, + RpcOut::IHave(IHave { + topic_hash, + message_ids, + }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![proto::ControlIHave { + topic_id: Some(topic_hash.into_string()), + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + iwant: vec![], + graft: vec![], + prune: vec![], + idontwant: vec![], + extensions: None, + }), + testExtension: None, + partial: None, + }, + RpcOut::IWant(IWant { message_ids }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![proto::ControlIWant { + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + graft: vec![], + prune: vec![], + idontwant: vec![], + extensions: None, + }), + testExtension: None, + partial: None, + }, + RpcOut::Graft(Graft { topic_hash }) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![], + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![proto::ControlGraft { + topic_id: Some(topic_hash.into_string()), + }], + prune: vec![], + idontwant: vec![], + extensions: None, + }), + testExtension: None, + partial: None, + }, + RpcOut::Prune(Prune { + topic_hash, + peers, + backoff, + }) => { + proto::RPC { + publish: Vec::new(), + subscriptions: vec![], + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: vec![proto::ControlPrune { + topic_id: Some(topic_hash.into_string()), + peers: peers + .into_iter() + .map(|info| proto::PeerInfo { + 
peer_id: info.peer_id.map(|id| id.to_bytes()), + // TODO, see https://github.com/libp2p/specs/pull/217 + signed_peer_record: None, + }) + .collect(), + backoff, + }], + idontwant: vec![], + extensions: None, + }), + testExtension: None, + partial: None, + } + } + RpcOut::IDontWant(IDontWant { message_ids }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: vec![], + idontwant: vec![proto::ControlIDontWant { + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + extensions: None, + }), + testExtension: None, + partial: None, + }, + RpcOut::Extensions(Extensions { + partial_messages, + test_extension, + }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: vec![], + idontwant: vec![], + extensions: Some(proto::ControlExtensions { + testExtension: test_extension, + partialMessages: partial_messages, + }), + }), + testExtension: None, + partial: None, + }, + RpcOut::TestExtension => proto::RPC { + subscriptions: vec![], + publish: vec![], + control: None, + testExtension: Some(proto::TestExtension {}), + partial: None, + }, + RpcOut::PartialMessage { + topic_id, + group_id, + metadata, + message, + } => proto::RPC { + subscriptions: vec![], + publish: vec![], + control: None, + testExtension: None, + partial: Some(proto::PartialMessagesExtension { + topicID: Some(topic_id.as_str().as_bytes().to_vec()), + groupID: Some(group_id), + partialMessage: message, + partsMetadata: Some(metadata), + }), + }, + } + } +} + +/// A Gossipsub RPC message received. +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct RpcIn { + /// List of messages that were part of this RPC query. + pub messages: Vec, + /// List of subscriptions. + pub subscriptions: Vec, + /// List of Gossipsub control messages. + pub control_msgs: Vec, + /// Gossipsub test extension. + pub test_extension: Option, + /// Partial messages extension. 
+ #[cfg(feature = "partial_messages")] + pub partial_message: Option, +} + +impl fmt::Debug for RpcIn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut b = f.debug_struct("GossipsubRpc"); + if !self.messages.is_empty() { + b.field("messages", &self.messages); + } + if !self.subscriptions.is_empty() { + b.field("subscriptions", &self.subscriptions); + } + if !self.control_msgs.is_empty() { + b.field("control_msgs", &self.control_msgs); + } + #[cfg(feature = "partial_messages")] + b.field("partial_messages", &self.partial_message); + + b.finish() + } +} + +impl PeerKind { + pub fn as_static_ref(&self) -> &'static str { + match self { + Self::NotSupported => "Not Supported", + Self::Floodsub => "Floodsub", + Self::Gossipsub => "Gossipsub v1.0", + Self::Gossipsubv1_1 => "Gossipsub v1.1", + Self::Gossipsubv1_2 => "Gossipsub v1.2", + Self::Gossipsubv1_3 => "Gossipsub v1.3", + } + } +} + +impl AsRef for PeerKind { + fn as_ref(&self) -> &str { + self.as_static_ref() + } +} + +impl fmt::Display for PeerKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_ref()) + } +} diff --git a/wasm-tests/webtransport-tests/echo-server/go.mod b/wasm-tests/webtransport-tests/echo-server/go.mod index 4ffb98f386c..a3576a42489 100644 --- a/wasm-tests/webtransport-tests/echo-server/go.mod +++ b/wasm-tests/webtransport-tests/echo-server/go.mod @@ -1,11 +1,11 @@ module echo-server -go 1.24 +go 1.24.6 require ( - github.com/libp2p/go-libp2p v0.41.0 - github.com/multiformats/go-multiaddr v0.15.0 - github.com/quic-go/quic-go v0.50.1 + github.com/libp2p/go-libp2p v0.44.0 + github.com/multiformats/go-multiaddr v0.16.1 + github.com/quic-go/quic-go v0.55.0 ) require ( @@ -16,46 +16,45 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 // indirect github.com/ipfs/go-cid v0.5.0 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/klauspost/compress v1.18.1 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/libp2p/go-netroute v0.2.2 // indirect + github.com/libp2p/go-netroute v0.3.0 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multicodec v0.9.0 // indirect + github.com/multiformats/go-multicodec v0.10.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-multistream v0.6.0 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect + github.com/multiformats/go-multistream v0.6.1 // indirect + github.com/multiformats/go-varint v0.1.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.23.0 // indirect - github.com/prometheus/client_golang v1.21.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - 
github.com/prometheus/common v0.63.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.67.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect + github.com/quic-go/webtransport-go v0.9.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - go.uber.org/mock v0.5.0 // indirect + go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.36.0 // indirect - golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect - golang.org/x/mod v0.24.0 // indirect - golang.org/x/net v0.37.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/text v0.23.0 // indirect - golang.org/x/tools v0.31.0 // indirect - google.golang.org/protobuf v1.36.5 // indirect - lukechampine.com/blake3 v1.4.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + golang.org/x/crypto v0.43.0 // indirect + golang.org/x/exp v0.0.0-20251017212417-90e834f514db // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.46.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/text v0.30.0 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/tools v0.38.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect + lukechampine.com/blake3 v1.4.1 // indirect ) diff --git a/wasm-tests/webtransport-tests/echo-server/go.sum b/wasm-tests/webtransport-tests/echo-server/go.sum index 10c8992f9a3..a994475b9f4 100644 --- a/wasm-tests/webtransport-tests/echo-server/go.sum +++ b/wasm-tests/webtransport-tests/echo-server/go.sum @@ -40,10 +40,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= @@ -61,8 +57,6 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 h1:+J3r2e8+RsmN3vKfo75g0YSY61ms37qzPglu4p0sGro= -github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7/go.mod 
h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -80,8 +74,12 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -95,10 +93,14 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-libp2p v0.41.0 h1:JRaD39dqf/tBBGapJ0T38N73vOaDCsWgcx3mE6HgXWk= github.com/libp2p/go-libp2p v0.41.0/go.mod h1:Be8QYqC4JW6Xq8buukNeoZJjyT1XUDcGoIooCHm1ye4= +github.com/libp2p/go-libp2p v0.44.0 h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqAvs= +github.com/libp2p/go-libp2p v0.44.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8= github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE= +github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc= +github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA= github.com/libp2p/go-yamux/v5 v5.0.0 h1:2djUh96d3Jiac/JpGkKs4TO49YhsfLopAoryfPmf+Po= github.com/libp2p/go-yamux/v5 v5.0.0/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= @@ -120,24 +122,28 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo= github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr v0.16.1 h1:fgJ0Pitow+wWXzN9do+1b8Pyjmo8m5WhGfzpL82MpCw= +github.com/multiformats/go-multiaddr v0.16.1/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= 
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= +github.com/multiformats/go-multicodec v0.10.0 h1:UpP223cig/Cx8J76jWt91njpK3GTAO1w02sdcjZDSuc= +github.com/multiformats/go-multicodec v0.10.0/go.mod h1:wg88pM+s2kZJEQfRCKBNU+g32F5aWBEjyFHXvZLTcLI= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA= github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg= +github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ= +github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOoETFs5dI= +github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/onsi/ginkgo/v2 v2.23.0 h1:FA1xjp8ieYDzlgS5ABTpdUDB7wtngggONc8a7ku2NqQ= -github.com/onsi/ginkgo/v2 v2.23.0/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -145,21 +151,33 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model 
v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
 github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
 github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
+github.com/prometheus/common v0.67.1 h1:OTSON1P4DNxzTg4hmKCc37o4ZAZDv0cfXLkOt0oEowI=
+github.com/prometheus/common v0.67.1/go.mod h1:RpmT9v35q2Y+lsieQsdOh5sXZ6ajUGC8NjZAmr8vb0Q=
 github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
+github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
 github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
 github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
-github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
-github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
+github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg=
+github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
+github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk=
+github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U=
 github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg=
 github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
+github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
+github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -196,6 +214,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
 github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
 github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
 github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
@@ -207,12 +226,15 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
 go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
+go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
 go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
 go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
 go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
 golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
 golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -223,9 +245,13 @@ golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
 golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
+golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
 golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
+golang.org/x/exp v0.0.0-20251017212417-90e834f514db h1:by6IehL4BH5k3e3SJmcoNbOobMey2SLpAF79iPOEBvw=
+golang.org/x/exp v0.0.0-20251017212417-90e834f514db/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
 golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -235,6 +261,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
 golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -250,6 +278,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
 golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
+golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -263,6 +293,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
 golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -277,16 +309,20 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
 golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
+golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
 golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
+golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
+golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -298,6 +334,8 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
 golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -319,6 +357,8 @@ google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
 google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -337,5 +377,7 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w=
 lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0=
+lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
+lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
 sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
 sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=