From 9ecf89151af519186bd14be5e7260f536086c781 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 26 Nov 2025 15:01:47 +0100
Subject: [PATCH 1/3] Add `MessagePaddingPrototype` feature and `UserConfig` option

We add prototypical support for the `option_message_padding` feature
while the BOLTs PR is still underway.
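
As a usage illustration (this sketch is not part of the diff below), the new
knob is toggled on `UserConfig` and surfaces as an optional feature bit via the
public `provided_init_features` helper touched below; a minimal sketch,
assuming the `lightning` crate with this change applied:

    // Illustrative sketch only; assumes the `lightning` crate with this patch applied.
    use lightning::ln::channelmanager::provided_init_features;
    use lightning::util::config::UserConfig;

    fn main() {
        // Message padding is negotiated by default.
        let mut config = UserConfig::default();
        assert!(provided_init_features(&config).supports_message_padding());

        // Heavily bandwidth-restricted nodes can opt out.
        config.enable_message_padding = false;
        assert!(!provided_init_features(&config).supports_message_padding());
    }
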
---
 lightning-types/src/features.rs    | 15 ++++++++++++++-
 lightning/src/ln/channelmanager.rs |  4 ++++
 lightning/src/util/config.rs       | 10 ++++++++++
 3 files changed, 28 insertions(+), 1 deletion(-)

diff --git a/lightning-types/src/features.rs b/lightning-types/src/features.rs
index 05a504ab8ca..6f44a039085 100644
--- a/lightning-types/src/features.rs
+++ b/lightning-types/src/features.rs
@@ -83,6 +83,8 @@
 //!   (see [BOLT PR #1160](https://github.com/lightning/bolts/pull/1160) for more information).
 //! - `HtlcHold` - requires/supports holding HTLCs and forwarding on receipt of an onion message
 //!   (see [BOLT-2](https://github.com/lightning/bolts/pull/989/files) for more information).
+//! - `MessagePaddingPrototype` - requires/supports padding of network messages for improved privacy
+//!   (see [BOLT-1](https://github.com/lightning/bolts/pull/1304) for more information).
 //!
 //! LDK knows about the following features, but does not support them:
 //! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be
@@ -174,7 +176,7 @@
 		// Byte 18
 		,
 		// Byte 19
-		HtlcHold | SplicePrototype,
+		HtlcHold | SplicePrototype | MessagePaddingPrototype,
 	]
 );
 define_context!(
@@ -732,6 +734,17 @@
 		supports_splicing,
 		requires_splicing
 	);
+	define_feature!(
+		157, // BOLTs PR uses 68/69
+		MessagePaddingPrototype,
+		[InitContext],
+		"Feature flags for network message padding.",
+		set_message_padding_optional,
+		set_message_padding_required,
+		clear_message_padding,
+		supports_message_padding,
+		requires_message_padding
+	);
 	define_feature!(
 		259,
 		DnsResolver,
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 399c51b9d9a..54e63dbb371 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -15876,6 +15876,10 @@ pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
 		features.set_htlc_hold_optional();
 	}
 
+	if config.enable_message_padding {
+		features.set_message_padding_optional();
+	}
+
 	features
 }
 
diff --git a/lightning/src/util/config.rs b/lightning/src/util/config.rs
index dd1aaa40424..21e5d228fc1 100644
--- a/lightning/src/util/config.rs
+++ b/lightning/src/util/config.rs
@@ -973,6 +973,14 @@ pub struct UserConfig {
 	///
 	/// [`ChannelManager::splice_channel`]: crate::ln::channelmanager::ChannelManager::splice_channel
 	pub reject_inbound_splices: bool,
+	/// If this is set to `true`, then we will negotiate support for padding network messages with
+	/// our counterparty. If both parties agree, network messages will be padded to a fixed length
+	/// to improve privacy in the face of an adversary monitoring network traffic.
+	///
+	/// Nodes which are heavily bandwidth-restricted might want to set this to `false`.
+	///
+	/// Default value: `true`
+	pub enable_message_padding: bool,
 }
 
 impl Default for UserConfig {
@@ -990,6 +998,7 @@
 			enable_htlc_hold: false,
 			hold_outbound_htlcs_at_next_hop: false,
 			reject_inbound_splices: true,
+			enable_message_padding: true,
 		}
 	}
 }
@@ -1013,6 +1022,7 @@ impl Readable for UserConfig {
 			hold_outbound_htlcs_at_next_hop: Readable::read(reader)?,
 			enable_htlc_hold: Readable::read(reader)?,
 			reject_inbound_splices: Readable::read(reader)?,
+			enable_message_padding: Readable::read(reader)?,
 		})
 	}
 }

From cf70faf41e32670ea996d70226f251ede736d8ad Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 26 Nov 2025 14:46:07 +0100
Subject: [PATCH 2/3] Pad network messages to fixed size when supported

When both parties signal support for `option_message_padding`, we pad
any sent messages to a fixed size to improve privacy in the face of an
adversary monitoring network traffic.

To this end we utilize an optional TLV-stream extension with an odd
field number of `u64::max_value()` that will simply be discarded by the
counterparty.

The padding threshold is chosen to fit even the largest standard
Lightning messages (UpdateAddHtlc) with some leeway to guarantee packet
size uniformity even when some of the optional fields are set. Note
that even without padding we already surpass the standard Ethernet MTU
of 1500 bytes for `UpdateAddHtlc` messages, so fitting the packets into
exactly 1500 bytes is a futile endeavor. Furthermore, note that any
messages above that threshold size will still stand out in monitored
network traffic.

Lastly, we opt to *not* apply padding for any custom messages, as they
might not be set up to handle the optional TLV extension.
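
For illustration (this sketch is not part of the diff below), the appended
extension record is simply a BigSize type of `u64::max_value()` (9 bytes on
the wire), a BigSize length, and that many filler bytes; `append_padding` is a
hypothetical stand-in for the real logic, assuming the publicly exported
`lightning::util::ser` helpers:

    // Illustrative sketch only: append a discardable odd TLV record of
    // `padding_len` filler bytes to an already-serialized message.
    use lightning::util::ser::{BigSize, Writeable};

    fn append_padding(buffer: &mut Vec<u8>, padding_len: usize) {
        // An odd, maximal type is unknown to the counterparty and therefore skipped.
        BigSize(u64::max_value()).write(buffer).expect("Vec writes cannot fail");
        BigSize(padding_len as u64).write(buffer).expect("Vec writes cannot fail");
        buffer.resize(buffer.len() + padding_len, 0);
    }

    fn main() {
        let mut buffer = vec![0u8; 100]; // stand-in for a serialized message
        append_padding(&mut buffer, 20);
        // 9 bytes of type, 1 byte of length (values below 0xfd), 20 filler bytes.
        assert_eq!(buffer.len(), 100 + 9 + 1 + 20);
    }
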
---
 fuzz/src/peer_crypt.rs                     |  2 +-
 lightning/src/ln/peer_channel_encryptor.rs | 75 ++++++++++++++++++++--
 lightning/src/ln/peer_handler.rs           | 25 +++++++-
 3 files changed, 92 insertions(+), 10 deletions(-)

diff --git a/fuzz/src/peer_crypt.rs b/fuzz/src/peer_crypt.rs
index b01aa02400b..1aade1d595a 100644
--- a/fuzz/src/peer_crypt.rs
+++ b/fuzz/src/peer_crypt.rs
@@ -81,7 +81,7 @@ pub fn do_test(data: &[u8]) {
 		if get_slice!(1)[0] == 0 {
 			crypter.encrypt_buffer(MessageBuf::from_encoded(&get_slice!(slice_to_be16(
 				get_slice!(2)
-			))));
+			))), false);
 		} else {
 			let len = match crypter.decrypt_length_header(get_slice!(16 + 2)) {
 				Ok(len) => len,
diff --git a/lightning/src/ln/peer_channel_encryptor.rs b/lightning/src/ln/peer_channel_encryptor.rs
index 09b970a9ab2..bd8243c0c3d 100644
--- a/lightning/src/ln/peer_channel_encryptor.rs
+++ b/lightning/src/ln/peer_channel_encryptor.rs
@@ -9,6 +9,7 @@
 
 use crate::prelude::*;
 
+use crate::io::Write;
 use crate::ln::msgs;
 use crate::ln::msgs::LightningError;
 use crate::ln::wire;
@@ -26,7 +27,7 @@
 use bitcoin::secp256k1::{PublicKey, SecretKey};
 
 use crate::crypto::chacha20poly1305rfc::ChaCha20Poly1305RFC;
 use crate::crypto::utils::hkdf_extract_expand_twice;
-use crate::util::ser::VecWriter;
+use crate::util::ser::{BigSize, VecWriter, Writeable};
 
 use core::ops::Deref;
@@ -555,9 +556,69 @@ impl PeerChannelEncryptor {
 		}
 	}
 
+	fn maybe_add_message_padding(&self, buffer: &mut Vec<u8>) {
+		// In the base case, a serialized UpdateAddHTLC message is 1450 bytes: 32 (channel_id) + 8
+		// (htlc_id) + 8 (amount_msat) + 32 (payment_hash) + 4 (cltv_expiry) + 1366
+		// (onion_routing_packet). When including the additional 2 (encrypted message length) + 16
+		// (encrypted message length MAC) + 2 (type) bytes, this has us at 1470 bytes
+		// pre-encryption. The encryption step adds 16 more bytes for the MAC of the encrypted
+		// message itself, resulting in a 1486-byte TCP payload.
+		//
+		// As this base case however doesn't take into account any potential optional fields that
+		// might be set on UpdateAddHTLC (such as the `path_key` for route blinding or other TLVs),
+		// we opt to add another 50 bytes of leeway to our padding threshold size.
+		//
+		// Note that anything above this threshold won't get padded and will stand out in monitored
+		// network traffic.
+		const PADDING_THRESHOLD_BYTES: usize = 1470 + 50;
+
+		let orig_buffer_len = buffer.len();
+		let padding_len =
+			PADDING_THRESHOLD_BYTES.checked_sub(orig_buffer_len).map_or(0, |expected_len| {
+				// As the TLV's length BigSize grows as we add more padding bytes, we might end up with
+				// slightly larger messages than expected. To that end, we here account for this and
+				// reduce the number of padding bytes by any serialized length of the BigSize beyond 1.
+				//
+				// TODO: This method risks that by subtracting the overhead we fall again just below
+				// the `BigSize` steps which could leak the original padding len (and hence the
+				// original message size). We should look into making this even more exact.
+				let big_size_overhead =
+					BigSize(expected_len as u64).serialized_length().saturating_sub(1);
+				expected_len.saturating_sub(big_size_overhead)
+			});
+
+		// We always add type and length headers so unpadded messages just at
+		// PADDING_THRESHOLD_BYTES don't stand out.
+		BigSize(u64::max_value())
+			.write(buffer)
+			.expect("In-memory messages must never fail to serialize");
+		BigSize(padding_len as u64)
+			.write(buffer)
+			.expect("In-memory messages must never fail to serialize");
+		let mut bytes_written: usize = 0;
+		while bytes_written < padding_len {
+			// Write padding in 32-byte chunks if possible.
+			const PAD_BYTES_LEN: usize = 32;
+			let pad_bytes = [42u8; PAD_BYTES_LEN];
+			let bytes_to_write = (padding_len - bytes_written).min(PAD_BYTES_LEN);
+			buffer
+				.write_all(&pad_bytes[..bytes_to_write])
+				.expect("In-memory messages must never fail to serialize");
+			bytes_written += bytes_to_write;
+		}
+
+		#[cfg(debug_assertions)]
+		if orig_buffer_len < PADDING_THRESHOLD_BYTES {
+			debug_assert_eq!(buffer.len(), PADDING_THRESHOLD_BYTES + 9 + 1);
+		}
+	}
+
 	/// Encrypts the given pre-serialized message, returning the encrypted version.
 	/// panics if msg.len() > 65535 or Noise handshake has not finished.
-	pub fn encrypt_buffer(&mut self, mut msg: MessageBuf) -> Vec<u8> {
+	pub fn encrypt_buffer(&mut self, mut msg: MessageBuf, should_pad: bool) -> Vec<u8> {
+		if should_pad {
+			self.maybe_add_message_padding(&mut msg.0);
+		}
 		self.encrypt_message_with_header_0s(&mut msg.0);
 		msg.0
 	}
@@ -565,13 +626,15 @@ impl PeerChannelEncryptor {
 	/// Encrypts the given message, returning the encrypted version.
 	/// panics if the length of `message`, once encoded, is greater than 65535 or if the Noise
 	/// handshake has not finished.
-	pub fn encrypt_message<M: wire::Type>(&mut self, message: &M) -> Vec<u8> {
+	pub fn encrypt_message<M: wire::Type>(&mut self, message: &M, should_pad: bool) -> Vec<u8> {
 		// Allocate a buffer with 2KB, fitting most common messages. Reserve the first 16+2 bytes
 		// for the 2-byte message type prefix and its MAC.
 		let mut res = VecWriter(Vec::with_capacity(MSG_BUF_ALLOC_SIZE));
 		res.0.resize(16 + 2, 0);
 		wire::write(message, &mut res).expect("In-memory messages must never fail to serialize");
-
+		if should_pad {
+			self.maybe_add_message_padding(&mut res.0);
+		}
 		self.encrypt_message_with_header_0s(&mut res.0);
 		res.0
 	}
@@ -1015,7 +1078,7 @@
 
 		for i in 0..1005 {
 			let msg = [0x68, 0x65, 0x6c, 0x6c, 0x6f];
-			let mut res = outbound_peer.encrypt_buffer(MessageBuf::from_encoded(&msg));
+			let mut res = outbound_peer.encrypt_buffer(MessageBuf::from_encoded(&msg), false);
 			assert_eq!(res.len(), 5 + 2 * 16 + 2);
 
 			let len_header = res[0..2 + 16].to_vec();
@@ -1060,7 +1123,7 @@
 	fn max_message_len_encryption() {
 		let mut outbound_peer = get_outbound_peer_for_initiator_test_vectors();
 		let msg = [4u8; LN_MAX_MSG_LEN + 1];
-		outbound_peer.encrypt_buffer(MessageBuf::from_encoded(&msg));
+		outbound_peer.encrypt_buffer(MessageBuf::from_encoded(&msg), false);
 	}
 
 	#[test]
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index c3b490ef31a..2de64189655 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -1578,9 +1578,14 @@ where
 			}
 			if peer.should_buffer_gossip_broadcast() {
 				if let Some(msg) = peer.gossip_broadcast_buffer.pop_front() {
+					let should_pad = peer.their_node_id.is_some_and(|(peer_id, _)| {
+						let our_features = self.init_features(peer_id);
+						our_features.supports_message_padding()
+					});
+
 					peer.msgs_sent_since_pong += 1;
 					peer.pending_outbound_buffer
-						.push_back(peer.channel_encryptor.encrypt_buffer(msg));
+						.push_back(peer.channel_encryptor.encrypt_buffer(msg, should_pad));
 				}
 			}
 			if peer.should_buffer_gossip_backfill() {
@@ -1739,8 +1744,18 @@
 		} else {
 			debug_assert!(false, "node_id should be set by the time we send a message");
 		}
+
+		let message_padding_supported = their_node_id.is_some_and(|peer_id| {
+			let our_features = self.init_features(peer_id);
+			our_features.supports_message_padding()
+		});
+
+		// Opt out of message padding for custom messages as we're not certain the application
+		// layer protocol can handle TLV extensions.
+		let should_pad = !is_custom_msg(message.type_id()) && message_padding_supported;
 		peer.msgs_sent_since_pong += 1;
-		peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(message));
+		peer.pending_outbound_buffer
+			.push_back(peer.channel_encryptor.encrypt_message(message, should_pad));
 	}
 
 	fn do_read_event(
@@ -3697,6 +3712,10 @@ fn is_gossip_msg(type_id: u16) -> bool {
 	}
 }
 
+fn is_custom_msg(type_id: u16) -> bool {
+	type_id >= 32768
+}
+
 #[cfg(test)]
 mod tests {
 	use super::*;
@@ -4261,7 +4280,7 @@
 		peers[0].read_event(&mut fd_dup, &act_three).unwrap();
 
 		let not_init_msg = msgs::Ping { ponglen: 4, byteslen: 0 };
-		let msg_bytes = dup_encryptor.encrypt_message(&not_init_msg);
+		let msg_bytes = dup_encryptor.encrypt_message(&not_init_msg, false);
 		assert!(peers[0].read_event(&mut fd_dup, &msg_bytes).is_err());
 	}

From 5052f4d2a74ec748c710ca0599953db9050ef25d Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Fri, 28 Nov 2025 10:17:43 +0100
Subject: [PATCH 3/3] Randomize padding bytes

While it shouldn't really make any difference for the Noise protocol,
we avoid taking any chances w.r.t. known-plaintext attacks and opt to
randomize the padding data.
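
For illustration (this sketch is not part of the diff below), the filler bytes
are drawn from a per-connection `RandomBytes` stream; a rough sketch of that
behaviour, assuming the public `lightning::sign::{EntropySource, RandomBytes}`
API this patch builds on:

    // Illustrative sketch only; the real seed is derived from the Noise handshake hash.
    use lightning::sign::{EntropySource, RandomBytes};

    fn main() {
        let seed = [7u8; 32]; // stand-in for the SHA-256-derived per-connection seed
        let padding_entropy_source = RandomBytes::new(seed);

        // Each call yields a fresh 32-byte chunk, matching the 32-byte padding chunks.
        let chunk_a = padding_entropy_source.get_secure_random_bytes();
        let chunk_b = padding_entropy_source.get_secure_random_bytes();
        assert_ne!(chunk_a, chunk_b);
    }
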
---
 lightning/src/ln/peer_channel_encryptor.rs | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/lightning/src/ln/peer_channel_encryptor.rs b/lightning/src/ln/peer_channel_encryptor.rs
index bd8243c0c3d..f388fc81f96 100644
--- a/lightning/src/ln/peer_channel_encryptor.rs
+++ b/lightning/src/ln/peer_channel_encryptor.rs
@@ -13,7 +13,7 @@
 use crate::io::Write;
 use crate::ln::msgs;
 use crate::ln::msgs::LightningError;
 use crate::ln::wire;
-use crate::sign::{NodeSigner, Recipient};
+use crate::sign::{EntropySource, NodeSigner, RandomBytes, Recipient};
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::{Hash, HashEngine};
@@ -106,8 +106,8 @@ enum NoiseState {
 
 pub struct PeerChannelEncryptor {
 	their_node_id: Option<PublicKey>, // filled in for outbound, or inbound after noise_state is Finished
-
 	noise_state: NoiseState,
+	padding_entropy_source: RandomBytes,
 }
 
 impl PeerChannelEncryptor {
@@ -119,6 +119,12 @@ impl PeerChannelEncryptor {
 		sha.input(&their_node_id.serialize()[..]);
 		let h = Sha256::from_engine(sha).to_byte_array();
 
+		let mut padding_seed_engine = Sha256::engine();
+		padding_seed_engine.input(b"LDK MESSAGE PADDING");
+		padding_seed_engine.input(&h);
+		let padding_seed = Sha256::from_engine(padding_seed_engine).to_byte_array();
+		let padding_entropy_source = RandomBytes::new(padding_seed);
+
 		PeerChannelEncryptor {
 			their_node_id: Some(their_node_id),
 			noise_state: NoiseState::InProgress {
@@ -126,6 +132,7 @@ impl PeerChannelEncryptor {
 				directional_state: DirectionalNoiseState::Outbound { ie: ephemeral_key },
 				bidirectional_state: BidirectionalNoiseState { h, ck: NOISE_CK },
 			},
+			padding_entropy_source,
 		}
 	}
@@ -139,6 +146,12 @@ impl PeerChannelEncryptor {
 		sha.input(&our_node_id.serialize()[..]);
 		let h = Sha256::from_engine(sha).to_byte_array();
 
+		let mut padding_seed_engine = Sha256::engine();
+		padding_seed_engine.input(b"LDK MESSAGE PADDING");
+		padding_seed_engine.input(&h);
+		let padding_seed = Sha256::from_engine(padding_seed_engine).to_byte_array();
+		let padding_entropy_source = RandomBytes::new(padding_seed);
+
 		PeerChannelEncryptor {
 			their_node_id: None,
 			noise_state: NoiseState::InProgress {
@@ -150,6 +163,7 @@ impl PeerChannelEncryptor {
 				},
 				bidirectional_state: BidirectionalNoiseState { h, ck: NOISE_CK },
 			},
+			padding_entropy_source,
 		}
 	}
@@ -599,7 +613,7 @@ impl PeerChannelEncryptor {
 		while bytes_written < padding_len {
 			// Write padding in 32-byte chunks if possible.
 			const PAD_BYTES_LEN: usize = 32;
-			let pad_bytes = [42u8; PAD_BYTES_LEN];
+			let pad_bytes = self.padding_entropy_source.get_secure_random_bytes();
 			let bytes_to_write = (padding_len - bytes_written).min(PAD_BYTES_LEN);
 			buffer
 				.write_all(&pad_bytes[..bytes_to_write])