From 31c12f92d2fd5185b779051bd561a216f8e4434d Mon Sep 17 00:00:00 2001 From: benthecarman Date: Tue, 9 Dec 2025 01:37:16 -0600 Subject: [PATCH 01/75] Use actual funding output when constructing shared input/output in splices LDK gives us the actual funding output, so we no longer need to create a dummy one with fake pubkeys. --- src/lib.rs | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index bbae8ac72..853052070 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -138,7 +138,7 @@ use io::utils::write_node_metrics; use lightning::chain::BestBlock; use lightning::events::bump_transaction::{Input, Wallet as LdkWallet}; use lightning::impl_writeable_tlv_based; -use lightning::ln::chan_utils::{make_funding_redeemscript, FUNDING_TRANSACTION_WITNESS_WEIGHT}; +use lightning::ln::chan_utils::FUNDING_TRANSACTION_WITNESS_WEIGHT; use lightning::ln::channel_state::{ChannelDetails as LdkChannelDetails, ChannelShutdownState}; use lightning::ln::channelmanager::PaymentId; use lightning::ln::funding::SpliceContribution; @@ -1267,29 +1267,27 @@ impl Node { const EMPTY_SCRIPT_SIG_WEIGHT: u64 = 1 /* empty script_sig */ * bitcoin::constants::WITNESS_SCALE_FACTOR as u64; - // Used for creating a redeem script for the previous funding txo and the new funding - // txo. Only needed when selecting which UTXOs to include in the funding tx that would - // be sufficient to pay for fees. Hence, the value does not matter. - let dummy_pubkey = PublicKey::from_slice(&[2; 33]).unwrap(); - let funding_txo = channel_details.funding_txo.ok_or_else(|| { log_error!(self.logger, "Failed to splice channel: channel not yet ready",); Error::ChannelSplicingFailed })?; + let funding_output = channel_details.get_funding_output().ok_or_else(|| { + log_error!(self.logger, "Failed to splice channel: channel not yet ready"); + Error::ChannelSplicingFailed + })?; + let shared_input = Input { outpoint: funding_txo.into_bitcoin_outpoint(), - previous_utxo: bitcoin::TxOut { - value: Amount::from_sat(channel_details.channel_value_satoshis), - script_pubkey: make_funding_redeemscript(&dummy_pubkey, &dummy_pubkey) - .to_p2wsh(), - }, + previous_utxo: funding_output.clone(), satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + FUNDING_TRANSACTION_WITNESS_WEIGHT, }; let shared_output = bitcoin::TxOut { value: shared_input.previous_utxo.value + Amount::from_sat(splice_amount_sats), - script_pubkey: make_funding_redeemscript(&dummy_pubkey, &dummy_pubkey).to_p2wsh(), + // This will not actually be the exact same script pubkey after the splice, + // but it is the same size, which is good enough for coin selection purposes. + script_pubkey: funding_output.script_pubkey.clone(), }; let fee_rate = self.fee_estimator.estimate_fee_rate(ConfirmationTarget::ChannelFunding); From a49bdcf4b0b1a7b47c95325d396264d749dc374d Mon Sep 17 00:00:00 2001 From: benthecarman Date: Tue, 9 Dec 2025 01:38:54 -0600 Subject: [PATCH 02/75] Insert channel funding utxo before a splice We insert a channel's funding utxo into our wallet so we can later calculate the fees for the splice transaction; otherwise, our wallet would have incomplete information. We do it before the splice, as we only really need this information for splices and not for all channels.
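For illustration, this is roughly the BDK mechanism the change relies on (a sketch against `bdk_wallet`, not ldk-node code; `fee_of_splice` and `splice_tx` are hypothetical names):

    use bdk_wallet::Wallet;
    use bitcoin::{Amount, OutPoint, Transaction, TxOut};

    // Once the wallet knows the TxOut spent by the channel's funding input,
    // it can attribute a value to that input and compute input-sum minus
    // output-sum for the splice transaction; without it, fee calculation
    // fails because of the unknown (non-wallet) input.
    fn fee_of_splice(
        wallet: &mut Wallet, funding_txo: OutPoint, funding_output: TxOut,
        splice_tx: &Transaction,
    ) -> Option<Amount> {
        wallet.insert_txout(funding_txo, funding_output);
        wallet.calculate_fee(splice_tx).ok()
    }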
--- src/lib.rs | 16 ++++++++++++++ src/wallet/mod.rs | 15 ++++++++++++- tests/integration_tests_rust.rs | 37 +++++++++++++++++++++++++-------- 3 files changed, 58 insertions(+), 10 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 853052070..f2af5f168 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1303,6 +1303,10 @@ impl Node { Error::ChannelSplicingFailed })?; + // insert channel's funding utxo into the wallet so we can later calculate fees + // correctly when viewing this splice-in. + self.wallet.insert_txo(funding_txo.into_bitcoin_outpoint(), funding_output)?; + let change_address = self.wallet.get_new_internal_address()?; let contribution = SpliceContribution::SpliceIn { @@ -1398,6 +1402,18 @@ impl Node { }, }; + let funding_txo = channel_details.funding_txo.ok_or_else(|| { + log_error!(self.logger, "Failed to splice channel: channel not yet ready",); + Error::ChannelSplicingFailed + })?; + + let funding_output = channel_details.get_funding_output().ok_or_else(|| { + log_error!(self.logger, "Failed to splice channel: channel not yet ready"); + Error::ChannelSplicingFailed + })?; + + self.wallet.insert_txo(funding_txo.into_bitcoin_outpoint(), funding_output)?; + self.channel_manager .splice_channel( &channel_details.channel_id, diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 2f8daa500..a8e791f34 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -26,7 +26,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; use bitcoin::secp256k1::{All, PublicKey, Scalar, Secp256k1, SecretKey}; use bitcoin::{ - Address, Amount, FeeRate, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, Weight, + Address, Amount, FeeRate, OutPoint, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, Weight, WitnessProgram, WitnessVersion, }; use lightning::chain::chaininterface::BroadcasterInterface; @@ -153,6 +153,19 @@ impl Wallet { Ok(()) } + pub(crate) fn insert_txo(&self, outpoint: OutPoint, txout: TxOut) -> Result<(), Error> { + let mut locked_wallet = self.inner.lock().unwrap(); + locked_wallet.insert_txout(outpoint, txout); + + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + + Ok(()) + } + fn update_payment_store<'a>( &self, locked_wallet: &'a mut PersistedWallet, ) -> Result<(), Error> { diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 7c1ed8344..c821ae630 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -927,10 +927,13 @@ async fn concurrent_connections_succeed() { } } -#[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn splice_channel() { +async fn run_splice_channel_test(bitcoind_chain_source: bool) { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = if bitcoind_chain_source { + TestChainSource::BitcoindRpcSync(&bitcoind) + } else { + TestChainSource::Esplora(&electrsd) + }; let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); @@ -995,7 +998,7 @@ async fn splice_channel() { // Splice-in funds for Node B so that it has outbound liquidity to make a payment node_b.splice_in(&user_channel_id_b, node_a.node_id(), 4_000_000).unwrap(); - expect_splice_pending_event!(node_a, node_b.node_id()); + let txo = 
expect_splice_pending_event!(node_a, node_b.node_id()); expect_splice_pending_event!(node_b, node_a.node_id()); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; @@ -1006,11 +1009,16 @@ async fn splice_channel() { expect_channel_ready_event!(node_a, node_b.node_id()); expect_channel_ready_event!(node_b, node_a.node_id()); - let splice_in_fee_sat = 252; + let expected_splice_in_fee_sat = 252; + + let payments = node_b.list_payments(); + let payment = + payments.into_iter().find(|p| p.id == PaymentId(txo.txid.to_byte_array())).unwrap(); + assert_eq!(payment.fee_paid_msat, Some(expected_splice_in_fee_sat * 1_000)); assert_eq!( node_b.list_balances().total_onchain_balance_sats, - premine_amount_sat - 4_000_000 - splice_in_fee_sat + premine_amount_sat - 4_000_000 - expected_splice_in_fee_sat ); assert_eq!(node_b.list_balances().total_lightning_balance_sats, 4_000_000); @@ -1033,7 +1041,7 @@ async fn splice_channel() { let address = node_a.onchain_payment().new_address().unwrap(); node_a.splice_out(&user_channel_id_a, node_b.node_id(), &address, amount_msat / 1000).unwrap(); - expect_splice_pending_event!(node_a, node_b.node_id()); + let txo = expect_splice_pending_event!(node_a, node_b.node_id()); expect_splice_pending_event!(node_b, node_a.node_id()); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; @@ -1044,7 +1052,12 @@ async fn splice_channel() { expect_channel_ready_event!(node_a, node_b.node_id()); expect_channel_ready_event!(node_b, node_a.node_id()); - let splice_out_fee_sat = 183; + let expected_splice_out_fee_sat = 183; + + let payments = node_a.list_payments(); + let payment = + payments.into_iter().find(|p| p.id == PaymentId(txo.txid.to_byte_array())).unwrap(); + assert_eq!(payment.fee_paid_msat, Some(expected_splice_out_fee_sat * 1_000)); assert_eq!( node_a.list_balances().total_onchain_balance_sats, @@ -1052,10 +1065,16 @@ async fn splice_channel() { ); assert_eq!( node_a.list_balances().total_lightning_balance_sats, - 4_000_000 - closing_transaction_fee_sat - anchor_output_sat - splice_out_fee_sat + 4_000_000 - closing_transaction_fee_sat - anchor_output_sat - expected_splice_out_fee_sat ); } +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn splice_channel() { + run_splice_channel_test(false).await; + run_splice_channel_test(true).await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn simple_bolt12_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); From 9b325ff835a6664523376e9fc610f786785bb2ff Mon Sep 17 00:00:00 2001 From: benthecarman Date: Wed, 3 Dec 2025 13:50:53 -0600 Subject: [PATCH 03/75] Add funding_redeem_script to ChannelDetails Exposes the funding_redeem_script that LDK already exposes --- bindings/ldk_node.udl | 5 +++++ src/error.rs | 3 +++ src/ffi/types.rs | 18 +++++++++++++++++- src/types.rs | 12 +++++++++++- 4 files changed, 36 insertions(+), 2 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index c4ebf56a6..e89158b59 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -339,6 +339,7 @@ enum NodeError { "InvalidNodeAlias", "InvalidDateTime", "InvalidFeeRate", + "InvalidScriptPubKey", "DuplicatePayment", "UnsupportedCurrency", "InsufficientFunds", @@ -575,6 +576,7 @@ dictionary ChannelDetails { ChannelId channel_id; PublicKey counterparty_node_id; OutPoint? funding_txo; + ScriptBuf? funding_redeem_script; u64? short_channel_id; u64? outbound_scid_alias; u64? 
inbound_scid_alias; @@ -901,3 +903,6 @@ typedef string LSPS1OrderId; [Custom] typedef string LSPSDateTime; + +[Custom] +typedef string ScriptBuf; diff --git a/src/error.rs b/src/error.rs index 20b1cceab..55e180c15 100644 --- a/src/error.rs +++ b/src/error.rs @@ -113,6 +113,8 @@ pub enum Error { InvalidDateTime, /// The given fee rate is invalid. InvalidFeeRate, + /// The given script public key is invalid. + InvalidScriptPubKey, /// A payment with the given hash has already been initiated. DuplicatePayment, /// The provided offer was denominated in an unsupported currency. @@ -186,6 +188,7 @@ impl fmt::Display for Error { Self::InvalidNodeAlias => write!(f, "The given node alias is invalid."), Self::InvalidDateTime => write!(f, "The given date time is invalid."), Self::InvalidFeeRate => write!(f, "The given fee rate is invalid."), + Self::InvalidScriptPubKey => write!(f, "The given script pubkey is invalid."), Self::DuplicatePayment => { write!(f, "A payment with the given hash has already been initiated.") }, diff --git a/src/ffi/types.rs b/src/ffi/types.rs index c69987c96..bd3c2192d 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -20,7 +20,7 @@ pub use bip39::Mnemonic; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; -pub use bitcoin::{Address, BlockHash, FeeRate, Network, OutPoint, Txid}; +pub use bitcoin::{Address, BlockHash, FeeRate, Network, OutPoint, ScriptBuf, Txid}; pub use lightning::chain::channelmonitor::BalanceSource; pub use lightning::events::{ClosureReason, PaymentFailureReason}; use lightning::ln::channelmanager::PaymentId; @@ -106,6 +106,22 @@ impl UniffiCustomTypeConverter for Address { } } +impl UniffiCustomTypeConverter for ScriptBuf { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result<Self> { + if let Ok(key) = ScriptBuf::from_hex(&val) { + return Ok(key); + } + + Err(Error::InvalidScriptPubKey.into()) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} + #[derive(Debug, Clone, PartialEq, Eq)] pub enum OfferAmount { Bitcoin { amount_msats: u64 }, diff --git a/src/types.rs b/src/types.rs index 38519eca7..c1b7ec64f 100644 --- a/src/types.rs +++ b/src/types.rs @@ -9,7 +9,7 @@ use std::fmt; use std::sync::{Arc, Mutex}; use bitcoin::secp256k1::PublicKey; -use bitcoin::OutPoint; +use bitcoin::{OutPoint, ScriptBuf}; use lightning::chain::chainmonitor; use lightning::impl_writeable_tlv_based; use lightning::ln::channel_state::ChannelDetails as LdkChannelDetails; @@ -222,6 +222,15 @@ pub struct ChannelDetails { /// state until the splice transaction reaches sufficient confirmations to be locked (and we /// exchange `splice_locked` messages with our peer). pub funding_txo: Option<OutPoint>, + /// The witness script that is used to lock the channel's funding output to commitment transactions. + /// + /// This field will be `None` if we have not yet negotiated the funding transaction with our + /// counterparty. + /// + /// When a channel is spliced, this continues to refer to the original pre-splice channel + /// state until the splice transaction reaches sufficient confirmations to be locked (and we + /// exchange `splice_locked` messages with our peer). + pub funding_redeem_script: Option<ScriptBuf>, /// The position of the funding transaction in the chain. None if the funding transaction has /// not yet been confirmed and the channel fully opened. 
/// @@ -378,6 +387,7 @@ impl From for ChannelDetails { channel_id: value.channel_id, counterparty_node_id: value.counterparty.node_id, funding_txo: value.funding_txo.map(|o| o.into_bitcoin_outpoint()), + funding_redeem_script: value.funding_redeem_script, short_channel_id: value.short_channel_id, outbound_scid_alias: value.outbound_scid_alias, inbound_scid_alias: value.inbound_scid_alias, From d47958a3616e51f1d7df861fb07b956ff81192da Mon Sep 17 00:00:00 2001 From: Chuks Agbakuru Date: Sat, 2 Aug 2025 17:08:31 +0100 Subject: [PATCH 04/75] Refactor unified_qr to use bitcoin-payment-instructions Refactor the unified_qr.rs module into unified.rs to provide a single API for sending payments to BIP 21/321 URIs and BIP 353 HRNs. This change simplifies the user interface by leveraging the bitcoin-payment-instructions library for parsing. Key changes: - Rename UnifiedQrPayment to UnifiedPayment. - Rename QRPaymentResult to UnifiedPaymentResult. - Update the send method to support both URIs and HRNs. - Update integration tests to match the new unified flow. --- Cargo.toml | 2 + bindings/ldk_node.udl | 18 ++- src/builder.rs | 16 +- src/error.rs | 5 + src/ffi/types.rs | 71 ++++++++- src/lib.rs | 22 ++- src/payment/bolt12.rs | 44 +++++- src/payment/mod.rs | 4 +- src/payment/{unified_qr.rs => unified.rs} | 182 ++++++++++++++++------ src/types.rs | 6 +- tests/integration_tests_rust.rs | 85 +++++----- 11 files changed, 344 insertions(+), 111 deletions(-) rename src/payment/{unified_qr.rs => unified.rs} (77%) diff --git a/Cargo.toml b/Cargo.toml index 59ad2b767..1f35d142e 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,8 @@ log = { version = "0.4.22", default-features = false, features = ["std"]} vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} +#bitcoin-payment-instructions = { version = "0.6" } +bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", branch = "2025-12-ldk-node-base" } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index e89158b59..276ffcfba 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -149,7 +149,7 @@ interface Node { Bolt12Payment bolt12_payment(); SpontaneousPayment spontaneous_payment(); OnchainPayment onchain_payment(); - UnifiedQrPayment unified_qr_payment(); + UnifiedPayment unified_payment(); LSPS1Liquidity lsps1_liquidity(); [Throws=NodeError] void connect(PublicKey node_id, SocketAddress address, boolean persist); @@ -275,11 +275,11 @@ interface FeeRate { u64 to_sat_per_vb_ceil(); }; -interface UnifiedQrPayment { +interface UnifiedPayment { [Throws=NodeError] string receive(u64 amount_sats, [ByRef]string message, u32 expiry_sec); - [Throws=NodeError] - QrPaymentResult send([ByRef]string uri_str, RouteParametersConfig? route_parameters); + [Throws=NodeError, Async] + UnifiedPaymentResult send([ByRef]string uri_str, u64? amount_msat, RouteParametersConfig? route_parameters); }; interface LSPS1Liquidity { @@ -347,6 +347,7 @@ enum NodeError { "LiquidityFeeTooHigh", "InvalidBlindedPaths", "AsyncPaymentServicesDisabled", + "HrnParsingFailed", }; dictionary NodeStatus { @@ -456,7 +457,7 @@ interface PaymentKind { }; [Enum] -interface QrPaymentResult { +interface UnifiedPaymentResult { Onchain(Txid txid); Bolt11(PaymentId payment_id); Bolt12(PaymentId payment_id); @@ -809,6 +810,13 @@ interface Offer { PublicKey? 
issuer_signing_pubkey(); }; +interface HumanReadableName { + [Throws=NodeError, Name=from_encoded] + constructor([ByRef] string encoded); + string user(); + string domain(); +}; + [Traits=(Debug, Display, Eq)] interface Refund { [Throws=NodeError, Name=from_str] diff --git a/src/builder.rs b/src/builder.rs index ff84505b4..08ac123fa 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -19,6 +19,9 @@ use bitcoin::bip32::{ChildNumber, Xpriv}; use bitcoin::key::Secp256k1; use bitcoin::secp256k1::PublicKey; use bitcoin::{BlockHash, Network}; + +use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; + use lightning::chain::{chainmonitor, BestBlock, Watch}; use lightning::io::Cursor; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; @@ -1439,6 +1442,8 @@ fn build_with_store_internal( })?; } + let hrn_resolver = Arc::new(LDKOnionMessageDNSSECHrnResolver::new(Arc::clone(&network_graph))); + // Initialize the PeerManager let onion_messenger: Arc = if let Some(AsyncPaymentsRole::Server) = async_payments_role { @@ -1450,7 +1455,7 @@ fn build_with_store_internal( message_router, Arc::clone(&channel_manager), Arc::clone(&channel_manager), - IgnoringMessageHandler {}, + Arc::clone(&hrn_resolver), IgnoringMessageHandler {}, )) } else { @@ -1462,7 +1467,7 @@ fn build_with_store_internal( message_router, Arc::clone(&channel_manager), Arc::clone(&channel_manager), - IgnoringMessageHandler {}, + Arc::clone(&hrn_resolver), IgnoringMessageHandler {}, )) }; @@ -1594,6 +1599,12 @@ fn build_with_store_internal( Arc::clone(&keys_manager), )); + let peer_manager_clone = Arc::clone(&peer_manager); + + hrn_resolver.register_post_queue_action(Box::new(move || { + peer_manager_clone.process_events(); + })); + liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::clone(&peer_manager))); gossip_source.set_gossip_verifier( @@ -1701,6 +1712,7 @@ fn build_with_store_internal( node_metrics, om_mailbox, async_payments_role, + hrn_resolver, }) } diff --git a/src/error.rs b/src/error.rs index 55e180c15..ea0bcca3b 100644 --- a/src/error.rs +++ b/src/error.rs @@ -129,6 +129,8 @@ pub enum Error { InvalidBlindedPaths, /// Asynchronous payment services are disabled. AsyncPaymentServicesDisabled, + /// Parsing a Human-Readable Name has failed. + HrnParsingFailed, } impl fmt::Display for Error { @@ -208,6 +210,9 @@ impl fmt::Display for Error { Self::AsyncPaymentServicesDisabled => { write!(f, "Asynchronous payment services are disabled.") }, + Self::HrnParsingFailed => { + write!(f, "Failed to parse a human-readable name.") + }, } } } diff --git a/src/ffi/types.rs b/src/ffi/types.rs index bd3c2192d..a5ff8372f 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -55,7 +55,10 @@ pub use crate::logger::{LogLevel, LogRecord, LogWriter}; pub use crate::payment::store::{ ConfirmationStatus, LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus, }; -pub use crate::payment::QrPaymentResult; +pub use crate::payment::UnifiedPaymentResult; + +use lightning::onion_message::dns_resolution::HumanReadableName as LdkHumanReadableName; + use crate::{hex_utils, SocketAddress, UniffiCustomTypeConverter, UserChannelId}; impl UniffiCustomTypeConverter for PublicKey { @@ -284,6 +287,72 @@ impl std::fmt::Display for Offer { } } +/// A struct containing the two parts of a BIP 353 Human-Readable Name - the user and domain parts. 
+/// +/// The `user` and `domain` parts combined cannot exceed 231 bytes in length; +/// each DNS label within them must be non-empty and no longer than 63 bytes. +/// +/// If you intend to handle non-ASCII `user` or `domain` parts, you must handle [Homograph Attacks] +/// and do punycode en-/de-coding yourself. This struct will always handle only plain ASCII `user` +/// and `domain` parts. +/// +/// This struct can also be used for LN-Address recipients. +/// +/// [Homograph Attacks]: https://en.wikipedia.org/wiki/IDN_homograph_attack +pub struct HumanReadableName { + pub(crate) inner: LdkHumanReadableName, +} + +impl HumanReadableName { + /// Constructs a new [`HumanReadableName`] from the standard encoding - `user`@`domain`. + /// + /// If `user` includes the standard BIP 353 ₿ prefix it is automatically removed as required by + /// BIP 353. + pub fn from_encoded(encoded: &str) -> Result<Self, Error> { + let hrn = match LdkHumanReadableName::from_encoded(encoded) { + Ok(hrn) => Ok(hrn), + Err(_) => Err(Error::HrnParsingFailed), + }?; + + Ok(Self { inner: hrn }) + } + + /// Gets the `user` part of this Human-Readable Name. + pub fn user(&self) -> String { + self.inner.user().to_string() + } + + /// Gets the `domain` part of this Human-Readable Name. + pub fn domain(&self) -> String { + self.inner.domain().to_string() + } +} + +impl From<LdkHumanReadableName> for HumanReadableName { + fn from(ldk_hrn: LdkHumanReadableName) -> Self { + HumanReadableName { inner: ldk_hrn } + } +} + +impl From<HumanReadableName> for LdkHumanReadableName { + fn from(wrapper: HumanReadableName) -> Self { + wrapper.inner + } +} + +impl Deref for HumanReadableName { + type Target = LdkHumanReadableName; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl AsRef<LdkHumanReadableName> for HumanReadableName { + fn as_ref(&self) -> &LdkHumanReadableName { + self.deref() + } +} + /// A `Refund` is a request to send a [`Bolt12Invoice`] without a preceding [`Offer`]. /// /// Typically, after an invoice is paid, the recipient may publish a refund allowing the sender to diff --git a/src/lib.rs b/src/lib.rs index 0031269dd..b050fba57 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -152,14 +152,15 @@ use payment::asynchronous::om_mailbox::OnionMessageMailbox; use payment::asynchronous::static_invoice_store::StaticInvoiceStore; use payment::{ Bolt11Payment, Bolt12Payment, OnchainPayment, PaymentDetails, SpontaneousPayment, - UnifiedQrPayment, + UnifiedPayment, }; use peer_store::{PeerInfo, PeerStore}; use rand::Rng; use runtime::Runtime; use types::{ Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph, - KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet, + HRNResolver, KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, + Wallet, }; pub use types::{ChannelDetails, CustomTlvRecord, PeerDetails, SyncAndAsyncKVStore, UserChannelId}; pub use { @@ -206,6 +207,7 @@ pub struct Node { node_metrics: Arc<RwLock<NodeMetrics>>, om_mailbox: Option<Arc<OnionMessageMailbox>>, async_payments_role: Option<AsyncPaymentsRole>, + hrn_resolver: Arc<HRNResolver>, } impl Node { @@ -945,34 +947,42 @@ impl Node { /// Returns a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], /// and [BOLT 12] payment options. /// + /// This handler allows you to send payments to these URIs as well as [BIP 353] HRNs. 
+ /// /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki + /// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki #[cfg(not(feature = "uniffi"))] - pub fn unified_qr_payment(&self) -> UnifiedQrPayment { - UnifiedQrPayment::new( + pub fn unified_payment(&self) -> UnifiedPayment { + UnifiedPayment::new( self.onchain_payment().into(), self.bolt11_payment().into(), self.bolt12_payment().into(), Arc::clone(&self.config), Arc::clone(&self.logger), + Arc::clone(&self.hrn_resolver), ) } /// Returns a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], /// and [BOLT 12] payment options. /// + /// This handler allows you to send payments to these URIs as well as [BIP 353] HRNs. + /// /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki + /// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki #[cfg(feature = "uniffi")] - pub fn unified_qr_payment(&self) -> Arc { - Arc::new(UnifiedQrPayment::new( + pub fn unified_payment(&self) -> Arc { + Arc::new(UnifiedPayment::new( self.onchain_payment(), self.bolt11_payment(), self.bolt12_payment(), Arc::clone(&self.config), Arc::clone(&self.logger), + Arc::clone(&self.hrn_resolver), )) } diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 0dd38edca..98f1d21ef 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -15,7 +15,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use lightning::blinded_path::message::BlindedMessagePath; use lightning::ln::channelmanager::{OptionalOfferPaymentParams, PaymentId, Retry}; -use lightning::offers::offer::{Amount, Offer as LdkOffer, Quantity}; +use lightning::offers::offer::{Amount, Offer as LdkOffer, OfferFromHrn, Quantity}; use lightning::offers::parse::Bolt12SemanticError; use lightning::routing::router::RouteParametersConfig; #[cfg(feature = "uniffi")] @@ -45,6 +45,11 @@ type Refund = lightning::offers::refund::Refund; #[cfg(feature = "uniffi")] type Refund = Arc; +#[cfg(not(feature = "uniffi"))] +type HumanReadableName = lightning::onion_message::dns_resolution::HumanReadableName; +#[cfg(feature = "uniffi")] +type HumanReadableName = Arc; + /// A payment handler allowing to create and pay [BOLT 12] offers and refunds. /// /// Should be retrieved by calling [`Node::bolt12_payment`]. @@ -193,6 +198,37 @@ impl Bolt12Payment { pub fn send_using_amount( &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, route_parameters: Option, + ) -> Result { + let payment_id = self.send_using_amount_inner( + offer, + amount_msat, + quantity, + payer_note, + route_parameters, + None, + )?; + Ok(payment_id) + } + + /// Internal helper to send a BOLT12 offer payment given an offer + /// and an amount in millisatoshi. + /// + /// This function contains the core payment logic and is called by + /// [`Self::send_using_amount`] and other internal logic that resolves + /// payment parameters (e.g. [`crate::UnifiedPayment::send`]). + /// + /// It wraps the core LDK `pay_for_offer` logic and handles necessary pre-checks, + /// payment ID generation, and payment details storage. 
+ /// + /// The amount validation logic ensures the provided `amount_msat` is sufficient + /// based on the offer's required amount. + /// + /// If `hrn` is `Some`, the payment is initiated using [`ChannelManager::pay_for_offer_from_hrn`] + /// for offers resolved from a Human-Readable Name ([`HumanReadableName`]). + /// Otherwise, it falls back to the standard offer payment methods. + pub(crate) fn send_using_amount_inner( + &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, + route_parameters: Option, hrn: Option, ) -> Result { if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); @@ -228,7 +264,11 @@ impl Bolt12Payment { retry_strategy, route_params_config: route_parameters, }; - let res = if let Some(quantity) = quantity { + let res = if let Some(hrn) = hrn { + let hrn = maybe_deref(&hrn); + let offer = OfferFromHrn { offer: offer.clone(), hrn: *hrn }; + self.channel_manager.pay_for_offer_from_hrn(&offer, amount_msat, payment_id, params) + } else if let Some(quantity) = quantity { self.channel_manager.pay_for_offer_with_quantity( &offer, Some(amount_msat), diff --git a/src/payment/mod.rs b/src/payment/mod.rs index f629960e1..c82f35c8f 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -13,7 +13,7 @@ mod bolt12; mod onchain; mod spontaneous; pub(crate) mod store; -mod unified_qr; +mod unified; pub use bolt11::Bolt11Payment; pub use bolt12::Bolt12Payment; @@ -22,4 +22,4 @@ pub use spontaneous::SpontaneousPayment; pub use store::{ ConfirmationStatus, LSPFeeLimits, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, }; -pub use unified_qr::{QrPaymentResult, UnifiedQrPayment}; +pub use unified::{UnifiedPayment, UnifiedPaymentResult}; diff --git a/src/payment/unified_qr.rs b/src/payment/unified.rs similarity index 77% rename from src/payment/unified_qr.rs rename to src/payment/unified.rs index 6ebf25563..b1546961b 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified.rs @@ -5,10 +5,13 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -//! Holds a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], and [BOLT 12] payment +//! Holds a payment handler allowing to create [BIP 21] URIs with on-chain, [BOLT 11], and [BOLT 12] payment //! options. //! +//! It also supports sending payments to these URIs as well as to [BIP 353] Human-Readable Names. +//! //! [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +//! [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki //! [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md //! 
[BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md use std::sync::Arc; use std::vec::IntoIter; use bip21::de::ParamKind; use bip21::{DeserializationError, DeserializeParams, Param, SerializeParams}; -use bitcoin::address::{NetworkChecked, NetworkUnchecked}; +use bitcoin::address::NetworkChecked; use bitcoin::{Amount, Txid}; use lightning::ln::channelmanager::PaymentId; use lightning::offers::offer::Offer; +use lightning::onion_message::dns_resolution::HumanReadableName; use lightning::routing::router::RouteParametersConfig; use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; +use bitcoin_payment_instructions::{ + amount::Amount as BPIAmount, PaymentInstructions, PaymentMethod, +}; + use crate::error::Error; use crate::ffi::maybe_wrap; use crate::logger::{log_error, LdkLogger, Logger}; use crate::payment::{Bolt11Payment, Bolt12Payment, OnchainPayment}; +use crate::types::HRNResolver; use crate::Config; type Uri<'a> = bip21::Uri<'a, NetworkChecked, Extras>; @@ -40,26 +49,31 @@ struct Extras { /// A payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], and [BOLT 12] payment /// option. /// -/// Should be retrieved by calling [`Node::unified_qr_payment`] +/// Should be retrieved by calling [`Node::unified_payment`]. +/// +/// It also supports sending payments to these URIs as well as to [BIP 353] Human-Readable Names. /// /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +/// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md -/// [`Node::unified_qr_payment`]: crate::Node::unified_qr_payment -pub struct UnifiedQrPayment { +/// [`Node::unified_payment`]: crate::Node::unified_payment +pub struct UnifiedPayment { onchain_payment: Arc<OnchainPayment>, bolt11_invoice: Arc<Bolt11Payment>, bolt12_payment: Arc<Bolt12Payment>, config: Arc<Config>, logger: Arc<Logger>, + hrn_resolver: Arc<HRNResolver>, } -impl UnifiedQrPayment { +impl UnifiedPayment { pub(crate) fn new( onchain_payment: Arc<OnchainPayment>, bolt11_invoice: Arc<Bolt11Payment>, bolt12_payment: Arc<Bolt12Payment>, config: Arc<Config>, logger: Arc<Logger>, + hrn_resolver: Arc<HRNResolver>, ) -> Self { - Self { onchain_payment, bolt11_invoice, bolt12_payment, config, logger } + Self { onchain_payment, bolt11_invoice, bolt12_payment, config, logger, hrn_resolver } } /// Generates a URI with an on-chain address, [BOLT 11] invoice and [BOLT 12] offer. @@ -129,72 +143,141 @@ impl UnifiedQrPayment { Ok(format_uri(uri)) } - /// Sends a payment given a [BIP 21] URI. + /// Sends a payment given a [BIP 21] URI or [BIP 353] Human-Readable Name. /// /// This method parses the provided URI string and attempts to send the payment. If the URI /// has an offer and/or invoice, it will try to pay the offer first, followed by the invoice. /// If both fail, it falls back to an on-chain payment. /// - /// Returns a `QrPaymentResult` indicating the outcome of the payment. If an error + /// Returns a `UnifiedPaymentResult` indicating the outcome of the payment. If an error /// occurs, an `Error` is returned detailing the issue encountered. /// /// If `route_parameters` are provided they will override the default as well as the /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. 
/// /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki - pub fn send( - &self, uri_str: &str, route_parameters: Option, - ) -> Result { - let uri: bip21::Uri = - uri_str.parse().map_err(|_| Error::InvalidUri)?; - - let uri_network_checked = - uri.clone().require_network(self.config.network).map_err(|_| Error::InvalidNetwork)?; - - if let Some(offer) = uri_network_checked.extras.bolt12_offer { - let offer = maybe_wrap(offer); - match self.bolt12_payment.send(&offer, None, None, route_parameters) { - Ok(payment_id) => return Ok(QrPaymentResult::Bolt12 { payment_id }), - Err(e) => log_error!(self.logger, "Failed to send BOLT12 offer: {:?}. This is part of a unified QR code payment. Falling back to the BOLT11 invoice.", e), - } - } - - if let Some(invoice) = uri_network_checked.extras.bolt11_invoice { - let invoice = maybe_wrap(invoice); - match self.bolt11_invoice.send(&invoice, route_parameters) { - Ok(payment_id) => return Ok(QrPaymentResult::Bolt11 { payment_id }), - Err(e) => log_error!(self.logger, "Failed to send BOLT11 invoice: {:?}. This is part of a unified QR code payment. Falling back to the on-chain transaction.", e), - } - } - - let amount = match uri_network_checked.amount { - Some(amount) => amount, - None => { - log_error!(self.logger, "No amount specified in the URI. Aborting the payment."); - return Err(Error::InvalidAmount); + /// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki + pub async fn send( + &self, uri_str: &str, amount_msat: Option, + route_parameters: Option, + ) -> Result { + let instructions = PaymentInstructions::parse( + uri_str, + self.config.network, + self.hrn_resolver.as_ref(), + false, + ) + .await + .map_err(|e| { + log_error!(self.logger, "Failed to parse payment instructions: {:?}", e); + Error::UriParameterParsingFailed + })?; + + let resolved = match instructions { + PaymentInstructions::ConfigurableAmount(instr) => { + let amount = amount_msat.ok_or_else(|| { + log_error!(self.logger, "No amount specified. Aborting the payment."); + Error::InvalidAmount + })?; + + let amt = BPIAmount::from_milli_sats(amount).map_err(|e| { + log_error!(self.logger, "Error while converting amount : {:?}", e); + Error::InvalidAmount + })?; + + instr.set_amount(amt, self.hrn_resolver.as_ref()).await.map_err(|e| { + log_error!(self.logger, "Failed to set amount: {:?}", e); + Error::InvalidAmount + })? + }, + PaymentInstructions::FixedAmount(instr) => { + if let Some(user_amount) = amount_msat { + if instr.max_amount().map_or(false, |amt| user_amount < amt.milli_sats()) { + log_error!(self.logger, "Amount specified is less than the amount in the parsed URI. 
Aborting the payment."); + return Err(Error::InvalidAmount); + } + } + instr }, }; - let txid = self.onchain_payment.send_to_address( - &uri_network_checked.address, - amount.to_sat(), - None, - )?; + let mut sorted_payment_methods = resolved.methods().to_vec(); + sorted_payment_methods.sort_by_key(|method| match method { + PaymentMethod::LightningBolt12(_) => 0, + PaymentMethod::LightningBolt11(_) => 1, + PaymentMethod::OnChain(_) => 2, + }); + + for method in sorted_payment_methods { + match method { + PaymentMethod::LightningBolt12(offer) => { + let offer = maybe_wrap(offer.clone()); + + let payment_result = if let Ok(hrn) = HumanReadableName::from_encoded(uri_str) { + let hrn = maybe_wrap(hrn.clone()); + self.bolt12_payment.send_using_amount_inner(&offer, amount_msat.unwrap_or(0), None, None, route_parameters, Some(hrn)) + } else if let Some(amount_msat) = amount_msat { + self.bolt12_payment.send_using_amount(&offer, amount_msat, None, None, route_parameters) + } else { + self.bolt12_payment.send(&offer, None, None, route_parameters) + } + .map_err(|e| { + log_error!(self.logger, "Failed to send BOLT12 offer: {:?}. This is part of a unified payment. Falling back to the BOLT11 invoice.", e); + e + }); + + if let Ok(payment_id) = payment_result { + return Ok(UnifiedPaymentResult::Bolt12 { payment_id }); + } + }, + PaymentMethod::LightningBolt11(invoice) => { + let invoice = maybe_wrap(invoice.clone()); + let payment_result = self.bolt11_invoice.send(&invoice, route_parameters) + .map_err(|e| { + log_error!(self.logger, "Failed to send BOLT11 invoice: {:?}. This is part of a unified payment. Falling back to the on-chain transaction.", e); + e + }); + + if let Ok(payment_id) = payment_result { + return Ok(UnifiedPaymentResult::Bolt11 { payment_id }); + } + }, + PaymentMethod::OnChain(address) => { + let amount = resolved.onchain_payment_amount().ok_or_else(|| { + log_error!(self.logger, "No amount specified. Aborting the payment."); + Error::InvalidAmount + })?; + + let amt_sats = amount.sats().map_err(|_| { + log_error!( + self.logger, + "Amount in sats returned an error. Aborting the payment." + ); + Error::InvalidAmount + })?; + + let txid = self.onchain_payment.send_to_address(&address, amt_sats, None)?; + return Ok(UnifiedPaymentResult::Onchain { txid }); + }, + } + } - Ok(QrPaymentResult::Onchain { txid }) + log_error!(self.logger, "Payable methods not found in URI"); + Err(Error::PaymentSendingFailed) } } -/// Represents the result of a payment made using a [BIP 21] QR code. +/// Represents the result of a payment made using a [BIP 21] URI or a [BIP 353] Human-Readable Name. /// /// After a successful on-chain transaction, the transaction ID ([`Txid`]) is returned. /// For BOLT11 and BOLT12 payments, the corresponding [`PaymentId`] is returned. /// /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +/// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki /// [`PaymentId`]: lightning::ln::channelmanager::PaymentId /// [`Txid`]: bitcoin::hash_types::Txid #[derive(Debug)] -pub enum QrPaymentResult { +pub enum UnifiedPaymentResult { /// An on-chain payment. Onchain { /// The transaction ID (txid) of the on-chain payment. 
@@ -310,10 +393,9 @@ impl DeserializationError for Extras { mod tests { use std::str::FromStr; - use bitcoin::{Address, Network}; + use bitcoin::{address::NetworkUnchecked, Address, Network}; - use super::*; - use crate::payment::unified_qr::Extras; + use super::{Amount, Bolt11Invoice, Extras, Offer}; #[test] fn parse_uri() { diff --git a/src/types.rs b/src/types.rs index 7c0e1227a..8835cb424 100644 --- a/src/types.rs +++ b/src/types.rs @@ -29,6 +29,8 @@ use lightning_block_sync::gossip::GossipVerifier; use lightning_liquidity::utils::time::DefaultTimeProvider; use lightning_net_tokio::SocketDescriptor; +use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; + use crate::chain::bitcoind::UtxoSourceClient; use crate::chain::ChainSource; use crate::config::ChannelConfig; @@ -276,10 +278,12 @@ pub(crate) type OnionMessenger = lightning::onion_message::messenger::OnionMesse Arc, Arc, Arc, - IgnoringMessageHandler, + Arc, IgnoringMessageHandler, >; +pub(crate) type HRNResolver = LDKOnionMessageDNSSECHrnResolver, Arc>; + pub(crate) type MessageRouter = lightning::onion_message::messenger::DefaultMessageRouter< Arc, Arc, diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 9b02cd61f..655b5fd94 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -29,7 +29,7 @@ use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig}; use ldk_node::liquidity::LSPS2ServiceConfig; use ldk_node::payment::{ ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, - QrPaymentResult, + UnifiedPaymentResult, }; use ldk_node::{Builder, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; @@ -1545,15 +1545,15 @@ async fn generate_bip21_uri() { // Test 1: Verify URI generation (on-chain + BOLT11) works // even before any channels are opened. This checks the graceful fallback behavior. - let initial_uqr_payment = node_b - .unified_qr_payment() + let initial_uni_payment = node_b + .unified_payment() .receive(expected_amount_sats, "asdf", expiry_sec) .expect("Failed to generate URI"); - println!("Initial URI (no channels): {}", initial_uqr_payment); + println!("Initial URI (no channels): {}", initial_uni_payment); - assert!(initial_uqr_payment.contains("bitcoin:")); - assert!(initial_uqr_payment.contains("lightning=")); - assert!(!initial_uqr_payment.contains("lno=")); // BOLT12 requires channels + assert!(initial_uni_payment.contains("bitcoin:")); + assert!(initial_uni_payment.contains("lightning=")); + assert!(!initial_uni_payment.contains("lno=")); // BOLT12 requires channels premine_and_distribute_funds( &bitcoind.client, @@ -1574,19 +1574,19 @@ async fn generate_bip21_uri() { expect_channel_ready_event!(node_b, node_a.node_id()); // Test 2: Verify URI generation (on-chain + BOLT11 + BOLT12) works after channels are established. 
- let uqr_payment = node_b - .unified_qr_payment() + let uni_payment = node_b + .unified_payment() .receive(expected_amount_sats, "asdf", expiry_sec) .expect("Failed to generate URI"); - println!("Generated URI: {}", uqr_payment); - assert!(uqr_payment.contains("bitcoin:")); - assert!(uqr_payment.contains("lightning=")); - assert!(uqr_payment.contains("lno=")); + println!("Generated URI: {}", uni_payment); + assert!(uni_payment.contains("bitcoin:")); + assert!(uni_payment.contains("lightning=")); + assert!(uni_payment.contains("lno=")); } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn unified_qr_send_receive() { +async fn unified_send_receive_bip21_uri() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); @@ -1624,38 +1624,39 @@ async fn unified_qr_send_receive() { let expected_amount_sats = 100_000; let expiry_sec = 4_000; - let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); - let uri_str = uqr_payment.clone().unwrap(); - let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str, None) { - Ok(QrPaymentResult::Bolt12 { payment_id }) => { - println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); - payment_id - }, - Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { - panic!("Expected Bolt12 payment but got Bolt11"); - }, - Ok(QrPaymentResult::Onchain { txid: _ }) => { - panic!("Expected Bolt12 payment but get On-chain transaction"); - }, - Err(e) => { - panic!("Expected Bolt12 payment but got error: {:?}", e); - }, - }; + let uni_payment = node_b.unified_payment().receive(expected_amount_sats, "asdf", expiry_sec); + let uri_str = uni_payment.clone().unwrap(); + let offer_payment_id: PaymentId = + match node_a.unified_payment().send(&uri_str, None, None).await { + Ok(UnifiedPaymentResult::Bolt12 { payment_id }) => { + println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); + payment_id + }, + Ok(UnifiedPaymentResult::Bolt11 { payment_id: _ }) => { + panic!("Expected Bolt12 payment but got Bolt11"); + }, + Ok(UnifiedPaymentResult::Onchain { txid: _ }) => { + panic!("Expected Bolt12 payment but got On-chain transaction"); + }, + Err(e) => { + panic!("Expected Bolt12 payment but got error: {:?}", e); + }, + }; expect_payment_successful_event!(node_a, Some(offer_payment_id), None); // Cut off the BOLT12 part to fallback to BOLT11. 
let uri_str_without_offer = uri_str.split("&lno=").next().unwrap(); let invoice_payment_id: PaymentId = - match node_a.unified_qr_payment().send(uri_str_without_offer, None) { - Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { + match node_a.unified_payment().send(uri_str_without_offer, None, None).await { + Ok(UnifiedPaymentResult::Bolt12 { payment_id: _ }) => { panic!("Expected Bolt11 payment but got Bolt12"); }, - Ok(QrPaymentResult::Bolt11 { payment_id }) => { + Ok(UnifiedPaymentResult::Bolt11 { payment_id }) => { println!("\nBolt11 payment sent successfully with PaymentID: {:?}", payment_id); payment_id }, - Ok(QrPaymentResult::Onchain { txid: _ }) => { + Ok(UnifiedPaymentResult::Onchain { txid: _ }) => { panic!("Expected Bolt11 payment but got on-chain transaction"); }, Err(e) => { @@ -1665,19 +1666,19 @@ async fn unified_qr_send_receive() { expect_payment_successful_event!(node_a, Some(invoice_payment_id), None); let expect_onchain_amount_sats = 800_000; - let onchain_uqr_payment = - node_b.unified_qr_payment().receive(expect_onchain_amount_sats, "asdf", 4_000).unwrap(); + let onchain_uni_payment = + node_b.unified_payment().receive(expect_onchain_amount_sats, "asdf", 4_000).unwrap(); // Cut off any lightning part to fallback to on-chain only. - let uri_str_without_lightning = onchain_uqr_payment.split("&lightning=").next().unwrap(); - let txid = match node_a.unified_qr_payment().send(&uri_str_without_lightning, None) { - Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { + let uri_str_without_lightning = onchain_uni_payment.split("&lightning=").next().unwrap(); + let txid = match node_a.unified_payment().send(&uri_str_without_lightning, None, None).await { + Ok(UnifiedPaymentResult::Bolt12 { payment_id: _ }) => { panic!("Expected on-chain payment but got Bolt12") }, - Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { + Ok(UnifiedPaymentResult::Bolt11 { payment_id: _ }) => { panic!("Expected on-chain payment but got Bolt11"); }, - Ok(QrPaymentResult::Onchain { txid }) => { + Ok(UnifiedPaymentResult::Onchain { txid }) => { println!("\nOn-chain transaction successful with Txid: {}", txid); txid }, From 13e55ba8057c0673d34a43e00390fc5e329e479d Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 5 Jan 2026 13:22:53 +0100 Subject: [PATCH 05/75] Use async `KVStore` for `read_X` util methods Rather than using `KVStoreSync` we now use the async `KVStore` implementation for most `read_X` util methods used during node building. This is a first step towards making node building/startup entirely async eventually. --- src/builder.rs | 76 +++++++++++++++----------- src/io/utils.rs | 142 ++++++++++++++++++++++++++++-------------------- 2 files changed, 129 insertions(+), 89 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 08ac123fa..63e7df005 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -57,7 +57,9 @@ use crate::fee_estimator::OnchainFeeEstimator; use crate::gossip::GossipSource; use crate::io::sqlite_store::SqliteStore; use crate::io::utils::{ - read_external_pathfinding_scores_from_cache, read_node_metrics, write_node_metrics, + read_event_queue, read_external_pathfinding_scores_from_cache, read_network_graph, + read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_scorer, + write_node_metrics, }; use crate::io::vss_store::VssStoreBuilder; use crate::io::{ @@ -1053,7 +1055,9 @@ fn build_with_store_internal( } // Initialize the status fields. 
- let node_metrics = match read_node_metrics(Arc::clone(&kv_store), Arc::clone(&logger)) { + let node_metrics = match runtime + .block_on(async { read_node_metrics(Arc::clone(&kv_store), Arc::clone(&logger)).await }) + { Ok(metrics) => Arc::new(RwLock::new(metrics)), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1067,7 +1071,9 @@ fn build_with_store_internal( let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger))); let fee_estimator = Arc::new(OnchainFeeEstimator::new()); - let payment_store = match io::utils::read_payments(Arc::clone(&kv_store), Arc::clone(&logger)) { + let payment_store = match runtime + .block_on(async { read_payments(Arc::clone(&kv_store), Arc::clone(&logger)).await }) + { Ok(payments) => Arc::new(PaymentStore::new( payments, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(), @@ -1294,24 +1300,23 @@ fn build_with_store_internal( )); // Initialize the network graph, scorer, and router - let network_graph = - match io::utils::read_network_graph(Arc::clone(&kv_store), Arc::clone(&logger)) { - Ok(graph) => Arc::new(graph), - Err(e) => { - if e.kind() == std::io::ErrorKind::NotFound { - Arc::new(Graph::new(config.network.into(), Arc::clone(&logger))) - } else { - log_error!(logger, "Failed to read network graph from store: {}", e); - return Err(BuildError::ReadFailed); - } - }, - }; + let network_graph = match runtime + .block_on(async { read_network_graph(Arc::clone(&kv_store), Arc::clone(&logger)).await }) + { + Ok(graph) => Arc::new(graph), + Err(e) => { + if e.kind() == std::io::ErrorKind::NotFound { + Arc::new(Graph::new(config.network.into(), Arc::clone(&logger))) + } else { + log_error!(logger, "Failed to read network graph from store: {}", e); + return Err(BuildError::ReadFailed); + } + }, + }; - let local_scorer = match io::utils::read_scorer( - Arc::clone(&kv_store), - Arc::clone(&network_graph), - Arc::clone(&logger), - ) { + let local_scorer = match runtime.block_on(async { + read_scorer(Arc::clone(&kv_store), Arc::clone(&network_graph), Arc::clone(&logger)).await + }) { Ok(scorer) => scorer, Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1327,7 +1332,10 @@ fn build_with_store_internal( let scorer = Arc::new(Mutex::new(CombinedScorer::new(local_scorer))); // Restore external pathfinding scores from cache if possible. 
- match read_external_pathfinding_scores_from_cache(Arc::clone(&kv_store), Arc::clone(&logger)) { + match runtime.block_on(async { + read_external_pathfinding_scores_from_cache(Arc::clone(&kv_store), Arc::clone(&logger)) + .await + }) { Ok(external_scores) => { scorer.lock().unwrap().merge(external_scores, cur_time); log_trace!(logger, "External scores from cache merged successfully"); @@ -1616,14 +1624,17 @@ fn build_with_store_internal( let connection_manager = Arc::new(ConnectionManager::new(Arc::clone(&peer_manager), Arc::clone(&logger))); - let output_sweeper = match io::utils::read_output_sweeper( - Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - Arc::clone(&chain_source), - Arc::clone(&keys_manager), - Arc::clone(&kv_store), - Arc::clone(&logger), - ) { + let output_sweeper = match runtime.block_on(async { + read_output_sweeper( + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + Arc::clone(&chain_source), + Arc::clone(&keys_manager), + Arc::clone(&kv_store), + Arc::clone(&logger), + ) + .await + }) { Ok(output_sweeper) => Arc::new(output_sweeper), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1644,7 +1655,8 @@ fn build_with_store_internal( }, }; - let event_queue = match io::utils::read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger)) + let event_queue = match runtime + .block_on(async { read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger)).await }) { Ok(event_queue) => Arc::new(event_queue), Err(e) => { @@ -1657,7 +1669,9 @@ fn build_with_store_internal( }, }; - let peer_store = match io::utils::read_peer_info(Arc::clone(&kv_store), Arc::clone(&logger)) { + let peer_store = match runtime + .block_on(async { read_peer_info(Arc::clone(&kv_store), Arc::clone(&logger)).await }) + { Ok(peer_store) => Arc::new(peer_store), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { diff --git a/src/io/utils.rs b/src/io/utils.rs index 928d4031b..9b754f32a 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -88,18 +88,21 @@ pub(crate) fn read_or_generate_seed_file( } /// Read a previously persisted [`NetworkGraph`] from the store. -pub(crate) fn read_network_graph( +pub(crate) async fn read_network_graph( kv_store: Arc, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, { - let mut reader = Cursor::new(KVStoreSync::read( - &*kv_store, - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, - )?); + let mut reader = Cursor::new( + KVStore::read( + &*kv_store, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + ) + .await?, + ); NetworkGraph::read(&mut reader, logger.clone()).map_err(|e| { log_error!(logger, "Failed to deserialize NetworkGraph: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize NetworkGraph") @@ -107,19 +110,22 @@ where } /// Read a previously persisted [`ProbabilisticScorer`] from the store. 
-pub(crate) fn read_scorer<G: Deref<Target = NetworkGraph<L>>, L: Deref + Clone>( +pub(crate) async fn read_scorer<G: Deref<Target = NetworkGraph<L>>, L: Deref + Clone>( kv_store: Arc<DynStore>, network_graph: G, logger: L, ) -> Result<ProbabilisticScorer<G, L>, std::io::Error> where L::Target: LdkLogger, { let params = ProbabilisticScoringDecayParameters::default(); - let mut reader = Cursor::new(KVStoreSync::read( - &*kv_store, - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, - )?); + let mut reader = Cursor::new( + KVStore::read( + &*kv_store, + SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + SCORER_PERSISTENCE_KEY, + ) + .await?, + ); let args = (params, network_graph, logger.clone()); ProbabilisticScorer::read(&mut reader, args).map_err(|e| { log_error!(logger, "Failed to deserialize scorer: {}", e); @@ -128,18 +134,21 @@ where } /// Read previously persisted external pathfinding scores from the cache. -pub(crate) fn read_external_pathfinding_scores_from_cache<L: Deref>( +pub(crate) async fn read_external_pathfinding_scores_from_cache<L: Deref>( kv_store: Arc<DynStore>, logger: L, ) -> Result<ChannelLiquidities, std::io::Error> where L::Target: LdkLogger, { - let mut reader = Cursor::new(KVStoreSync::read( - &*kv_store, - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - EXTERNAL_PATHFINDING_SCORES_CACHE_KEY, - )?); + let mut reader = Cursor::new( + KVStore::read( + &*kv_store, + SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + EXTERNAL_PATHFINDING_SCORES_CACHE_KEY, + ) + .await?, + ); ChannelLiquidities::read(&mut reader).map_err(|e| { log_error!(logger, "Failed to deserialize scorer: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize Scorer") }) } @@ -175,18 +184,21 @@ where } /// Read previously persisted events from the store. -pub(crate) fn read_event_queue<L: Deref + Clone>( +pub(crate) async fn read_event_queue<L: Deref + Clone>( kv_store: Arc<DynStore>, logger: L, ) -> Result<EventQueue<L>, std::io::Error> where L::Target: LdkLogger, { - let mut reader = Cursor::new(KVStoreSync::read( - &*kv_store, - EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_KEY, - )?); + let mut reader = Cursor::new( + KVStore::read( + &*kv_store, + EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_KEY, + ) + .await?, + ); EventQueue::read(&mut reader, (kv_store, logger.clone())).map_err(|e| { log_error!(logger, "Failed to deserialize event queue: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize EventQueue") }) } /// Read previously persisted peer info from the store. -pub(crate) fn read_peer_info<L: Deref + Clone>( +pub(crate) async fn read_peer_info<L: Deref + Clone>( kv_store: Arc<DynStore>, logger: L, ) -> Result<PeerStore<L>, std::io::Error> where L::Target: LdkLogger, { - let mut reader = Cursor::new(KVStoreSync::read( - &*kv_store, - PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - PEER_INFO_PERSISTENCE_KEY, - )?); + let mut reader = Cursor::new( + KVStore::read( + &*kv_store, + PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PEER_INFO_PERSISTENCE_KEY, + ) + .await?, + ); PeerStore::read(&mut reader, (kv_store, logger.clone())).map_err(|e| { log_error!(logger, "Failed to deserialize peer store: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize PeerStore") }) } /// Read previously persisted payments information from the store. 
-pub(crate) fn read_payments( +pub(crate) async fn read_payments( kv_store: Arc, logger: L, ) -> Result, std::io::Error> where @@ -221,17 +236,22 @@ where { let mut res = Vec::new(); - for stored_key in KVStoreSync::list( + for stored_key in KVStore::list( &*kv_store, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - )? { - let mut reader = Cursor::new(KVStoreSync::read( - &*kv_store, - PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - &stored_key, - )?); + ) + .await? + { + let mut reader = Cursor::new( + KVStore::read( + &*kv_store, + PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + &stored_key, + ) + .await?, + ); let payment = PaymentDetails::read(&mut reader).map_err(|e| { log_error!(logger, "Failed to deserialize PaymentDetails: {}", e); std::io::Error::new( @@ -245,17 +265,20 @@ where } /// Read `OutputSweeper` state from the store. -pub(crate) fn read_output_sweeper( +pub(crate) async fn read_output_sweeper( broadcaster: Arc, fee_estimator: Arc, chain_data_source: Arc, keys_manager: Arc, kv_store: Arc, logger: Arc, ) -> Result { - let mut reader = Cursor::new(KVStoreSync::read( - &*kv_store, - OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, - OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, - OUTPUT_SWEEPER_PERSISTENCE_KEY, - )?); + let mut reader = Cursor::new( + KVStore::read( + &*kv_store, + OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_KEY, + ) + .await?, + ); let args = ( broadcaster, fee_estimator, @@ -272,18 +295,21 @@ pub(crate) fn read_output_sweeper( Ok(sweeper) } -pub(crate) fn read_node_metrics( +pub(crate) async fn read_node_metrics( kv_store: Arc, logger: L, ) -> Result where L::Target: LdkLogger, { - let mut reader = Cursor::new(KVStoreSync::read( - &*kv_store, - NODE_METRICS_PRIMARY_NAMESPACE, - NODE_METRICS_SECONDARY_NAMESPACE, - NODE_METRICS_KEY, - )?); + let mut reader = Cursor::new( + KVStore::read( + &*kv_store, + NODE_METRICS_PRIMARY_NAMESPACE, + NODE_METRICS_SECONDARY_NAMESPACE, + NODE_METRICS_KEY, + ) + .await?, + ); NodeMetrics::read(&mut reader).map_err(|e| { log_error!(logger, "Failed to deserialize NodeMetrics: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize NodeMetrics") From aba72a2462ed2b0cd16ddcb5dee809e34073dd46 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 5 Jan 2026 14:16:29 +0100 Subject: [PATCH 06/75] Parallelize `read_payments` Previously, we would read entries of our payment store sequentially. This is more or less fine when we read from a local store, but when we read from a remote (e.g., VSS) store, all the latency could result in considerable slowdown during startup. Here, we opt to read store entries in batches. 
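The refill-on-completion pattern generalizes to any store with an async read: keep at most a fixed number of futures in a `JoinSet` and spawn a replacement whenever one completes. Below is a minimal, self-contained sketch of the same shape; `read_one` is a hypothetical stand-in for a single `KVStore::read` call, not the actual trait method.

use tokio::task::JoinSet;

// Hypothetical stand-in for one async store read.
async fn read_one(_key: String) -> Result<Vec<u8>, std::io::Error> {
    Ok(Vec::new())
}

// Read all keys with at most `limit` reads in flight at any time.
async fn read_all_bounded(
    mut keys: Vec<String>, limit: usize,
) -> Result<Vec<Vec<u8>>, std::io::Error> {
    let mut set = JoinSet::new();
    let mut res = Vec::new();

    // Seed the set with up to `limit` concurrent reads.
    while set.len() < limit {
        match keys.pop() {
            Some(key) => { set.spawn(read_one(key)); },
            None => break,
        }
    }

    while let Some(joined) = set.join_next().await {
        // The outer `Result` surfaces task panics/cancellation, the inner
        // one the read itself; bail out early on either.
        let bytes = joined
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??;

        // Spawn a replacement for every finished read to stay at `limit`.
        if let Some(key) = keys.pop() {
            set.spawn(read_one(key));
        }
        res.push(bytes);
    }
    Ok(res)
}

Note that returning early also cancels the in-flight reads, since dropping a `JoinSet` aborts its remaining tasks; the patch below makes this explicit via `abort_all`.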
--- src/builder.rs | 2 -- src/ffi/types.rs | 4 +-- src/io/utils.rs | 59 +++++++++++++++++++++++++++++++++++------- src/payment/unified.rs | 9 +++---- src/types.rs | 3 +-- 5 files changed, 56 insertions(+), 21 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 63e7df005..2aa09a61a 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -19,9 +19,7 @@ use bitcoin::bip32::{ChildNumber, Xpriv}; use bitcoin::key::Secp256k1; use bitcoin::secp256k1::PublicKey; use bitcoin::{BlockHash, Network}; - use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; - use lightning::chain::{chainmonitor, BestBlock, Watch}; use lightning::io::Cursor; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; diff --git a/src/ffi/types.rs b/src/ffi/types.rs index a5ff8372f..bed040fcd 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -29,6 +29,7 @@ use lightning::offers::invoice::Bolt12Invoice as LdkBolt12Invoice; pub use lightning::offers::offer::OfferId; use lightning::offers::offer::{Amount as LdkAmount, Offer as LdkOffer}; use lightning::offers::refund::Refund as LdkRefund; +use lightning::onion_message::dns_resolution::HumanReadableName as LdkHumanReadableName; pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; pub use lightning::routing::router::RouteParametersConfig; use lightning::util::ser::Writeable; @@ -56,9 +57,6 @@ pub use crate::payment::store::{ ConfirmationStatus, LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus, }; pub use crate::payment::UnifiedPaymentResult; - -use lightning::onion_message::dns_resolution::HumanReadableName as LdkHumanReadableName; - use crate::{hex_utils, SocketAddress, UniffiCustomTypeConverter, UserChannelId}; impl UniffiCustomTypeConverter for PublicKey { diff --git a/src/io/utils.rs b/src/io/utils.rs index 9b754f32a..d326827d7 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -236,22 +236,59 @@ where { let mut res = Vec::new(); - for stored_key in KVStore::list( + let mut stored_keys = KVStore::list( &*kv_store, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, ) - .await? - { - let mut reader = Cursor::new( - KVStore::read( + .await?; + + const BATCH_SIZE: usize = 50; + + let mut set = tokio::task::JoinSet::new(); + + // Fill JoinSet with tasks if possible + while set.len() < BATCH_SIZE && !stored_keys.is_empty() { + if let Some(next_key) = stored_keys.pop() { + let fut = KVStore::read( &*kv_store, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - &stored_key, - ) - .await?, - ); + &next_key, + ); + set.spawn(fut); + debug_assert!(set.len() <= BATCH_SIZE); + } + } + + while let Some(read_res) = set.join_next().await { + // Exit early if we get an IO error. + let read_res = read_res + .map_err(|e| { + log_error!(logger, "Failed to read PaymentDetails: {}", e); + set.abort_all(); + e + })? + .map_err(|e| { + log_error!(logger, "Failed to read PaymentDetails: {}", e); + set.abort_all(); + e + })?; + + // Refill set for every finished future, if we still have something to do. + if let Some(next_key) = stored_keys.pop() { + let fut = KVStore::read( + &*kv_store, + PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + &next_key, + ); + set.spawn(fut); + debug_assert!(set.len() <= BATCH_SIZE); + } + + // Handle result. 
+ let mut reader = Cursor::new(read_res); let payment = PaymentDetails::read(&mut reader).map_err(|e| { log_error!(logger, "Failed to deserialize PaymentDetails: {}", e); std::io::Error::new( @@ -261,6 +298,10 @@ where })?; res.push(payment); } + + debug_assert!(set.is_empty()); + debug_assert!(stored_keys.is_empty()); + Ok(res) } diff --git a/src/payment/unified.rs b/src/payment/unified.rs index b1546961b..8225205fd 100644 --- a/src/payment/unified.rs +++ b/src/payment/unified.rs @@ -21,16 +21,14 @@ use bip21::de::ParamKind; use bip21::{DeserializationError, DeserializeParams, Param, SerializeParams}; use bitcoin::address::NetworkChecked; use bitcoin::{Amount, Txid}; +use bitcoin_payment_instructions::amount::Amount as BPIAmount; +use bitcoin_payment_instructions::{PaymentInstructions, PaymentMethod}; use lightning::ln::channelmanager::PaymentId; use lightning::offers::offer::Offer; use lightning::onion_message::dns_resolution::HumanReadableName; use lightning::routing::router::RouteParametersConfig; use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; -use bitcoin_payment_instructions::{ - amount::Amount as BPIAmount, PaymentInstructions, PaymentMethod, -}; - use crate::error::Error; use crate::ffi::maybe_wrap; use crate::logger::{log_error, LdkLogger, Logger}; @@ -393,7 +391,8 @@ impl DeserializationError for Extras { mod tests { use std::str::FromStr; - use bitcoin::{address::NetworkUnchecked, Address, Network}; + use bitcoin::address::NetworkUnchecked; + use bitcoin::{Address, Network}; use super::{Amount, Bolt11Invoice, Extras, Offer}; diff --git a/src/types.rs b/src/types.rs index 8835cb424..5e9cd74c9 100644 --- a/src/types.rs +++ b/src/types.rs @@ -12,6 +12,7 @@ use std::sync::{Arc, Mutex}; use bitcoin::secp256k1::PublicKey; use bitcoin::{OutPoint, ScriptBuf}; +use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; use lightning::chain::chainmonitor; use lightning::impl_writeable_tlv_based; use lightning::ln::channel_state::ChannelDetails as LdkChannelDetails; @@ -29,8 +30,6 @@ use lightning_block_sync::gossip::GossipVerifier; use lightning_liquidity::utils::time::DefaultTimeProvider; use lightning_net_tokio::SocketDescriptor; -use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; - use crate::chain::bitcoind::UtxoSourceClient; use crate::chain::ChainSource; use crate::config::ChannelConfig; From 21f0020049444a7a23e966e48966a58468b3785f Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 7 Jan 2026 09:59:07 +0100 Subject: [PATCH 07/75] Drop unnecessary uses of `io::Cursor` --- src/builder.rs | 6 +- src/io/utils.rs | 137 +++++++++++++++++++------------------------ src/io/vss_store.rs | 9 ++- src/payment/store.rs | 25 +++----- src/scoring.rs | 6 +- 5 files changed, 77 insertions(+), 106 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 2aa09a61a..ce1bd7155 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -21,7 +21,6 @@ use bitcoin::secp256k1::PublicKey; use bitcoin::{BlockHash, Network}; use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; use lightning::chain::{chainmonitor, BestBlock, Watch}; -use lightning::io::Cursor; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; @@ -1386,13 +1385,12 @@ fn build_with_store_internal( // Initialize the ChannelManager 
let channel_manager = { - if let Ok(res) = KVStoreSync::read( + if let Ok(reader) = KVStoreSync::read( &*kv_store, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY, ) { - let mut reader = Cursor::new(res); let channel_monitor_references = channel_monitors.iter().map(|(_, chanmon)| chanmon).collect(); let read_args = ChannelManagerReadArgs::new( @@ -1409,7 +1407,7 @@ fn build_with_store_internal( channel_monitor_references, ); let (_hash, channel_manager) = - <(BlockHash, ChannelManager)>::read(&mut reader, read_args).map_err(|e| { + <(BlockHash, ChannelManager)>::read(&mut &*reader, read_args).map_err(|e| { log_error!(logger, "Failed to read channel manager from store: {}", e); BuildError::ReadFailed })?; diff --git a/src/io/utils.rs b/src/io/utils.rs index d326827d7..15677d096 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -18,7 +18,6 @@ use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; use bdk_chain::ConfirmationBlockTime; use bdk_wallet::ChangeSet as BdkWalletChangeSet; use bitcoin::Network; -use lightning::io::Cursor; use lightning::ln::msgs::DecodeError; use lightning::routing::gossip::NetworkGraph; use lightning::routing::scoring::{ @@ -94,16 +93,14 @@ pub(crate) async fn read_network_graph( where L::Target: LdkLogger, { - let mut reader = Cursor::new( - KVStore::read( - &*kv_store, - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, - ) - .await?, - ); - NetworkGraph::read(&mut reader, logger.clone()).map_err(|e| { + let reader = KVStore::read( + &*kv_store, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + ) + .await?; + NetworkGraph::read(&mut &*reader, logger.clone()).map_err(|e| { log_error!(logger, "Failed to deserialize NetworkGraph: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize NetworkGraph") }) @@ -117,17 +114,15 @@ where L::Target: LdkLogger, { let params = ProbabilisticScoringDecayParameters::default(); - let mut reader = Cursor::new( - KVStore::read( - &*kv_store, - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - SCORER_PERSISTENCE_KEY, - ) - .await?, - ); + let reader = KVStore::read( + &*kv_store, + SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + SCORER_PERSISTENCE_KEY, + ) + .await?; let args = (params, network_graph, logger.clone()); - ProbabilisticScorer::read(&mut reader, args).map_err(|e| { + ProbabilisticScorer::read(&mut &*reader, args).map_err(|e| { log_error!(logger, "Failed to deserialize scorer: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize Scorer") }) @@ -140,16 +135,14 @@ pub(crate) async fn read_external_pathfinding_scores_from_cache( where L::Target: LdkLogger, { - let mut reader = Cursor::new( - KVStore::read( - &*kv_store, - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - EXTERNAL_PATHFINDING_SCORES_CACHE_KEY, - ) - .await?, - ); - ChannelLiquidities::read(&mut reader).map_err(|e| { + let reader = KVStore::read( + &*kv_store, + SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + EXTERNAL_PATHFINDING_SCORES_CACHE_KEY, + ) + .await?; + ChannelLiquidities::read(&mut &*reader).map_err(|e| { log_error!(logger, "Failed to deserialize scorer: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, 
"Failed to deserialize Scorer") }) @@ -190,16 +183,14 @@ pub(crate) async fn read_event_queue( where L::Target: LdkLogger, { - let mut reader = Cursor::new( - KVStore::read( - &*kv_store, - EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_KEY, - ) - .await?, - ); - EventQueue::read(&mut reader, (kv_store, logger.clone())).map_err(|e| { + let reader = KVStore::read( + &*kv_store, + EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_KEY, + ) + .await?; + EventQueue::read(&mut &*reader, (kv_store, logger.clone())).map_err(|e| { log_error!(logger, "Failed to deserialize event queue: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize EventQueue") }) @@ -212,16 +203,14 @@ pub(crate) async fn read_peer_info( where L::Target: LdkLogger, { - let mut reader = Cursor::new( - KVStore::read( - &*kv_store, - PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - PEER_INFO_PERSISTENCE_KEY, - ) - .await?, - ); - PeerStore::read(&mut reader, (kv_store, logger.clone())).map_err(|e| { + let reader = KVStore::read( + &*kv_store, + PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PEER_INFO_PERSISTENCE_KEY, + ) + .await?; + PeerStore::read(&mut &*reader, (kv_store, logger.clone())).map_err(|e| { log_error!(logger, "Failed to deserialize peer store: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize PeerStore") }) @@ -263,7 +252,7 @@ where while let Some(read_res) = set.join_next().await { // Exit early if we get an IO error. - let read_res = read_res + let reader = read_res .map_err(|e| { log_error!(logger, "Failed to read PaymentDetails: {}", e); set.abort_all(); @@ -288,8 +277,7 @@ where } // Handle result. 
- let mut reader = Cursor::new(read_res); - let payment = PaymentDetails::read(&mut reader).map_err(|e| { + let payment = PaymentDetails::read(&mut &*reader).map_err(|e| { log_error!(logger, "Failed to deserialize PaymentDetails: {}", e); std::io::Error::new( std::io::ErrorKind::InvalidData, @@ -311,15 +299,13 @@ pub(crate) async fn read_output_sweeper( chain_data_source: Arc, keys_manager: Arc, kv_store: Arc, logger: Arc, ) -> Result { - let mut reader = Cursor::new( - KVStore::read( - &*kv_store, - OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, - OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, - OUTPUT_SWEEPER_PERSISTENCE_KEY, - ) - .await?, - ); + let reader = KVStore::read( + &*kv_store, + OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_KEY, + ) + .await?; let args = ( broadcaster, fee_estimator, @@ -329,7 +315,7 @@ pub(crate) async fn read_output_sweeper( kv_store, logger.clone(), ); - let (_, sweeper) = <(_, Sweeper)>::read(&mut reader, args).map_err(|e| { + let (_, sweeper) = <(_, Sweeper)>::read(&mut &*reader, args).map_err(|e| { log_error!(logger, "Failed to deserialize OutputSweeper: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize OutputSweeper") })?; @@ -342,16 +328,14 @@ pub(crate) async fn read_node_metrics( where L::Target: LdkLogger, { - let mut reader = Cursor::new( - KVStore::read( - &*kv_store, - NODE_METRICS_PRIMARY_NAMESPACE, - NODE_METRICS_SECONDARY_NAMESPACE, - NODE_METRICS_KEY, - ) - .await?, - ); - NodeMetrics::read(&mut reader).map_err(|e| { + let reader = KVStore::read( + &*kv_store, + NODE_METRICS_PRIMARY_NAMESPACE, + NODE_METRICS_SECONDARY_NAMESPACE, + NODE_METRICS_KEY, + ) + .await?; + NodeMetrics::read(&mut &*reader).map_err(|e| { log_error!(logger, "Failed to deserialize NodeMetrics: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize NodeMetrics") }) @@ -490,7 +474,7 @@ macro_rules! impl_read_write_change_set_type { where L::Target: LdkLogger, { - let bytes = + let reader = match KVStoreSync::read(&*kv_store, $primary_namespace, $secondary_namespace, $key) { Ok(bytes) => bytes, @@ -511,9 +495,8 @@ macro_rules! impl_read_write_change_set_type { }, }; - let mut reader = Cursor::new(bytes); let res: Result, DecodeError> = - Readable::read(&mut reader); + Readable::read(&mut &*reader); match res { Ok(res) => Ok(Some(res.0)), Err(e) => { diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index eb439ed10..b4fdc770a 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -745,11 +745,10 @@ async fn determine_and_write_schema_version( })? .0; - let schema_version: VssSchemaVersion = Readable::read(&mut io::Cursor::new(decrypted)) - .map_err(|e| { - let msg = format!("Failed to decode schema version: {}", e); - Error::new(ErrorKind::Other, msg) - })?; + let schema_version: VssSchemaVersion = Readable::read(&mut &*decrypted).map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; Ok(schema_version) } else { // The schema version wasn't present, this either means we're running for the first time *or* it's V0 pre-migration (predating writing of the schema version). 
diff --git a/src/payment/store.rs b/src/payment/store.rs index 184de2ea9..15e94190c 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -605,7 +605,6 @@ impl StorableObjectUpdate for PaymentDetailsUpdate { #[cfg(test)] mod tests { - use bitcoin::io::Cursor; use lightning::util::ser::Readable; use super::*; @@ -657,16 +656,12 @@ mod tests { let old_bolt11_encoded = old_bolt11_payment.encode(); assert_eq!( old_bolt11_payment, - OldPaymentDetails::read(&mut Cursor::new(old_bolt11_encoded.clone())).unwrap() + OldPaymentDetails::read(&mut &*old_bolt11_encoded.clone()).unwrap() ); - let bolt11_decoded = - PaymentDetails::read(&mut Cursor::new(old_bolt11_encoded)).unwrap(); + let bolt11_decoded = PaymentDetails::read(&mut &*old_bolt11_encoded).unwrap(); let bolt11_reencoded = bolt11_decoded.encode(); - assert_eq!( - bolt11_decoded, - PaymentDetails::read(&mut Cursor::new(bolt11_reencoded)).unwrap() - ); + assert_eq!(bolt11_decoded, PaymentDetails::read(&mut &*bolt11_reencoded).unwrap()); match bolt11_decoded.kind { PaymentKind::Bolt11 { hash: h, preimage: p, secret: s } => { @@ -700,15 +695,14 @@ mod tests { let old_bolt11_jit_encoded = old_bolt11_jit_payment.encode(); assert_eq!( old_bolt11_jit_payment, - OldPaymentDetails::read(&mut Cursor::new(old_bolt11_jit_encoded.clone())).unwrap() + OldPaymentDetails::read(&mut &*old_bolt11_jit_encoded.clone()).unwrap() ); - let bolt11_jit_decoded = - PaymentDetails::read(&mut Cursor::new(old_bolt11_jit_encoded)).unwrap(); + let bolt11_jit_decoded = PaymentDetails::read(&mut &*old_bolt11_jit_encoded).unwrap(); let bolt11_jit_reencoded = bolt11_jit_decoded.encode(); assert_eq!( bolt11_jit_decoded, - PaymentDetails::read(&mut Cursor::new(bolt11_jit_reencoded)).unwrap() + PaymentDetails::read(&mut &*bolt11_jit_reencoded).unwrap() ); match bolt11_jit_decoded.kind { @@ -746,15 +740,14 @@ mod tests { let old_spontaneous_encoded = old_spontaneous_payment.encode(); assert_eq!( old_spontaneous_payment, - OldPaymentDetails::read(&mut Cursor::new(old_spontaneous_encoded.clone())).unwrap() + OldPaymentDetails::read(&mut &*old_spontaneous_encoded.clone()).unwrap() ); - let spontaneous_decoded = - PaymentDetails::read(&mut Cursor::new(old_spontaneous_encoded)).unwrap(); + let spontaneous_decoded = PaymentDetails::read(&mut &*old_spontaneous_encoded).unwrap(); let spontaneous_reencoded = spontaneous_decoded.encode(); assert_eq!( spontaneous_decoded, - PaymentDetails::read(&mut Cursor::new(spontaneous_reencoded)).unwrap() + PaymentDetails::read(&mut &*spontaneous_reencoded).unwrap() ); match spontaneous_decoded.kind { diff --git a/src/scoring.rs b/src/scoring.rs index 6385f2f56..daa5725fa 100644 --- a/src/scoring.rs +++ b/src/scoring.rs @@ -1,4 +1,3 @@ -use std::io::Cursor; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, SystemTime}; @@ -74,15 +73,14 @@ async fn sync_external_scores( return; }, }; - let body = match response.bytes().await { + let reader = match response.bytes().await { Ok(bytes) => bytes, Err(e) => { log_error!(logger, "Failed to read external scores update: {}", e); return; }, }; - let mut reader = Cursor::new(body); - match ChannelLiquidities::read(&mut reader) { + match ChannelLiquidities::read(&mut &*reader) { Ok(liquidities) => { if let Err(e) = write_external_pathfinding_scores_to_cache( Arc::clone(&kv_store), From f1885be7a0ddc0403f1ac3b54da05e146e7e3e71 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 7 Jan 2026 10:19:21 +0100 Subject: [PATCH 08/75] Drop a bunch of unnecessary `Arc::clone`s Previously, we 
consistently handed around `Arc` references for most objects to avoid unnecessary refactoring work. This approach however introduced a bunch of unnecessary allocations through `Arc::clone`. Here we opt to rather use plain references in a bunch of places, reducing the usage of `Arc`s. --- src/builder.rs | 52 +++++++++++++++++++------------------------ src/chain/bitcoind.rs | 34 +++++++++------------------- src/chain/electrum.rs | 26 ++++++++-------------- src/chain/esplora.rs | 26 ++++++++-------------- src/chain/mod.rs | 4 ++-- src/io/utils.rs | 38 +++++++++++++------------------ src/lib.rs | 4 ++-- src/scoring.rs | 15 +++++-------- src/wallet/persist.rs | 27 +++++++--------------- 9 files changed, 85 insertions(+), 141 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index ce1bd7155..e7e2de286 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1053,7 +1053,7 @@ fn build_with_store_internal( // Initialize the status fields. let node_metrics = match runtime - .block_on(async { read_node_metrics(Arc::clone(&kv_store), Arc::clone(&logger)).await }) + .block_on(async { read_node_metrics(&*kv_store, Arc::clone(&logger)).await }) { Ok(metrics) => Arc::new(RwLock::new(metrics)), Err(e) => { @@ -1068,21 +1068,20 @@ fn build_with_store_internal( let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger))); let fee_estimator = Arc::new(OnchainFeeEstimator::new()); - let payment_store = match runtime - .block_on(async { read_payments(Arc::clone(&kv_store), Arc::clone(&logger)).await }) - { - Ok(payments) => Arc::new(PaymentStore::new( - payments, - PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(), - PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE.to_string(), - Arc::clone(&kv_store), - Arc::clone(&logger), - )), - Err(e) => { - log_error!(logger, "Failed to read payment data from store: {}", e); - return Err(BuildError::ReadFailed); - }, - }; + let payment_store = + match runtime.block_on(async { read_payments(&*kv_store, Arc::clone(&logger)).await }) { + Ok(payments) => Arc::new(PaymentStore::new( + payments, + PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(), + PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE.to_string(), + Arc::clone(&kv_store), + Arc::clone(&logger), + )), + Err(e) => { + log_error!(logger, "Failed to read payment data from store: {}", e); + return Err(BuildError::ReadFailed); + }, + }; let (chain_source, chain_tip_opt) = match chain_data_source_config { Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }) => { @@ -1298,7 +1297,7 @@ fn build_with_store_internal( // Initialize the network graph, scorer, and router let network_graph = match runtime - .block_on(async { read_network_graph(Arc::clone(&kv_store), Arc::clone(&logger)).await }) + .block_on(async { read_network_graph(&*kv_store, Arc::clone(&logger)).await }) { Ok(graph) => Arc::new(graph), Err(e) => { @@ -1312,7 +1311,7 @@ fn build_with_store_internal( }; let local_scorer = match runtime.block_on(async { - read_scorer(Arc::clone(&kv_store), Arc::clone(&network_graph), Arc::clone(&logger)).await + read_scorer(&*kv_store, Arc::clone(&network_graph), Arc::clone(&logger)).await }) { Ok(scorer) => scorer, Err(e) => { @@ -1330,8 +1329,7 @@ fn build_with_store_internal( // Restore external pathfinding scores from cache if possible. 
match runtime.block_on(async { - read_external_pathfinding_scores_from_cache(Arc::clone(&kv_store), Arc::clone(&logger)) - .await + read_external_pathfinding_scores_from_cache(&*kv_store, Arc::clone(&logger)).await }) { Ok(external_scores) => { scorer.lock().unwrap().merge(external_scores, cur_time); @@ -1490,15 +1488,11 @@ fn build_with_store_internal( { let mut locked_node_metrics = node_metrics.write().unwrap(); locked_node_metrics.latest_rgs_snapshot_timestamp = None; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - ) - .map_err(|e| { - log_error!(logger, "Failed writing to store: {}", e); - BuildError::WriteFailed - })?; + write_node_metrics(&*locked_node_metrics, &*kv_store, Arc::clone(&logger)) + .map_err(|e| { + log_error!(logger, "Failed writing to store: {}", e); + BuildError::WriteFailed + })?; } p2p_source }, diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 0c3b644ca..69255e080 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -205,14 +205,10 @@ impl BitcoindChainSource { unix_time_secs_opt; locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - ) - .unwrap_or_else(|e| { - log_error!(self.logger, "Failed to persist node metrics: {}", e); - }); + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger) + .unwrap_or_else(|e| { + log_error!(self.logger, "Failed to persist node metrics: {}", e); + }); } break; }, @@ -420,11 +416,11 @@ impl BitcoindChainSource { *self.latest_chain_tip.write().unwrap() = Some(tip); periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - chain_monitor, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - Arc::clone(&self.node_metrics), + &*channel_manager, + &*chain_monitor, + &*self.kv_store, + &*self.logger, + &*self.node_metrics, )?; }, Ok(_) => {}, @@ -469,11 +465,7 @@ impl BitcoindChainSource { locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; Ok(()) } @@ -586,11 +578,7 @@ impl BitcoindChainSource { { let mut locked_node_metrics = self.node_metrics.write().unwrap(); locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; } Ok(()) diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 9e05dfaee..b520b2e11 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -149,8 +149,8 @@ impl ElectrumChainSource { unix_time_secs_opt; write_node_metrics( &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), + &*self.kv_store, + &*self.logger, )?; } Ok(()) @@ -239,19 +239,15 @@ impl ElectrumChainSource { { let mut locked_node_metrics = self.node_metrics.write().unwrap(); locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; } 
periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - Arc::clone(&self.node_metrics), + &*channel_manager, + &*chain_monitor, + &*self.kv_store, + &*self.logger, + &*self.node_metrics, )?; } @@ -284,11 +280,7 @@ impl ElectrumChainSource { { let mut locked_node_metrics = self.node_metrics.write().unwrap(); locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; } Ok(()) diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index f6f313955..2acca4654 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -128,8 +128,8 @@ impl EsploraChainSource { locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; write_node_metrics( &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger) + &*self.kv_store, + &*self.logger )?; } Ok(()) @@ -259,19 +259,15 @@ impl EsploraChainSource { let mut locked_node_metrics = self.node_metrics.write().unwrap(); locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; } periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - Arc::clone(&self.node_metrics), + &*channel_manager, + &*chain_monitor, + &*self.kv_store, + &*self.logger, + &*self.node_metrics, )?; Ok(()) }, @@ -353,11 +349,7 @@ impl EsploraChainSource { { let mut locked_node_metrics = self.node_metrics.write().unwrap(); locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; } Ok(()) diff --git a/src/chain/mod.rs b/src/chain/mod.rs index a73ce7418..1010f32b7 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -488,8 +488,8 @@ impl Filter for ChainSource { } fn periodically_archive_fully_resolved_monitors( - channel_manager: Arc, chain_monitor: Arc, - kv_store: Arc, logger: Arc, node_metrics: Arc>, + channel_manager: &ChannelManager, chain_monitor: &ChainMonitor, kv_store: &DynStore, + logger: &Logger, node_metrics: &RwLock, ) -> Result<(), Error> { let mut locked_node_metrics = node_metrics.write().unwrap(); let cur_height = channel_manager.current_best_block().height; diff --git a/src/io/utils.rs b/src/io/utils.rs index 15677d096..68ca7a61e 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -88,7 +88,7 @@ pub(crate) fn read_or_generate_seed_file( /// Read a previously persisted [`NetworkGraph`] from the store. pub(crate) async fn read_network_graph( - kv_store: Arc, logger: L, + kv_store: &DynStore, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, @@ -108,7 +108,7 @@ where /// Read a previously persisted [`ProbabilisticScorer`] from the store. 
pub(crate) async fn read_scorer>, L: Deref + Clone>( - kv_store: Arc, network_graph: G, logger: L, + kv_store: &DynStore, network_graph: G, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, @@ -130,7 +130,7 @@ where /// Read previously persisted external pathfinding scores from the cache. pub(crate) async fn read_external_pathfinding_scores_from_cache( - kv_store: Arc, logger: L, + kv_store: &DynStore, logger: L, ) -> Result where L::Target: LdkLogger, @@ -150,7 +150,7 @@ where /// Persist external pathfinding scores to the cache. pub(crate) async fn write_external_pathfinding_scores_to_cache( - kv_store: Arc, data: &ChannelLiquidities, logger: L, + kv_store: &DynStore, data: &ChannelLiquidities, logger: L, ) -> Result<(), Error> where L::Target: LdkLogger, @@ -218,7 +218,7 @@ where /// Read previously persisted payments information from the store. pub(crate) async fn read_payments( - kv_store: Arc, logger: L, + kv_store: &DynStore, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, @@ -323,7 +323,7 @@ pub(crate) async fn read_output_sweeper( } pub(crate) async fn read_node_metrics( - kv_store: Arc, logger: L, + kv_store: &DynStore, logger: L, ) -> Result where L::Target: LdkLogger, @@ -342,7 +342,7 @@ where } pub(crate) fn write_node_metrics( - node_metrics: &NodeMetrics, kv_store: Arc, logger: L, + node_metrics: &NodeMetrics, kv_store: &DynStore, logger: L, ) -> Result<(), Error> where L::Target: LdkLogger, @@ -469,7 +469,7 @@ macro_rules! impl_read_write_change_set_type { $key:expr ) => { pub(crate) fn $read_name( - kv_store: Arc, logger: L, + kv_store: &DynStore, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, @@ -510,7 +510,7 @@ macro_rules! impl_read_write_change_set_type { } pub(crate) fn $write_name( - value: &$change_set_type, kv_store: Arc, logger: L, + value: &$change_set_type, kv_store: &DynStore, logger: L, ) -> Result<(), std::io::Error> where L::Target: LdkLogger, @@ -588,41 +588,35 @@ impl_read_write_change_set_type!( // Reads the full BdkWalletChangeSet or returns default fields pub(crate) fn read_bdk_wallet_change_set( - kv_store: Arc, logger: Arc, + kv_store: &DynStore, logger: &Logger, ) -> Result, std::io::Error> { let mut change_set = BdkWalletChangeSet::default(); // We require a descriptor and return `None` to signal creation of a new wallet otherwise. - if let Some(descriptor) = - read_bdk_wallet_descriptor(Arc::clone(&kv_store), Arc::clone(&logger))? - { + if let Some(descriptor) = read_bdk_wallet_descriptor(kv_store, logger)? { change_set.descriptor = Some(descriptor); } else { return Ok(None); } // We require a change_descriptor and return `None` to signal creation of a new wallet otherwise. - if let Some(change_descriptor) = - read_bdk_wallet_change_descriptor(Arc::clone(&kv_store), Arc::clone(&logger))? - { + if let Some(change_descriptor) = read_bdk_wallet_change_descriptor(kv_store, logger)? { change_set.change_descriptor = Some(change_descriptor); } else { return Ok(None); } // We require a network and return `None` to signal creation of a new wallet otherwise. - if let Some(network) = read_bdk_wallet_network(Arc::clone(&kv_store), Arc::clone(&logger))? { + if let Some(network) = read_bdk_wallet_network(kv_store, logger)? { change_set.network = Some(network); } else { return Ok(None); } - read_bdk_wallet_local_chain(Arc::clone(&kv_store), Arc::clone(&logger))? + read_bdk_wallet_local_chain(&*kv_store, logger)? 
.map(|local_chain| change_set.local_chain = local_chain); - read_bdk_wallet_tx_graph(Arc::clone(&kv_store), Arc::clone(&logger))? - .map(|tx_graph| change_set.tx_graph = tx_graph); - read_bdk_wallet_indexer(Arc::clone(&kv_store), Arc::clone(&logger))? - .map(|indexer| change_set.indexer = indexer); + read_bdk_wallet_tx_graph(&*kv_store, logger)?.map(|tx_graph| change_set.tx_graph = tx_graph); + read_bdk_wallet_indexer(&*kv_store, logger)?.map(|indexer| change_set.indexer = indexer); Ok(Some(change_set)) } diff --git a/src/lib.rs b/src/lib.rs index b050fba57..e15b708ce 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -291,7 +291,7 @@ impl Node { { let mut locked_node_metrics = gossip_node_metrics.write().unwrap(); locked_node_metrics.latest_rgs_snapshot_timestamp = Some(updated_timestamp); - write_node_metrics(&*locked_node_metrics, Arc::clone(&gossip_sync_store), Arc::clone(&gossip_sync_logger)) + write_node_metrics(&*locked_node_metrics, &*gossip_sync_store, Arc::clone(&gossip_sync_logger)) .unwrap_or_else(|e| { log_error!(gossip_sync_logger, "Persistence failed: {}", e); }); @@ -507,7 +507,7 @@ impl Node { { let mut locked_node_metrics = bcast_node_metrics.write().unwrap(); locked_node_metrics.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) + write_node_metrics(&*locked_node_metrics, &*bcast_store, Arc::clone(&bcast_logger)) .unwrap_or_else(|e| { log_error!(bcast_logger, "Persistence failed: {}", e); }); diff --git a/src/scoring.rs b/src/scoring.rs index daa5725fa..2e0d226ff 100644 --- a/src/scoring.rs +++ b/src/scoring.rs @@ -82,12 +82,8 @@ async fn sync_external_scores( }; match ChannelLiquidities::read(&mut &*reader) { Ok(liquidities) => { - if let Err(e) = write_external_pathfinding_scores_to_cache( - Arc::clone(&kv_store), - &liquidities, - logger, - ) - .await + if let Err(e) = + write_external_pathfinding_scores_to_cache(&*kv_store, &liquidities, logger).await { log_error!(logger, "Failed to persist external scores to cache: {}", e); } @@ -98,10 +94,9 @@ async fn sync_external_scores( let mut locked_node_metrics = node_metrics.write().unwrap(); locked_node_metrics.latest_pathfinding_scores_sync_timestamp = Some(duration_since_epoch.as_secs()); - write_node_metrics(&*locked_node_metrics, Arc::clone(&kv_store), logger) - .unwrap_or_else(|e| { - log_error!(logger, "Persisting node metrics failed: {}", e); - }); + write_node_metrics(&*locked_node_metrics, &*kv_store, logger).unwrap_or_else(|e| { + log_error!(logger, "Persisting node metrics failed: {}", e); + }); log_trace!(logger, "External scores merged successfully"); }, Err(e) => { diff --git a/src/wallet/persist.rs b/src/wallet/persist.rs index 5c8668937..10be1fac0 100644 --- a/src/wallet/persist.rs +++ b/src/wallet/persist.rs @@ -38,10 +38,7 @@ impl WalletPersister for KVStoreWalletPersister { return Ok(latest_change_set.clone()); } - let change_set_opt = read_bdk_wallet_change_set( - Arc::clone(&persister.kv_store), - Arc::clone(&persister.logger), - )?; + let change_set_opt = read_bdk_wallet_change_set(&*persister.kv_store, &*persister.logger)?; let change_set = match change_set_opt { Some(persisted_change_set) => persisted_change_set, @@ -87,11 +84,7 @@ impl WalletPersister for KVStoreWalletPersister { )); } else { latest_change_set.descriptor = Some(descriptor.clone()); - write_bdk_wallet_descriptor( - &descriptor, - Arc::clone(&persister.kv_store), - Arc::clone(&persister.logger), - )?; + 
write_bdk_wallet_descriptor(&descriptor, &*persister.kv_store, &*persister.logger)?; } } @@ -112,8 +105,8 @@ impl WalletPersister for KVStoreWalletPersister { latest_change_set.change_descriptor = Some(change_descriptor.clone()); write_bdk_wallet_change_descriptor( &change_descriptor, - Arc::clone(&persister.kv_store), - Arc::clone(&persister.logger), + &*persister.kv_store, + &*persister.logger, )?; } } @@ -131,11 +124,7 @@ impl WalletPersister for KVStoreWalletPersister { )); } else { latest_change_set.network = Some(network); - write_bdk_wallet_network( - &network, - Arc::clone(&persister.kv_store), - Arc::clone(&persister.logger), - )?; + write_bdk_wallet_network(&network, &*persister.kv_store, &*persister.logger)?; } } @@ -157,7 +146,7 @@ impl WalletPersister for KVStoreWalletPersister { latest_change_set.indexer.merge(change_set.indexer.clone()); write_bdk_wallet_indexer( &latest_change_set.indexer, - Arc::clone(&persister.kv_store), + &*persister.kv_store, Arc::clone(&persister.logger), )?; } @@ -166,7 +155,7 @@ impl WalletPersister for KVStoreWalletPersister { latest_change_set.tx_graph.merge(change_set.tx_graph.clone()); write_bdk_wallet_tx_graph( &latest_change_set.tx_graph, - Arc::clone(&persister.kv_store), + &*persister.kv_store, Arc::clone(&persister.logger), )?; } @@ -175,7 +164,7 @@ impl WalletPersister for KVStoreWalletPersister { latest_change_set.local_chain.merge(change_set.local_chain.clone()); write_bdk_wallet_local_chain( &latest_change_set.local_chain, - Arc::clone(&persister.kv_store), + &*persister.kv_store, Arc::clone(&persister.logger), )?; } From c724a893ece9a21b8332aab416ef3288b2488c2a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 7 Jan 2026 10:56:33 +0100 Subject: [PATCH 09/75] Add test for payment persistence after node restart Add integration test that verifies 200 payments are correctly persisted and retrievable via `list_payments` after restarting a node. 
Co-Authored-By: Claude AI --- tests/integration_tests_rust.rs | 121 +++++++++++++++++++++++++++++++- 1 file changed, 120 insertions(+), 1 deletion(-) diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 655b5fd94..4d2a17422 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -23,7 +23,8 @@ use common::{ expect_splice_pending_event, generate_blocks_and_wait, open_channel, open_channel_push_amt, premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config, random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, setup_node, - setup_node_for_async_payments, setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, + setup_node_for_async_payments, setup_two_nodes, wait_for_tx, TestChainSource, TestStoreType, + TestSyncStore, }; use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig}; use ldk_node::liquidity::LSPS2ServiceConfig; @@ -2317,3 +2318,121 @@ async fn lsps2_lsp_trusts_client_but_client_does_not_claim() { Some(6) ); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn payment_persistence_after_restart() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + + // Setup nodes manually so we can restart node_a with the same config + println!("== Node A =="); + let mut config_a = random_config(true); + config_a.store_type = TestStoreType::Sqlite; + + let num_payments = 200; + let payment_amount_msat = 1_000_000; // 1000 sats per payment + + { + let node_a = setup_node(&chain_source, config_a.clone()); + + println!("\n== Node B =="); + let config_b = random_config(true); + let node_b = setup_node(&chain_source, config_b); + + let addr_a = node_a.onchain_payment().new_address().unwrap(); + let addr_b = node_b.onchain_payment().new_address().unwrap(); + + // Premine sufficient funds for a large channel and many payments + let premine_amount_sat = 10_000_000; + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr_a, addr_b], + Amount::from_sat(premine_amount_sat), + ) + .await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + assert_eq!(node_a.list_balances().spendable_onchain_balance_sats, premine_amount_sat); + assert_eq!(node_b.list_balances().spendable_onchain_balance_sats, premine_amount_sat); + + // Open a large channel from node_a to node_b + let channel_amount_sat = 5_000_000; + open_channel(&node_a, &node_b, channel_amount_sat, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + // Send 200 payments from node_a to node_b + println!("\nSending {} payments from A to B...", num_payments); + let invoice_description = + Bolt11InvoiceDescription::Direct(Description::new(String::from("test")).unwrap()); + + for i in 0..num_payments { + let invoice = node_b + .bolt11_payment() + .receive(payment_amount_msat, &invoice_description.clone().into(), 3600) + .unwrap(); + let payment_id = node_a.bolt11_payment().send(&invoice, None).unwrap(); + expect_event!(node_a, PaymentSuccessful); + expect_event!(node_b, PaymentReceived); + + if (i + 1) % 50 == 0 { + println!("Completed {} payments", i + 1); + } + + // Verify payment succeeded + assert_eq!(node_a.payment(&payment_id).unwrap().status, PaymentStatus::Succeeded); + } + println!("All 
{} payments completed successfully", num_payments); + + // Verify node_a has 200 outbound Bolt11 payments before shutdown + let outbound_payments_before = node_a.list_payments_with_filter(|p| { + p.direction == PaymentDirection::Outbound + && matches!(p.kind, PaymentKind::Bolt11 { .. }) + }); + assert_eq!(outbound_payments_before.len(), num_payments); + + // Shut down both nodes + println!("\nShutting down nodes..."); + node_a.stop().unwrap(); + node_b.stop().unwrap(); + } + + // Restart node_a with the same config + println!("\nRestarting node A..."); + let restarted_node_a = setup_node(&chain_source, config_a); + + // Assert all 200 payments are still in the store + let outbound_payments_after = restarted_node_a.list_payments_with_filter(|p| { + p.direction == PaymentDirection::Outbound && matches!(p.kind, PaymentKind::Bolt11 { .. }) + }); + assert_eq!( + outbound_payments_after.len(), + num_payments, + "Expected {} payments after restart, found {}", + num_payments, + outbound_payments_after.len() + ); + + // Verify all payments have the correct status + for payment in &outbound_payments_after { + assert_eq!( + payment.status, + PaymentStatus::Succeeded, + "Payment {:?} has unexpected status {:?}", + payment.id, + payment.status + ); + assert_eq!(payment.amount_msat, Some(payment_amount_msat)); + } + + println!( + "Successfully verified {} payments persisted after restart", + outbound_payments_after.len() + ); + + restarted_node_a.stop().unwrap(); +} From d8d68613a85c248eceae514106719e4c9285333c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 8 Jan 2026 13:12:05 +0100 Subject: [PATCH 10/75] Bump LDK dependency to `1c730c8a16e28cc8e0c4817717ee63c97abcf4b0` .. we bump to the most recent `rust-lightning` commit and fix some minor test code changes. 
--- Cargo.toml | 26 +++++++++++++------------- src/io/test_utils.rs | 12 ++++++------ 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1f35d142e..cc2a4b194 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,17 +39,17 @@ default = [] #lightning-liquidity = { version = "0.2.0", features = ["std"] } #lightning-macros = { version = "0.2.0" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["std"] } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { git = 
"https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -78,13 +78,13 @@ log = { version = "0.4.22", default-features = false, features = ["std"]} vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} #bitcoin-payment-instructions = { version = "0.6" } -bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", branch = "2025-12-ldk-node-base" } +bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "a9ad849a0eb7b155a688d713de6d9010cb48f073" } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index 6eb04df3f..cbcd90d29 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -13,15 +13,15 @@ use std::sync::Mutex; use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::{ - check_closed_event, connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, - create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, - TestChanMonCfg, + check_added_monitors, check_closed_event, connect_block, create_announced_chan_between_nodes, + create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, + create_node_chanmgrs, send_payment, TestChanMonCfg, }; use lightning::util::persist::{ KVStore, KVStoreSync, MonitorUpdatingPersister, KVSTORE_NAMESPACE_KEY_MAX_LEN, }; use lightning::util::test_utils; -use lightning::{check_added_monitors, check_closed_broadcast, io}; +use lightning::{check_closed_broadcast, io}; use rand::distr::Alphanumeric; use rand::{rng, Rng}; @@ -333,7 +333,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { 100000, ); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(node_txn.len(), 1); @@ -345,7 +345,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { let reason = ClosureReason::CommitmentTxConfirmed; let node_id_0 = nodes[0].node.get_our_node_id(); check_closed_event(&nodes[1], 1, reason, &[node_id_0], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Make sure everything is persisted as expected after close. check_persisted_data!(persister_0_max_pending_updates * 2 * EXPECTED_UPDATES_PER_PAYMENT + 1); From dd55d4758d8ea0b71312de574fd42fa25fc5f5d0 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 8 Jan 2026 13:15:47 +0100 Subject: [PATCH 11/75] Stop archiving `ChannelMonitor`s .. as this is now done by the background processor. 
--- bindings/ldk_node.udl | 1 - src/chain/bitcoind.rs | 10 +--------- src/chain/electrum.rs | 10 +--------- src/chain/esplora.rs | 10 +--------- src/chain/mod.rs | 22 +--------------------- src/config.rs | 3 --- src/lib.rs | 12 ++---------- 7 files changed, 6 insertions(+), 62 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 276ffcfba..b59a38b04 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -359,7 +359,6 @@ dictionary NodeStatus { u64? latest_rgs_snapshot_timestamp; u64? latest_pathfinding_scores_sync_timestamp; u64? latest_node_announcement_broadcast_timestamp; - u32? latest_channel_monitor_archival_height; }; dictionary BestBlock { diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 69255e080..b0cf69395 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -29,7 +29,7 @@ use lightning_block_sync::{ }; use serde::Serialize; -use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; +use super::WalletSyncStatus; use crate::config::{ BitcoindRestClientConfig, Config, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, }; @@ -414,14 +414,6 @@ impl BitcoindChainSource { now.elapsed().unwrap().as_millis() ); *self.latest_chain_tip.write().unwrap() = Some(tip); - - periodically_archive_fully_resolved_monitors( - &*channel_manager, - &*chain_monitor, - &*self.kv_store, - &*self.logger, - &*self.node_metrics, - )?; }, Ok(_) => {}, Err(e) => { diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index b520b2e11..df96dfddb 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -23,7 +23,7 @@ use lightning::chain::{Confirm, Filter, WatchedOutput}; use lightning::util::ser::Writeable; use lightning_transaction_sync::ElectrumSyncClient; -use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; +use super::WalletSyncStatus; use crate::config::{ Config, ElectrumSyncConfig, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, @@ -241,14 +241,6 @@ impl ElectrumChainSource { locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; } - - periodically_archive_fully_resolved_monitors( - &*channel_manager, - &*chain_monitor, - &*self.kv_store, - &*self.logger, - &*self.node_metrics, - )?; } res diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index 2acca4654..4d9f051cf 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -16,7 +16,7 @@ use lightning::chain::{Confirm, Filter, WatchedOutput}; use lightning::util::ser::Writeable; use lightning_transaction_sync::EsploraSyncClient; -use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; +use super::WalletSyncStatus; use crate::config::{ Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, @@ -261,14 +261,6 @@ impl EsploraChainSource { unix_time_secs_opt; write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; } - - periodically_archive_fully_resolved_monitors( - &*channel_manager, - &*chain_monitor, - &*self.kv_store, - &*self.logger, - &*self.node_metrics, - )?; Ok(()) }, Err(e) => { diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 1010f32b7..afd502363 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -21,10 +21,9 @@ use crate::chain::electrum::ElectrumChainSource; use 
crate::chain::esplora::EsploraChainSource; use crate::config::{ BackgroundSyncConfig, BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, - RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, WALLET_SYNC_INTERVAL_MINIMUM_SECS, + WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; use crate::fee_estimator::OnchainFeeEstimator; -use crate::io::utils::write_node_metrics; use crate::logger::{log_debug, log_info, log_trace, LdkLogger, Logger}; use crate::runtime::Runtime; use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; @@ -486,22 +485,3 @@ impl Filter for ChainSource { } } } - -fn periodically_archive_fully_resolved_monitors( - channel_manager: &ChannelManager, chain_monitor: &ChainMonitor, kv_store: &DynStore, - logger: &Logger, node_metrics: &RwLock, -) -> Result<(), Error> { - let mut locked_node_metrics = node_metrics.write().unwrap(); - let cur_height = channel_manager.current_best_block().height; - let should_archive = locked_node_metrics - .latest_channel_monitor_archival_height - .as_ref() - .map_or(true, |h| cur_height >= h + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL); - - if should_archive { - chain_monitor.archive_fully_resolved_channel_monitors(); - locked_node_metrics.latest_channel_monitor_archival_height = Some(cur_height); - write_node_metrics(&*locked_node_metrics, kv_store, logger)?; - } - Ok(()) -} diff --git a/src/config.rs b/src/config.rs index 510bcc875..329f406d8 100644 --- a/src/config.rs +++ b/src/config.rs @@ -54,9 +54,6 @@ pub(crate) const BDK_CLIENT_CONCURRENCY: usize = 4; // The timeout after which we abandon retrying failed payments. pub(crate) const LDK_PAYMENT_RETRY_TIMEOUT: Duration = Duration::from_secs(10); -// The interval (in block height) after which we retry archiving fully resolved channel monitors. -pub(crate) const RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL: u32 = 6; - // The time in-between peer reconnection attempts. pub(crate) const PEER_RECONNECTION_INTERVAL: Duration = Duration::from_secs(60); diff --git a/src/lib.rs b/src/lib.rs index e15b708ce..d9bca4551 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -728,8 +728,6 @@ impl Node { locked_node_metrics.latest_pathfinding_scores_sync_timestamp; let latest_node_announcement_broadcast_timestamp = locked_node_metrics.latest_node_announcement_broadcast_timestamp; - let latest_channel_monitor_archival_height = - locked_node_metrics.latest_channel_monitor_archival_height; NodeStatus { is_running, @@ -740,7 +738,6 @@ impl Node { latest_rgs_snapshot_timestamp, latest_pathfinding_scores_sync_timestamp, latest_node_announcement_broadcast_timestamp, - latest_channel_monitor_archival_height, } } @@ -1806,10 +1803,6 @@ pub struct NodeStatus { /// /// Will be `None` if we have no public channels or we haven't broadcasted yet. pub latest_node_announcement_broadcast_timestamp: Option, - /// The block height when we last archived closed channel monitor data. - /// - /// Will be `None` if we haven't archived any monitors of closed channels yet. - pub latest_channel_monitor_archival_height: Option, } /// Status fields that are persisted across restarts. 
@@ -1821,7 +1814,6 @@ pub(crate) struct NodeMetrics {
 	latest_rgs_snapshot_timestamp: Option<u64>,
 	latest_pathfinding_scores_sync_timestamp: Option<u64>,
 	latest_node_announcement_broadcast_timestamp: Option<u64>,
-	latest_channel_monitor_archival_height: Option<u32>,
 }
 
 impl Default for NodeMetrics {
@@ -1833,7 +1825,6 @@ impl Default for NodeMetrics {
 			latest_rgs_snapshot_timestamp: None,
 			latest_pathfinding_scores_sync_timestamp: None,
 			latest_node_announcement_broadcast_timestamp: None,
-			latest_channel_monitor_archival_height: None,
 		}
 	}
 }
@@ -1845,7 +1836,8 @@ impl_writeable_tlv_based!(NodeMetrics, {
 	(4, latest_fee_rate_cache_update_timestamp, option),
 	(6, latest_rgs_snapshot_timestamp, option),
 	(8, latest_node_announcement_broadcast_timestamp, option),
-	(10, latest_channel_monitor_archival_height, option),
+	// 10 used to be latest_channel_monitor_archival_height
+	(10, _legacy_latest_channel_monitor_archival_height, (legacy, Option<u32>, |_: &NodeMetrics| None::<Option<u32>>)),
 });
 
 pub(crate) fn total_anchor_channels_reserve_sats(
From f3bbd4a4882d4ecd370bf94ae0d0ff3de245b049 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 8 Jan 2026 12:51:28 +0000
Subject: [PATCH 12/75] Avoid resilvering `ChannelMonitor`s on startup

LDK 0.2 added a method to load `ChannelMonitor`s on startup without
resilvering them, avoiding the startup latency of persistence for each
`ChannelMonitor`. Here we start using it.

---
 src/builder.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/builder.rs b/src/builder.rs
index e7e2de286..187f780d2 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -1438,8 +1438,8 @@ fn build_with_store_internal(
 	// Give ChannelMonitors to ChainMonitor
 	for (_blockhash, channel_monitor) in channel_monitors.into_iter() {
 		let channel_id = channel_monitor.channel_id();
-		chain_monitor.watch_channel(channel_id, channel_monitor).map_err(|e| {
-			log_error!(logger, "Failed to watch channel monitor: {:?}", e);
+		chain_monitor.load_existing_monitor(channel_id, channel_monitor).map_err(|e| {
+			log_error!(logger, "Failed to load channel monitor: {:?}", e);
 			BuildError::InvalidChannelMonitor
 		})?;
 	}
From d8e33c15baf82a4ce46754745f0a33582e1d54df Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 30 Dec 2025 14:09:27 +0000
Subject: [PATCH 13/75] Fix circular `Arc` reference in `LiquiditySource`

`LiquiditySource` takes a reference to our `PeerManager` but the
`PeerManager` holds an indirect reference to the `LiquiditySource`. As
a result, after our `Node` instance is `stop`ped and the `Node`
`drop`ped, much of the node's memory will stick around, including the
`NetworkGraph`. Here we fix this issue by using `Weak` pointers, though
note that there is another issue caused by LDK's gossip validation API.
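For illustration, the shape of the fix in miniature. This is a
standalone sketch whose type names mirror ours but whose bodies are
placeholders, not the actual ldk-node implementation: the holder stores
a `Weak` and upgrades it on use, so dropping the last external `Arc`
actually frees the cycle.

	use std::sync::{Arc, RwLock, Weak};

	struct PeerManager; // indirectly references LiquiditySource in the real code

	struct LiquiditySource {
		// Holding `Weak` instead of `Arc` breaks the reference cycle.
		peer_manager: RwLock<Option<Weak<PeerManager>>>,
	}

	impl LiquiditySource {
		fn set_peer_manager(&self, peer_manager: Weak<PeerManager>) {
			*self.peer_manager.write().unwrap() = Some(peer_manager);
		}

		fn use_peer_manager(&self) {
			// `upgrade()` returns `None` once the last strong reference is
			// gone, so callers must handle the "already dropped" case.
			if let Some(Some(pm)) =
				self.peer_manager.read().unwrap().as_ref().map(|weak| weak.upgrade())
			{
				let _ = pm; // ...use the peer manager...
			}
		}
	}

	fn main() {
		let liquidity = Arc::new(LiquiditySource { peer_manager: RwLock::new(None) });
		let peer_manager = Arc::new(PeerManager);
		liquidity.set_peer_manager(Arc::downgrade(&peer_manager));
		liquidity.use_peer_manager();
		drop(peer_manager); // with no cycle, this actually frees the PeerManager
	}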
--- src/builder.rs | 2 +- src/liquidity.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 187f780d2..ee8931127 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1603,7 +1603,7 @@ fn build_with_store_internal( peer_manager_clone.process_events(); })); - liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::clone(&peer_manager))); + liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::downgrade(&peer_manager))); gossip_source.set_gossip_verifier( Arc::clone(&chain_source), diff --git a/src/liquidity.rs b/src/liquidity.rs index 74e6098dd..2151110b6 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -9,7 +9,7 @@ use std::collections::HashMap; use std::ops::Deref; -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, Mutex, RwLock, Weak}; use std::time::Duration; use bitcoin::hashes::{sha256, Hash}; @@ -291,7 +291,7 @@ where lsps2_service: Option, wallet: Arc, channel_manager: Arc, - peer_manager: RwLock>>, + peer_manager: RwLock>>, keys_manager: Arc, liquidity_manager: Arc, config: Arc, @@ -302,7 +302,7 @@ impl LiquiditySource where L::Target: LdkLogger, { - pub(crate) fn set_peer_manager(&self, peer_manager: Arc) { + pub(crate) fn set_peer_manager(&self, peer_manager: Weak) { *self.peer_manager.write().unwrap() = Some(peer_manager); } @@ -715,8 +715,8 @@ where return; }; - let init_features = if let Some(peer_manager) = - self.peer_manager.read().unwrap().as_ref() + let init_features = if let Some(Some(peer_manager)) = + self.peer_manager.read().unwrap().as_ref().map(|weak| weak.upgrade()) { // Fail if we're not connected to the prospective channel partner. if let Some(peer) = peer_manager.peer_by_node_id(&their_network_key) { From a7d2b6a1869d25c88ec08db3059f635ae537769f Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 8 Jan 2026 14:47:22 +0000 Subject: [PATCH 14/75] Fix circular `Arc` reference in HRN resolver action In added logic to use the HRN resolver from `bitcoin-payment-instructions`, we created a circular `Arc` reference - the `LDKOnionMessageDNSSECHrnResolver` is used as a handler for the `OnionMessenger` but we also set a post-queue-action which holds a reference to the `PeerManager`. As a result, after our `Node` instance is `stop`ped and the `Node` `drop`ped, much of the node's memory will stick around, including the `NetworkGraph`. Here we fix this issue by using `Weak` pointers. --- src/builder.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index ee8931127..cea3d09f5 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1597,10 +1597,11 @@ fn build_with_store_internal( Arc::clone(&keys_manager), )); - let peer_manager_clone = Arc::clone(&peer_manager); - + let peer_manager_clone = Arc::downgrade(&peer_manager); hrn_resolver.register_post_queue_action(Box::new(move || { - peer_manager_clone.process_events(); + if let Some(upgraded_pointer) = peer_manager_clone.upgrade() { + upgraded_pointer.process_events(); + } })); liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::downgrade(&peer_manager))); From 809a2270efbbfb017e333d03e610086be5152627 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 8 Jan 2026 14:21:10 +0000 Subject: [PATCH 15/75] Update LDK, fixing a circular `Arc` reference in gossip validation LDK's gossip validation API basically forced us to have a circular `Arc` reference, leading to memory leaks after `drop`ping an instance of `Node`. This is fixed upstream in LDK PR #4294 which we update to here. 
--- Cargo.toml | 26 +++++++++++++------------- src/builder.rs | 16 +++++++--------- src/gossip.rs | 38 ++++++++++---------------------------- src/types.rs | 2 +- 4 files changed, 31 insertions(+), 51 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cc2a4b194..431d6b8d8 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,17 +39,17 @@ default = [] #lightning-liquidity = { version = "0.2.0", features = ["std"] } #lightning-macros = { version = "0.2.0" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["std"] } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["esplora-async-https", "time", 
"electrum-rustls-ring"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -78,13 +78,13 @@ log = { version = "0.4.22", default-features = false, features = ["std"]} vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} #bitcoin-payment-instructions = { version = "0.6" } -bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "a9ad849a0eb7b155a688d713de6d9010cb48f073" } +bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "fdca6c62f2fe2c53427d3e51e322a49aa7323ee2" } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "1c730c8a16e28cc8e0c4817717ee63c97abcf4b0", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } diff --git a/src/builder.rs b/src/builder.rs index cea3d09f5..ca8e71d03 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -20,7 +20,7 @@ use bitcoin::key::Secp256k1; use bitcoin::secp256k1::PublicKey; use bitcoin::{BlockHash, Network}; use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; -use lightning::chain::{chainmonitor, BestBlock, Watch}; +use lightning::chain::{chainmonitor, BestBlock}; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; @@ -1481,8 +1481,12 @@ fn build_with_store_internal( let gossip_source = match gossip_source_config { GossipSourceConfig::P2PNetwork => { - let p2p_source = - Arc::new(GossipSource::new_p2p(Arc::clone(&network_graph), Arc::clone(&logger))); + let p2p_source = Arc::new(GossipSource::new_p2p( + Arc::clone(&network_graph), + Arc::clone(&chain_source), + Arc::clone(&runtime), + Arc::clone(&logger), + )); // Reset the RGS sync timestamp in case we somehow switch gossip sources { @@ -1606,12 +1610,6 @@ fn build_with_store_internal( liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::downgrade(&peer_manager))); - gossip_source.set_gossip_verifier( - Arc::clone(&chain_source), - Arc::clone(&peer_manager), - Arc::clone(&runtime), - ); - let connection_manager = Arc::new(ConnectionManager::new(Arc::clone(&peer_manager), Arc::clone(&logger))); diff --git a/src/gossip.rs b/src/gossip.rs index 563d9e1ea..2b524d9ae 100644 --- a/src/gossip.rs +++ b/src/gossip.rs @@ -17,7 +17,7 @@ use crate::chain::ChainSource; use crate::config::RGS_SYNC_TIMEOUT_SECS; use crate::logger::{log_trace, LdkLogger, Logger}; use crate::runtime::Runtime; -use crate::types::{GossipSync, Graph, P2PGossipSync, PeerManager, RapidGossipSync, UtxoLookup}; +use crate::types::{GossipSync, Graph, P2PGossipSync, RapidGossipSync}; 
 use crate::Error;
 
 pub(crate) enum GossipSource {
@@ -33,12 +33,15 @@ pub(crate) enum GossipSource {
 }
 
 impl GossipSource {
-	pub fn new_p2p(network_graph: Arc<Graph>, logger: Arc<Logger>) -> Self {
-		let gossip_sync = Arc::new(P2PGossipSync::new(
-			network_graph,
-			None::<Arc<UtxoLookup>>,
-			Arc::clone(&logger),
-		));
+	pub fn new_p2p(
+		network_graph: Arc<Graph>, chain_source: Arc<ChainSource>, runtime: Arc<Runtime>,
+		logger: Arc<Logger>,
+	) -> Self {
+		let verifier = chain_source.as_utxo_source().map(|utxo_source| {
+			Arc::new(GossipVerifier::new(Arc::new(utxo_source), RuntimeSpawner::new(runtime)))
+		});
+
+		let gossip_sync = Arc::new(P2PGossipSync::new(network_graph, verifier, logger));
 		Self::P2PNetwork { gossip_sync }
 	}
 
@@ -62,27 +65,6 @@ impl GossipSource {
 		}
 	}
 
-	pub(crate) fn set_gossip_verifier(
-		&self, chain_source: Arc<ChainSource>, peer_manager: Arc<PeerManager>,
-		runtime: Arc<Runtime>,
-	) {
-		match self {
-			Self::P2PNetwork { gossip_sync } => {
-				if let Some(utxo_source) = chain_source.as_utxo_source() {
-					let spawner = RuntimeSpawner::new(Arc::clone(&runtime));
-					let gossip_verifier = Arc::new(GossipVerifier::new(
-						Arc::new(utxo_source),
-						spawner,
-						Arc::clone(gossip_sync),
-						peer_manager,
-					));
-					gossip_sync.add_utxo_lookup(Some(gossip_verifier));
-				}
-			},
-			_ => (),
-		}
-	}
-
 	pub async fn update_rgs_snapshot(&self) -> Result<u32, Error> {
 		match self {
 			Self::P2PNetwork { gossip_sync: _, .. } => Ok(0),
diff --git a/src/types.rs b/src/types.rs
index 5e9cd74c9..2b7d3829a 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -254,7 +254,7 @@ pub(crate) type Scorer = CombinedScorer<Arc<Graph>, Arc<Logger>>;
 
 pub(crate) type Graph = gossip::NetworkGraph<Arc<Logger>>;
 
-pub(crate) type UtxoLookup = GossipVerifier<RuntimeSpawner, Arc<ChainSource>, Arc<Logger>>;
+pub(crate) type UtxoLookup = GossipVerifier<RuntimeSpawner, Arc<ChainSource>>;
 
 pub(crate) type P2PGossipSync =
 	lightning::routing::gossip::P2PGossipSync<Arc<Graph>, Arc<UtxoLookup>, Arc<Logger>>;
From eb832348a6c0ba94b2ebcdc139ed9022b67a62ca Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 8 Jan 2026 21:08:56 +0100
Subject: [PATCH 16/75] Fix backwards compatibility of `NodeMetrics` reads

In commit dd55d4758d8ea0b71312de574fd42fa25fc5f5d0 we started ignoring
the legacy `latest_channel_monitor_archival_height` field of
`NodeMetrics`. However, we erroneously started reading it as
`Option<u32>`, though, given it's an optional field, it should have
been read as a plain `u32` that might or might not be present. Here we
fix this error.
---
 src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/lib.rs b/src/lib.rs
index d9bca4551..1b19864dd 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1837,7 +1837,7 @@ impl_writeable_tlv_based!(NodeMetrics, {
 	(6, latest_rgs_snapshot_timestamp, option),
 	(8, latest_node_announcement_broadcast_timestamp, option),
 	// 10 used to be latest_channel_monitor_archival_height
-	(10, _legacy_latest_channel_monitor_archival_height, (legacy, Option<u32>, |_: &NodeMetrics| None::<Option<u32>>)),
+	(10, _legacy_latest_channel_monitor_archival_height, (legacy, u32, |_: &NodeMetrics| None::<Option<u32>>)),
 });
 
 pub(crate) fn total_anchor_channels_reserve_sats(
From 06a8108aff12196210728b0078453641a9aa85aa Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 8 Jan 2026 21:16:18 +0100
Subject: [PATCH 17/75] Add backwards compatibility test for non-VSS

We previously added a test asserting backwards compatibility for nodes
reinitializing from a VSS backend. However, given VSS tests are only
run continuously in CI, we here add the same test using the default
SQLite backend, ensuring backwards compatibility breakage is also
checked when running tests locally.
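For context, the test below leans on Cargo's dependency renaming, which
lets a crate pull in a previous release of itself under a different
name. This is exactly the dev-dependency added in the diff that
follows:

	[dev-dependencies]
	ldk-node-062 = { package = "ldk-node", version = "=0.6.2" }

The v0.6.2 node first writes its state to a fresh storage directory,
then the current crate reopens the same directory and asserts that the
node id and on-chain balance survive the reinitialization.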
--- Cargo.toml | 4 +-- tests/integration_tests_rust.rs | 59 +++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cc2a4b194..4f8c0ed7b 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -88,6 +88,7 @@ lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = " proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } +ldk-node-062 = { package = "ldk-node", version = "=0.6.2" } [target.'cfg(not(no_download))'.dev-dependencies] electrsd = { version = "0.36.1", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_27_2"] } @@ -103,9 +104,6 @@ clightningrpc = { version = "0.3.0-beta.8", default-features = false } lnd_grpc_rust = { version = "2.10.0", default-features = false } tokio = { version = "1.37", features = ["fs"] } -[target.'cfg(vss_test)'.dev-dependencies] -ldk-node-062 = { package = "ldk-node", version = "=0.6.2" } - [build-dependencies] uniffi = { version = "0.28.3", features = ["build"], optional = true } diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 4d2a17422..4b82d1f4f 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -27,6 +27,7 @@ use common::{ TestSyncStore, }; use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig}; +use ldk_node::entropy::NodeEntropy; use ldk_node::liquidity::LSPS2ServiceConfig; use ldk_node::payment::{ ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, @@ -2436,3 +2437,61 @@ async fn payment_persistence_after_restart() { restarted_node_a.stop().unwrap(); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn persistence_backwards_compatibility() { + let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd(); + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + + let storage_path = common::random_storage_path().to_str().unwrap().to_owned(); + let seed_bytes = [42u8; 64]; + let node_entropy = NodeEntropy::from_seed_bytes(seed_bytes); + + // Setup a v0.6.2 `Node` + let (old_balance, old_node_id) = { + let mut builder_old = ldk_node_062::Builder::new(); + builder_old.set_network(bitcoin::Network::Regtest); + builder_old.set_storage_dir_path(storage_path.clone()); + builder_old.set_entropy_seed_bytes(seed_bytes); + builder_old.set_chain_source_esplora(esplora_url.clone(), None); + let node_old = builder_old.build().unwrap(); + + node_old.start().unwrap(); + let addr_old = node_old.onchain_payment().new_address().unwrap(); + common::premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr_old], + bitcoin::Amount::from_sat(100_000), + ) + .await; + node_old.sync_wallets().unwrap(); + + let balance = node_old.list_balances().spendable_onchain_balance_sats; + assert!(balance > 0); + let node_id = node_old.node_id(); + + node_old.stop().unwrap(); + + (balance, node_id) + }; + + // Now ensure we can still reinit from the same backend. 
+ let mut builder_new = Builder::new(); + builder_new.set_network(bitcoin::Network::Regtest); + builder_new.set_storage_dir_path(storage_path); + builder_new.set_chain_source_esplora(esplora_url, None); + + let node_new = builder_new.build(node_entropy).unwrap(); + + node_new.start().unwrap(); + node_new.sync_wallets().unwrap(); + + let new_balance = node_new.list_balances().spendable_onchain_balance_sats; + let new_node_id = node_new.node_id(); + + assert_eq!(old_node_id, new_node_id); + assert_eq!(old_balance, new_balance); + + node_new.stop().unwrap(); +} From 93b6b4256c36be814e0c0af92d15297a4345887e Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 8 Jan 2026 21:18:48 +0100 Subject: [PATCH 18/75] Drop unused `Watch` import --- src/builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/builder.rs b/src/builder.rs index 187f780d2..c1acf71d4 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -20,7 +20,7 @@ use bitcoin::key::Secp256k1; use bitcoin::secp256k1::PublicKey; use bitcoin::{BlockHash, Network}; use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; -use lightning::chain::{chainmonitor, BestBlock, Watch}; +use lightning::chain::{chainmonitor, BestBlock}; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; From 3c41141bc1f9c19eff465f5c16e285fd15185bbb Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 6 Jan 2026 13:58:24 +0100 Subject: [PATCH 19/75] Minor adjustments to `UnifiedPayment` docs --- src/lib.rs | 12 ++++++------ src/payment/unified.rs | 16 ++++++++-------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 1b19864dd..cf728c8bf 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -941,10 +941,10 @@ impl Node { )) } - /// Returns a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], - /// and [BOLT 12] payment options. + /// Returns a payment handler that supports creating and paying to [BIP 21] URIs with on-chain, + /// [BOLT 11], and [BOLT 12] payment options. /// - /// This handler allows you to send payments to these URIs as well as [BIP 353] HRNs. + /// Also supports sending payments to [BIP 353] Human-Readable Names. /// /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md @@ -962,10 +962,10 @@ impl Node { ) } - /// Returns a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], - /// and [BOLT 12] payment options. + /// Returns a payment handler that supports creating and paying to [BIP 21] URIs with on-chain, + /// [BOLT 11], and [BOLT 12] payment options. /// - /// This handler allows you to send payments to these URIs as well as [BIP 353] HRNs. + /// Also supports sending payments to [BIP 353] Human-Readable Names. /// /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md diff --git a/src/payment/unified.rs b/src/payment/unified.rs index 8225205fd..3a8efd1b5 100644 --- a/src/payment/unified.rs +++ b/src/payment/unified.rs @@ -5,10 +5,10 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -//! 
Holds a payment handler allowing to create [BIP 21] URIs with on-chain, [BOLT 11], and [BOLT 12] payment -//! options. +//! Holds a payment handler that supports creating and paying to [BIP 21] URIs with on-chain, [BOLT 11], +//! and [BOLT 12] payment options. //! -//! It also supports sending payments to these URIs as well as to [BIP 353] Human-Readable Names. +//! Also supports sending payments to [BIP 353] Human-Readable Names. //! //! [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki //! [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki @@ -44,12 +44,12 @@ struct Extras { bolt12_offer: Option, } -/// A payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], and [BOLT 12] payment -/// option. +/// A payment handler that supports creating and paying to [BIP 21] URIs with on-chain, [BOLT 11], +/// and [BOLT 12] payment options. /// -/// Should be retrieved by calling [`Node::unified_payment`] +/// Also supports sending payments to [BIP 353] Human-Readable Names. /// -/// It also supports sending payments to these URIs as well as to [BIP 353] Human-Readable Names. +/// Should be retrieved by calling [`Node::unified_payment`] /// /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki /// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki @@ -147,7 +147,7 @@ impl UnifiedPayment { /// has an offer and or invoice, it will try to pay the offer first followed by the invoice. /// If they both fail, the on-chain payment will be paid. /// - /// Returns a `UnifiedPaymentResult` indicating the outcome of the payment. If an error + /// Returns a [`UnifiedPaymentResult`] indicating the outcome of the payment. If an error /// occurs, an `Error` is returned detailing the issue encountered. /// /// If `route_parameters` are provided they will override the default as well as the From 5681526cf8c2b42d9b657e852cc16e81d8958156 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 6 Jan 2026 14:13:44 +0100 Subject: [PATCH 20/75] Timeout DNS resolution requests after some time .. as otherwise we might wait indefinitely for a service to respond. --- src/config.rs | 3 +++ src/payment/unified.rs | 43 +++++++++++++++++++++++++++++++----------- 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/src/config.rs b/src/config.rs index 329f406d8..1b71d0d4e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -96,6 +96,9 @@ pub const WALLET_KEYS_SEED_LEN: usize = 64; // The timeout after which we abort a external scores sync operation. pub(crate) const EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS: u64 = 5; +// The timeout after which we abort a parsing/looking up an HRN resolution. +pub(crate) const HRN_RESOLUTION_TIMEOUT_SECS: u64 = 5; + #[derive(Debug, Clone)] /// Represents the configuration of an [`Node`] instance. /// diff --git a/src/payment/unified.rs b/src/payment/unified.rs index 3a8efd1b5..e62bf65eb 100644 --- a/src/payment/unified.rs +++ b/src/payment/unified.rs @@ -15,6 +15,7 @@ //! [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md //! 
[BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md use std::sync::Arc; +use std::time::Duration; use std::vec::IntoIter; use bip21::de::ParamKind; @@ -29,6 +30,7 @@ use lightning::onion_message::dns_resolution::HumanReadableName; use lightning::routing::router::RouteParametersConfig; use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; +use crate::config::HRN_RESOLUTION_TIMEOUT_SECS; use crate::error::Error; use crate::ffi::maybe_wrap; use crate::logger::{log_error, LdkLogger, Logger}; @@ -159,17 +161,24 @@ impl UnifiedPayment { &self, uri_str: &str, amount_msat: Option, route_parameters: Option, ) -> Result { - let instructions = PaymentInstructions::parse( + let parse_fut = PaymentInstructions::parse( uri_str, self.config.network, self.hrn_resolver.as_ref(), false, - ) - .await - .map_err(|e| { - log_error!(self.logger, "Failed to parse payment instructions: {:?}", e); - Error::UriParameterParsingFailed - })?; + ); + + let instructions = + tokio::time::timeout(Duration::from_secs(HRN_RESOLUTION_TIMEOUT_SECS), parse_fut) + .await + .map_err(|e| { + log_error!(self.logger, "Payment instructions resolution timed out: {:?}", e); + Error::UriParameterParsingFailed + })? + .map_err(|e| { + log_error!(self.logger, "Failed to parse payment instructions: {:?}", e); + Error::UriParameterParsingFailed + })?; let resolved = match instructions { PaymentInstructions::ConfigurableAmount(instr) => { @@ -183,10 +192,22 @@ impl UnifiedPayment { Error::InvalidAmount })?; - instr.set_amount(amt, self.hrn_resolver.as_ref()).await.map_err(|e| { - log_error!(self.logger, "Failed to set amount: {:?}", e); - Error::InvalidAmount - })? + let fut = instr.set_amount(amt, self.hrn_resolver.as_ref()); + + tokio::time::timeout(Duration::from_secs(HRN_RESOLUTION_TIMEOUT_SECS), fut) + .await + .map_err(|e| { + log_error!( + self.logger, + "Payment instructions resolution timed out: {:?}", + e + ); + Error::UriParameterParsingFailed + })? + .map_err(|e| { + log_error!(self.logger, "Failed to set amount: {:?}", e); + Error::InvalidAmount + })? }, PaymentInstructions::FixedAmount(instr) => { if let Some(user_amount) = amount_msat { From b167ef6187ef020fb582270156fd69a497db33f9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 6 Jan 2026 14:17:31 +0100 Subject: [PATCH 21/75] Consistently use `_msat` suffix for msat amounts --- src/payment/unified.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/payment/unified.rs b/src/payment/unified.rs index e62bf65eb..671af14ff 100644 --- a/src/payment/unified.rs +++ b/src/payment/unified.rs @@ -182,12 +182,12 @@ impl UnifiedPayment { let resolved = match instructions { PaymentInstructions::ConfigurableAmount(instr) => { - let amount = amount_msat.ok_or_else(|| { + let amount_msat = amount_msat.ok_or_else(|| { log_error!(self.logger, "No amount specified. Aborting the payment."); Error::InvalidAmount })?; - let amt = BPIAmount::from_milli_sats(amount).map_err(|e| { + let amt = BPIAmount::from_milli_sats(amount_msat).map_err(|e| { log_error!(self.logger, "Error while converting amount : {:?}", e); Error::InvalidAmount })?; @@ -210,8 +210,8 @@ impl UnifiedPayment { })? 
}, PaymentInstructions::FixedAmount(instr) => { - if let Some(user_amount) = amount_msat { - if instr.max_amount().map_or(false, |amt| user_amount < amt.milli_sats()) { + if let Some(user_amount_msat) = amount_msat { + if instr.max_amount().map_or(false, |amt| user_amount_msat < amt.milli_sats()) { log_error!(self.logger, "Amount specified is less than the amount in the parsed URI. Aborting the payment."); return Err(Error::InvalidAmount); } From e93844aa981fb0f80e588d49ff1357bff326ac2e Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 7 Jan 2026 11:29:49 +0100 Subject: [PATCH 22/75] Set restrictive file permissions for seed file Previously, seed files were created using `fs::File::create()` which inherits the default umask, potentially making the sensitive seed material world-readable on Unix systems. This change: - Creates seed files with mode 0o400 (owner read only) on Unix - Uses `create_new` instead of `create` to atomically fail if the file already exists, providing defense-in-depth against TOCTOU race conditions Co-Authored-By: Claude AI --- src/io/utils.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/io/utils.rs b/src/io/utils.rs index 928d4031b..e2f8ed23b 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -5,12 +5,15 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use std::fs; +use std::fs::{self, OpenOptions}; use std::io::Write; use std::ops::Deref; use std::path::Path; use std::sync::Arc; +#[cfg(unix)] +use std::os::unix::fs::OpenOptionsExt; + use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey}; @@ -78,7 +81,11 @@ pub(crate) fn read_or_generate_seed_file( fs::create_dir_all(parent_dir)?; } - let mut f = fs::File::create(keys_seed_path)?; + #[cfg(unix)] + let mut f = OpenOptions::new().write(true).create_new(true).mode(0o400).open(keys_seed_path)?; + + #[cfg(not(unix))] + let mut f = OpenOptions::new().write(true).create_new(true).open(keys_seed_path)?; f.write_all(&key)?; From 23cb6ed5ebceba68b362c52781533aeff86ba32d Mon Sep 17 00:00:00 2001 From: Fmt Bot Date: Sun, 11 Jan 2026 01:44:57 +0000 Subject: [PATCH 23/75] 2026-01-11 automated rustfmt nightly --- src/io/utils.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/io/utils.rs b/src/io/utils.rs index b94a329f7..4ddc03b07 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -8,11 +8,10 @@ use std::fs::{self, OpenOptions}; use std::io::Write; use std::ops::Deref; -use std::path::Path; -use std::sync::Arc; - #[cfg(unix)] use std::os::unix::fs::OpenOptionsExt; +use std::path::Path; +use std::sync::Arc; use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; From 295b2cea8153652522aa2e642111d52e692525c3 Mon Sep 17 00:00:00 2001 From: Chuks Agbakuru Date: Sun, 11 Jan 2026 13:39:46 +0100 Subject: [PATCH 24/75] Handle conditional NodeEntropy types for UniFFI Adapt the NodeEntropy initialization to account for diverging constructor signatures when the uniffi feature is active. The UniFFI layer requires shared ownership (Arc) and dynamic byte validation (Result/Vec) to facilitate memory management and error handling across the FFI boundary. 
This change ensures the builder receives the expected pointer type in UniFFI builds while maintaining the zero-cost stack allocation for standard Rust usage. --- tests/integration_tests_rust.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 4b82d1f4f..3d9f04e38 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -2445,6 +2445,9 @@ async fn persistence_backwards_compatibility() { let storage_path = common::random_storage_path().to_str().unwrap().to_owned(); let seed_bytes = [42u8; 64]; + #[cfg(feature = "uniffi")] + let node_entropy = Arc::new(NodeEntropy::from_seed_bytes(seed_bytes.to_vec()).unwrap()); + #[cfg(not(feature = "uniffi"))] let node_entropy = NodeEntropy::from_seed_bytes(seed_bytes); // Setup a v0.6.2 `Node` From 168be274b41c1cde85cc0bf1c8c217e5471d0c39 Mon Sep 17 00:00:00 2001 From: Chuks Agbakuru Date: Sun, 11 Jan 2026 23:04:01 +0100 Subject: [PATCH 25/75] fixup! Handle conditional NodeEntropy types for UniFFI --- tests/common/mod.rs | 12 ++++++++++++ tests/integration_tests_rust.rs | 13 +++++-------- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 96f58297c..a998217b2 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1288,6 +1288,18 @@ pub(crate) async fn do_channel_full_cycle( println!("\nB stopped"); } +// Helper to unify entropy creation +pub(crate) fn create_test_entropy(seed: [u8; 64]) -> NodeEntropy { + #[cfg(feature = "uniffi")] + { + NodeEntropy::from_seed_bytes(seed.to_vec()).unwrap() + } + #[cfg(not(feature = "uniffi"))] + { + NodeEntropy::from_seed_bytes(seed) + } +} + // A `KVStore` impl for testing purposes that wraps all our `KVStore`s and asserts their synchronicity. 
 #[derive(Clone)]
 pub(crate) struct TestSyncStore {
diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs
index 3d9f04e38..e642c0713 100644
--- a/tests/integration_tests_rust.rs
+++ b/tests/integration_tests_rust.rs
@@ -17,8 +17,8 @@ use bitcoin::hashes::Hash;
 use bitcoin::{Address, Amount, ScriptBuf};
 use common::logging::{init_log_logger, validate_log_entry, MultiNodeLogger, TestLogWriter};
 use common::{
-	bump_fee_and_broadcast, distribute_funds_unconfirmed, do_channel_full_cycle,
-	expect_channel_pending_event, expect_channel_ready_event, expect_event,
+	bump_fee_and_broadcast, create_test_entropy, distribute_funds_unconfirmed,
+	do_channel_full_cycle, expect_channel_pending_event, expect_channel_ready_event, expect_event,
 	expect_payment_claimable_event, expect_payment_received_event, expect_payment_successful_event,
 	expect_splice_pending_event, generate_blocks_and_wait, open_channel, open_channel_push_amt,
 	premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config,
@@ -27,7 +27,6 @@ use common::{
 	TestSyncStore,
 };
 use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig};
-use ldk_node::entropy::NodeEntropy;
 use ldk_node::liquidity::LSPS2ServiceConfig;
 use ldk_node::payment::{
 	ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus,
@@ -2445,10 +2444,8 @@ async fn persistence_backwards_compatibility() {
 	let storage_path = common::random_storage_path().to_str().unwrap().to_owned();
 	let seed_bytes = [42u8; 64];
 
-	#[cfg(feature = "uniffi")]
-	let node_entropy = Arc::new(NodeEntropy::from_seed_bytes(seed_bytes.to_vec()).unwrap());
-	#[cfg(not(feature = "uniffi"))]
-	let node_entropy = NodeEntropy::from_seed_bytes(seed_bytes);
+
+	let node_entropy = create_test_entropy(seed_bytes);
 
 	// Setup a v0.6.2 `Node`
 	let (old_balance, old_node_id) = {
From 6a32f363c68c2d0484fef9aa7d6188066ae9495e Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 30 Dec 2025 14:09:57 +0000
Subject: [PATCH 26/75] Add test for circular references leading to
 `NetworkGraph` leaks

Due to two circular `Arc` references, after `stop`ping and `drop`ping
the `Node` instance the bulk of ldk-node's memory (in the form of the
`NetworkGraph`) would hang around. Here we add a test for this in our
integration tests, checking if the `NetworkGraph` (as a proxy for other
objects referenced by the `PeerManager`) hangs around after `Node`s are
`drop`ped.
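The core of the check is `Weak::strong_count`: a `Weak` downgraded from
an `Arc` reports zero strong references once every clone of that `Arc`
has been dropped. A standalone sketch of the idea, using a `String` as
a stand-in for the `NetworkGraph` (this is not the actual test code):

	use std::any::Any;
	use std::sync::{Arc, Weak};

	fn main() {
		let graph = Arc::new(String::from("network graph stand-in"));
		// Erase the concrete type, as the leak checker below does.
		let weak: Weak<dyn Any + Send + Sync> = Arc::downgrade(&graph);

		drop(graph);

		// With no reference cycles left, dropping the last `Arc` frees the
		// value, and the weak handle observes zero strong references.
		assert_eq!(weak.strong_count(), 0);
	}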
--- .github/workflows/rust.yml | 4 ++-- .github/workflows/vss-integration.yml | 2 +- Cargo.toml | 1 + src/builder.rs | 14 ++++++++++++++ src/lib.rs | 21 +++++++++++++++++++++ 5 files changed, 39 insertions(+), 3 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 661703ded..1ccade444 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -80,11 +80,11 @@ jobs: - name: Test on Rust ${{ matrix.toolchain }} if: "matrix.platform != 'windows-latest'" run: | - RUSTFLAGS="--cfg no_download" cargo test + RUSTFLAGS="--cfg no_download --cfg cycle_tests" cargo test - name: Test with UniFFI support on Rust ${{ matrix.toolchain }} if: "matrix.platform != 'windows-latest' && matrix.build-uniffi" run: | - RUSTFLAGS="--cfg no_download" cargo test --features uniffi + RUSTFLAGS="--cfg no_download --cfg cycle_tests" cargo test --features uniffi doc: name: Documentation diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index 8473ed413..b5c4e9a0b 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -45,4 +45,4 @@ jobs: cd ldk-node export TEST_VSS_BASE_URL="http://localhost:8080/vss" RUSTFLAGS="--cfg vss_test" cargo test io::vss_store - RUSTFLAGS="--cfg vss_test" cargo test --test integration_tests_vss + RUSTFLAGS="--cfg vss_test --cfg cycle_tests" cargo test --test integration_tests_vss diff --git a/Cargo.toml b/Cargo.toml index 431d6b8d8..207ad92a1 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -124,6 +124,7 @@ check-cfg = [ "cfg(tokio_unstable)", "cfg(cln_test)", "cfg(lnd_test)", + "cfg(cycle_tests)", ] [[bench]] diff --git a/src/builder.rs b/src/builder.rs index ca8e71d03..510d86bdd 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1684,6 +1684,18 @@ fn build_with_store_internal( let pathfinding_scores_sync_url = pathfinding_scores_sync_config.map(|c| c.url.clone()); + #[cfg(cycle_tests)] + let mut _leak_checker = crate::LeakChecker(Vec::new()); + #[cfg(cycle_tests)] + { + use std::any::Any; + use std::sync::Weak; + + _leak_checker.0.push(Arc::downgrade(&channel_manager) as Weak); + _leak_checker.0.push(Arc::downgrade(&network_graph) as Weak); + _leak_checker.0.push(Arc::downgrade(&wallet) as Weak); + } + Ok(Node { runtime, stop_sender, @@ -1716,6 +1728,8 @@ fn build_with_store_internal( om_mailbox, async_payments_role, hrn_resolver, + #[cfg(cycle_tests)] + _leak_checker, }) } diff --git a/src/lib.rs b/src/lib.rs index d9bca4551..a76df1977 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -110,6 +110,8 @@ use std::default::Default; use std::net::ToSocketAddrs; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +#[cfg(cycle_tests)] +use std::{any::Any, sync::Weak}; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; use bitcoin::secp256k1::PublicKey; @@ -173,6 +175,23 @@ use crate::scoring::setup_background_pathfinding_scores_sync; #[cfg(feature = "uniffi")] uniffi::include_scaffolding!("ldk_node"); +#[cfg(cycle_tests)] +/// A list of [`Weak`]s which can be used to check that a [`Node`]'s inner fields are being +/// properly released after the [`Node`] is dropped. +pub struct LeakChecker(Vec>); + +#[cfg(cycle_tests)] +impl LeakChecker { + /// Asserts that all the stored [`Weak`]s point to contents which have been freed. + /// + /// This will (obviously) panic if the [`Node`] has not yet been dropped. 
+ pub fn assert_no_leaks(&self) { + for weak in self.0.iter() { + assert_eq!(weak.strong_count(), 0); + } + } +} + /// The main interface object of LDK Node, wrapping the necessary LDK and BDK functionalities. /// /// Needs to be initialized and instantiated through [`Builder::build`]. @@ -208,6 +227,8 @@ pub struct Node { om_mailbox: Option>, async_payments_role: Option, hrn_resolver: Arc, + #[cfg(cycle_tests)] + _leak_checker: LeakChecker, } impl Node { From 4f86a6430e076e7263b007aa5ba403c133c28948 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 12 Jan 2026 14:29:51 +0100 Subject: [PATCH 27/75] Add structured logging context fields to LogRecord Extend LogRecord with peer_id, channel_id, and payment_hash fields from LDK's Record struct. These structured fields are now available to custom LogWriter implementations and are automatically appended to log messages by the built-in FileWriter and LogFacadeWriter. - Add peer_id, channel_id, payment_hash fields to LogRecord (both uniffi and non-uniffi versions) - Add LogContext struct with Display impl to format fields with truncated hex values, avoiding intermediate heap allocations - Update FileWriter and LogFacadeWriter to append context to messages - Update UDL bindings with new LogRecord fields - Add unit tests for LogContext and LogFacadeWriter Co-Authored-By: Claude Opus 4.5 --- bindings/ldk_node.udl | 3 + src/logger.rs | 213 +++++++++++++++++++++++++++++++++++++++- tests/common/logging.rs | 11 ++- 3 files changed, 219 insertions(+), 8 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index b59a38b04..c881dbe09 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -83,6 +83,9 @@ dictionary LogRecord { string args; string module_path; u32 line; + PublicKey? peer_id; + ChannelId? channel_id; + PaymentHash? payment_hash; }; [Trait, WithForeign] diff --git a/src/logger.rs b/src/logger.rs index 4eaefad74..e38a5f1fc 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -7,14 +7,16 @@ //! Logging-related objects. -#[cfg(not(feature = "uniffi"))] use core::fmt; use std::fs; use std::io::Write; use std::path::Path; use std::sync::Arc; +use bitcoin::secp256k1::PublicKey; use chrono::Utc; +use lightning::ln::types::ChannelId; +use lightning::types::payment::PaymentHash; pub use lightning::util::logger::Level as LogLevel; pub(crate) use lightning::util::logger::{Logger as LdkLogger, Record as LdkRecord}; pub(crate) use lightning::{log_bytes, log_debug, log_error, log_info, log_trace}; @@ -32,6 +34,64 @@ pub struct LogRecord<'a> { pub module_path: &'a str, /// The line containing the message. pub line: u32, + /// The node id of the peer pertaining to the logged record. + pub peer_id: Option, + /// The channel id of the channel pertaining to the logged record. + pub channel_id: Option, + /// The payment hash pertaining to the logged record. + pub payment_hash: Option, +} + +/// Structured context fields for log messages. +/// +/// Implements `Display` to format context fields (channel_id, peer_id, payment_hash) directly +/// into a formatter, avoiding intermediate heap allocations when used with `format_args!` or +/// `write!` macros. +/// +/// Note: LDK's `Record` Display implementation uses fixed-width padded columns and different +/// formatting for test vs production builds. We intentionally use a simpler format here: +/// fields are only included when present (no padding), and the format is consistent across +/// all build configurations. 
+pub struct LogContext<'a> { + /// The channel id of the channel pertaining to the logged record. + pub channel_id: Option<&'a ChannelId>, + /// The node id of the peer pertaining to the logged record. + pub peer_id: Option<&'a PublicKey>, + /// The payment hash pertaining to the logged record. + pub payment_hash: Option<&'a PaymentHash>, +} + +impl fmt::Display for LogContext<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn truncate(s: &str) -> &str { + &s[..s.len().min(6)] + } + + if self.channel_id.is_none() && self.peer_id.is_none() && self.payment_hash.is_none() { + return Ok(()); + } + + write!(f, " (")?; + let mut need_space = false; + if let Some(c) = self.channel_id { + write!(f, "ch:{}", truncate(&c.to_string()))?; + need_space = true; + } + if let Some(p) = self.peer_id { + if need_space { + write!(f, " ")?; + } + write!(f, "p:{}", truncate(&p.to_string()))?; + need_space = true; + } + if let Some(h) = self.payment_hash { + if need_space { + write!(f, " ")?; + } + write!(f, "h:{}", truncate(&format!("{:?}", h)))?; + } + write!(f, ")") + } } /// A unit of logging output with metadata to enable filtering `module_path`, @@ -50,6 +110,12 @@ pub struct LogRecord { pub module_path: String, /// The line containing the message. pub line: u32, + /// The node id of the peer pertaining to the logged record. + pub peer_id: Option, + /// The channel id of the channel pertaining to the logged record. + pub channel_id: Option, + /// The payment hash pertaining to the logged record. + pub payment_hash: Option, } #[cfg(feature = "uniffi")] @@ -60,6 +126,9 @@ impl<'a> From> for LogRecord { args: record.args.to_string(), module_path: record.module_path.to_string(), line: record.line, + peer_id: record.peer_id, + channel_id: record.channel_id, + payment_hash: record.payment_hash, } } } @@ -72,6 +141,9 @@ impl<'a> From> for LogRecord<'a> { args: record.args, module_path: record.module_path, line: record.line, + peer_id: record.peer_id, + channel_id: record.channel_id, + payment_hash: record.payment_hash, } } } @@ -113,6 +185,12 @@ pub(crate) enum Writer { impl LogWriter for Writer { fn log(&self, record: LogRecord) { + let context = LogContext { + channel_id: record.channel_id.as_ref(), + peer_id: record.peer_id.as_ref(), + payment_hash: record.payment_hash.as_ref(), + }; + match self { Writer::FileWriter { file_path, max_log_level } => { if record.level < *max_log_level { @@ -120,12 +198,13 @@ impl LogWriter for Writer { } let log = format!( - "{} {:<5} [{}:{}] {}\n", + "{} {:<5} [{}:{}] {}{}\n", Utc::now().format("%Y-%m-%d %H:%M:%S%.3f"), record.level.to_string(), record.module_path, record.line, - record.args + record.args, + context, ); fs::OpenOptions::new() @@ -153,7 +232,7 @@ impl LogWriter for Writer { .target(record.module_path) .module_path(Some(record.module_path)) .line(Some(record.line)) - .args(format_args!("{}", record.args)) + .args(format_args!("{}{}", record.args, context)) .build(), ); #[cfg(feature = "uniffi")] @@ -162,7 +241,7 @@ impl LogWriter for Writer { .target(&record.module_path) .module_path(Some(&record.module_path)) .line(Some(record.line)) - .args(format_args!("{}", record.args)) + .args(format_args!("{}{}", record.args, context)) .build(), ); }, @@ -222,3 +301,127 @@ impl LdkLogger for Logger { } } } + +#[cfg(test)] +mod tests { + use super::*; + use std::sync::Mutex; + + /// A minimal log facade logger that captures log output for testing. 
+ struct TestLogger { + log: Arc>, + } + + impl log::Log for TestLogger { + fn enabled(&self, _metadata: &log::Metadata) -> bool { + true + } + + fn log(&self, record: &log::Record) { + *self.log.lock().unwrap() = record.args().to_string(); + } + + fn flush(&self) {} + } + + /// Tests that LogContext correctly formats all three structured fields + /// (channel_id, peer_id, payment_hash) with space prefixes and 6-char truncation. + #[test] + fn test_log_context_all_fields() { + let channel_id = ChannelId::from_bytes([ + 0xab, 0xcd, 0xef, 0x12, 0x34, 0x56, 0x78, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + ]); + let peer_id = PublicKey::from_slice(&[ + 0x02, 0xab, 0xcd, 0xef, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf1, 0x23, 0x45, + 0x67, 0x89, 0xab, 0xcd, 0xef, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf1, 0x23, + 0x45, 0x67, 0x89, 0xab, 0xcd, + ]) + .unwrap(); + let payment_hash = PaymentHash([ + 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + ]); + + let context = LogContext { + channel_id: Some(&channel_id), + peer_id: Some(&peer_id), + payment_hash: Some(&payment_hash), + }; + + assert_eq!(context.to_string(), " (ch:abcdef p:02abcd h:fedcba)"); + } + + /// Tests that LogContext returns an empty string when no fields are provided. + #[test] + fn test_log_context_no_fields() { + let context = LogContext { channel_id: None, peer_id: None, payment_hash: None }; + assert_eq!(context.to_string(), ""); + } + + /// Tests that LogContext only includes present fields. + #[test] + fn test_log_context_partial_fields() { + let channel_id = ChannelId::from_bytes([ + 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + ]); + + let context = + LogContext { channel_id: Some(&channel_id), peer_id: None, payment_hash: None }; + assert_eq!(context.to_string(), " (ch:123456)"); + } + + /// Tests that LogFacadeWriter appends structured context fields to the log message. 
+ #[test] + fn test_log_facade_writer_includes_structured_context() { + let log = Arc::new(Mutex::new(String::new())); + let test_logger = TestLogger { log: log.clone() }; + + let _ = log::set_boxed_logger(Box::new(test_logger)); + log::set_max_level(log::LevelFilter::Trace); + + let writer = Writer::LogFacadeWriter; + + let channel_id = ChannelId::from_bytes([ + 0xab, 0xcd, 0xef, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + ]); + let peer_id = PublicKey::from_slice(&[ + 0x02, 0xab, 0xcd, 0xef, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf1, 0x23, 0x45, + 0x67, 0x89, 0xab, 0xcd, 0xef, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf1, 0x23, + 0x45, 0x67, 0x89, 0xab, 0xcd, + ]) + .unwrap(); + + #[cfg(not(feature = "uniffi"))] + let record = LogRecord { + level: LogLevel::Info, + args: format_args!("Test message"), + module_path: "test_module", + line: 42, + peer_id: Some(peer_id), + channel_id: Some(channel_id), + payment_hash: None, + }; + + #[cfg(feature = "uniffi")] + let record = LogRecord { + level: LogLevel::Info, + args: "Test message".to_string(), + module_path: "test_module".to_string(), + line: 42, + peer_id: Some(peer_id), + channel_id: Some(channel_id), + payment_hash: None, + }; + + writer.log(record); + + assert_eq!(*log.lock().unwrap(), "Test message (ch:abcdef p:02abcd)"); + } +} diff --git a/tests/common/logging.rs b/tests/common/logging.rs index 3ff24d34d..1e3a8a1c2 100644 --- a/tests/common/logging.rs +++ b/tests/common/logging.rs @@ -1,7 +1,7 @@ use std::sync::{Arc, Mutex}; use chrono::Utc; -use ldk_node::logger::{LogLevel, LogRecord, LogWriter}; +use ldk_node::logger::{LogContext, LogLevel, LogRecord, LogWriter}; #[cfg(not(feature = "uniffi"))] use log::Record as LogFacadeRecord; use log::{Level as LogFacadeLevel, LevelFilter as LogFacadeLevelFilter, Log as LogFacadeLog}; @@ -156,13 +156,18 @@ impl MultiNodeLogger { impl LogWriter for MultiNodeLogger { fn log(&self, record: LogRecord) { let log = format!( - "[{}] {} {:<5} [{}:{}] {}\n", + "[{}] {} {:<5} [{}:{}] {}{}\n", self.node_id, Utc::now().format("%Y-%m-%d %H:%M:%S%.3f"), record.level.to_string(), record.module_path, record.line, - record.args + record.args, + LogContext { + channel_id: record.channel_id.as_ref(), + peer_id: record.peer_id.as_ref(), + payment_hash: record.payment_hash.as_ref(), + }, ); print!("{}", log); From 625cd5d63171f64e194720aec2114bbf8ef459a9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 13 Jan 2026 09:40:08 +0100 Subject: [PATCH 28/75] Drop entropy creation helper, cleanup remaining warnings Previously we introduced a `create_test_entropy` helper method in `common.rs` that was only used in one specific test. We here opt to drop this again and also clean up the remaining warnings when run under `--features uniffi`. 
--- tests/common/mod.rs | 12 ------------ tests/integration_tests_rust.rs | 14 ++++++++++---- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index a998217b2..96f58297c 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1288,18 +1288,6 @@ pub(crate) async fn do_channel_full_cycle( println!("\nB stopped"); } -// Helper to unify entropy creation -pub(crate) fn create_test_entropy(seed: [u8; 64]) -> NodeEntropy { - #[cfg(feature = "uniffi")] - { - NodeEntropy::from_seed_bytes(seed.to_vec()).unwrap() - } - #[cfg(not(feature = "uniffi"))] - { - NodeEntropy::from_seed_bytes(seed) - } -} - // A `KVStore` impl for testing purposes that wraps all our `KVStore`s and asserts their synchronicity. #[derive(Clone)] pub(crate) struct TestSyncStore { diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index e642c0713..4e94dd044 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -17,8 +17,8 @@ use bitcoin::hashes::Hash; use bitcoin::{Address, Amount, ScriptBuf}; use common::logging::{init_log_logger, validate_log_entry, MultiNodeLogger, TestLogWriter}; use common::{ - bump_fee_and_broadcast, create_test_entropy, distribute_funds_unconfirmed, - do_channel_full_cycle, expect_channel_pending_event, expect_channel_ready_event, expect_event, + bump_fee_and_broadcast, distribute_funds_unconfirmed, do_channel_full_cycle, + expect_channel_pending_event, expect_channel_ready_event, expect_event, expect_payment_claimable_event, expect_payment_received_event, expect_payment_successful_event, expect_splice_pending_event, generate_blocks_and_wait, open_channel, open_channel_push_amt, premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config, @@ -27,6 +27,7 @@ use common::{ TestSyncStore, }; use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig}; +use ldk_node::entropy::NodeEntropy; use ldk_node::liquidity::LSPS2ServiceConfig; use ldk_node::payment::{ ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, @@ -2445,8 +2446,6 @@ async fn persistence_backwards_compatibility() { let storage_path = common::random_storage_path().to_str().unwrap().to_owned(); let seed_bytes = [42u8; 64]; - let node_entropy = create_test_entropy(seed_bytes); - // Setup a v0.6.2 `Node` let (old_balance, old_node_id) = { let mut builder_old = ldk_node_062::Builder::new(); @@ -2477,11 +2476,18 @@ async fn persistence_backwards_compatibility() { }; // Now ensure we can still reinit from the same backend. + #[cfg(feature = "uniffi")] + let builder_new = Builder::new(); + #[cfg(not(feature = "uniffi"))] let mut builder_new = Builder::new(); builder_new.set_network(bitcoin::Network::Regtest); builder_new.set_storage_dir_path(storage_path); builder_new.set_chain_source_esplora(esplora_url, None); + #[cfg(feature = "uniffi")] + let node_entropy = NodeEntropy::from_seed_bytes(seed_bytes.to_vec()).unwrap(); + #[cfg(not(feature = "uniffi"))] + let node_entropy = NodeEntropy::from_seed_bytes(seed_bytes); let node_new = builder_new.build(node_entropy.into()).unwrap(); node_new.start().unwrap(); From ca0d08dff1cd0af821abf33ee8e2765eb44a67d9 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 13 Jan 2026 10:18:53 +0100 Subject: [PATCH 29/75] Drop mention of Flutter bindings from the README The Flutter bindings have not actively been maintained recently, so we drop them from the README to not misguide users. 
---
 README.md | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 4e60d3602..dd4f434db 100644
--- a/README.md
+++ b/README.md
@@ -67,7 +67,7 @@ LDK Node currently comes with a decidedly opinionated set of design choices:
 - Entropy for the Lightning and on-chain wallets may be sourced from raw bytes or a [BIP39](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki) mnemonic. In addition, LDK Node offers the means to generate and persist the entropy bytes to disk.
 
 ## Language Support
-LDK Node itself is written in [Rust][rust] and may therefore be natively added as a library dependency to any `std` Rust program. However, beyond its Rust API it also offers language bindings for [Swift][swift], [Kotlin][kotlin], and [Python][python] based on the [UniFFI](https://github.com/mozilla/uniffi-rs/). Moreover, [Flutter bindings][flutter_bindings] are also available.
+LDK Node itself is written in [Rust][rust] and may therefore be natively added as a library dependency to any `std` Rust program. However, beyond its Rust API it also offers language bindings for [Swift][swift], [Kotlin][kotlin], and [Python][python] based on the [UniFFI](https://github.com/mozilla/uniffi-rs/).
 
 ## MSRV
 The Minimum Supported Rust Version (MSRV) is currently 1.85.0.
@@ -85,4 +85,3 @@ The Minimum Supported Rust Version (MSRV) is currently 1.85.0.
 [swift]: https://www.swift.org/
 [kotlin]: https://kotlinlang.org/
 [python]: https://www.python.org/
-[flutter_bindings]: https://github.com/LtbLightning/ldk-node-flutter

From 95c7097f5b698863dbf1001e5426e075519266ae Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 13 Jan 2026 14:51:27 +0000
Subject: [PATCH 30/75] Move `RuntimeSpawner` to `runtime.rs`

In a few commits, as we upgrade LDK, we'll use `RuntimeSpawner` outside of
gossip, so it makes much more sense to have it live in `runtime.rs` instead.

---
 src/gossip.rs  | 20 +-------------------
 src/runtime.rs | 18 ++++++++++++++++++
 src/types.rs   |  2 +-
 3 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/src/gossip.rs b/src/gossip.rs
index 2b524d9ae..f42b4602c 100644
--- a/src/gossip.rs
+++ b/src/gossip.rs
@@ -5,18 +5,16 @@
 // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
 // accordance with one or both of these licenses.
-use std::future::Future; use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::Arc; use std::time::Duration; -use lightning::util::native_async::FutureSpawner; use lightning_block_sync::gossip::GossipVerifier; use crate::chain::ChainSource; use crate::config::RGS_SYNC_TIMEOUT_SECS; use crate::logger::{log_trace, LdkLogger, Logger}; -use crate::runtime::Runtime; +use crate::runtime::{Runtime, RuntimeSpawner}; use crate::types::{GossipSync, Graph, P2PGossipSync, RapidGossipSync}; use crate::Error; @@ -114,19 +112,3 @@ impl GossipSource { } } } - -pub(crate) struct RuntimeSpawner { - runtime: Arc, -} - -impl RuntimeSpawner { - pub(crate) fn new(runtime: Arc) -> Self { - Self { runtime } - } -} - -impl FutureSpawner for RuntimeSpawner { - fn spawn + Send + 'static>(&self, future: T) { - self.runtime.spawn_cancellable_background_task(future); - } -} diff --git a/src/runtime.rs b/src/runtime.rs index 1e9883ae4..d9d39e84b 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -9,6 +9,8 @@ use std::future::Future; use std::sync::{Arc, Mutex}; use std::time::Duration; +use lightning::util::native_async::FutureSpawner; + use tokio::task::{JoinHandle, JoinSet}; use crate::config::{ @@ -219,3 +221,19 @@ enum RuntimeMode { Owned(tokio::runtime::Runtime), Handle(tokio::runtime::Handle), } + +pub(crate) struct RuntimeSpawner { + runtime: Arc, +} + +impl RuntimeSpawner { + pub(crate) fn new(runtime: Arc) -> Self { + Self { runtime } + } +} + +impl FutureSpawner for RuntimeSpawner { + fn spawn + Send + 'static>(&self, future: T) { + self.runtime.spawn_cancellable_background_task(future); + } +} diff --git a/src/types.rs b/src/types.rs index 2b7d3829a..96b9a9866 100644 --- a/src/types.rs +++ b/src/types.rs @@ -35,10 +35,10 @@ use crate::chain::ChainSource; use crate::config::ChannelConfig; use crate::data_store::DataStore; use crate::fee_estimator::OnchainFeeEstimator; -use crate::gossip::RuntimeSpawner; use crate::logger::Logger; use crate::message_handler::NodeCustomMessageHandler; use crate::payment::PaymentDetails; +use crate::runtime::RuntimeSpawner; /// A supertrait that requires that a type implements both [`KVStore`] and [`KVStoreSync`] at the /// same time. 
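A note on usage: the relocated type is a thin adapter, and nothing about its behavior changes here. A minimal sketch of how a `FutureSpawner` consumer can drive it (the call site below is hypothetical; the trait import and the `RuntimeSpawner` API are as in the diff above):

	use lightning::util::native_async::FutureSpawner;

	// Hypothetical consumer: anything generic over `FutureSpawner` can now take
	// the spawner from `runtime.rs` instead of reaching into the gossip module.
	fn start_background_work<S: FutureSpawner>(spawner: &S) {
		// At this point in the series `spawn` takes a `Future<Output = ()>` and
		// returns nothing; the task is simply handed off to the runtime.
		spawner.spawn(async {
			// e.g. a UTXO lookup kicked off by `GossipVerifier`
		});
	}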
From 68e92ffba7dd9dad3ba70d79c167a324e83e6fdf Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 13 Jan 2026 14:57:51 +0000 Subject: [PATCH 31/75] Upgrade to latest LDK (which spawns futures with a return value) --- Cargo.toml | 26 +++++++++++++------------- src/runtime.rs | 14 ++++++++++++-- 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 09ae4b03a..e0ace123f 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,17 +39,17 @@ default = [] #lightning-liquidity = { version = "0.2.0", features = ["std"] } #lightning-macros = { version = "0.2.0" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["std"] } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = 
"https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -78,13 +78,13 @@ log = { version = "0.4.22", default-features = false, features = ["std"]} vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} #bitcoin-payment-instructions = { version = "0.6" } -bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "fdca6c62f2fe2c53427d3e51e322a49aa7323ee2" } +bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "ce9ff5281ae9bb05526981f6f9df8f8d929c7c44" } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5236dba053a3f4f01cf0c32ce42b609a93738891", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } diff --git a/src/runtime.rs b/src/runtime.rs index d9d39e84b..f43cbb9f0 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -233,7 +233,17 @@ impl RuntimeSpawner { } impl FutureSpawner for RuntimeSpawner { - fn spawn + Send + 'static>(&self, future: T) { - self.runtime.spawn_cancellable_background_task(future); + type E = tokio::sync::oneshot::error::RecvError; + type SpawnedFutureResult = tokio::sync::oneshot::Receiver; + fn spawn + Send + 'static>( + &self, future: F, + ) -> Self::SpawnedFutureResult { + let (result, output) = tokio::sync::oneshot::channel(); + self.runtime.spawn_cancellable_background_task(async move { + // We don't care if the send works or not, if the receiver is dropped its not our + // problem. + let _ = result.send(future.await); + }); + output } } From b0f4a5e45fb7293a139b93ac654aa16a797fb890 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 8 Jan 2026 19:25:44 +0000 Subject: [PATCH 32/75] Switch to the new highly-parallel `ChannelMonitor` reader Upstream LDK added the ability to read `ChannelMonitor`s from storage in parallel, which we switch to here. 
--- src/builder.rs | 24 +++++++++++++++++++----- src/types.rs | 14 +++++++++++++- 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 510d86bdd..6348c26ba 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -69,11 +69,12 @@ use crate::logger::{log_error, LdkLogger, LogLevel, LogWriter, Logger}; use crate::message_handler::NodeCustomMessageHandler; use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; use crate::peer_store::PeerStore; -use crate::runtime::Runtime; +use crate::runtime::{Runtime, RuntimeSpawner}; use crate::tx_broadcaster::TransactionBroadcaster; use crate::types::{ - ChainMonitor, ChannelManager, DynStore, DynStoreWrapper, GossipSync, Graph, KeysManager, - MessageRouter, OnionMessenger, PaymentStore, PeerManager, Persister, SyncAndAsyncKVStore, + AsyncPersister, ChainMonitor, ChannelManager, DynStore, DynStoreWrapper, GossipSync, Graph, + KeysManager, MessageRouter, OnionMessenger, PaymentStore, PeerManager, Persister, + SyncAndAsyncKVStore, }; use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; @@ -1261,8 +1262,9 @@ fn build_with_store_internal( )); let peer_storage_key = keys_manager.get_peer_storage_key(); - let persister = Arc::new(Persister::new( + let monitor_reader = Arc::new(AsyncPersister::new( Arc::clone(&kv_store), + RuntimeSpawner::new(Arc::clone(&runtime)), Arc::clone(&logger), PERSISTER_MAX_PENDING_UPDATES, Arc::clone(&keys_manager), @@ -1272,7 +1274,9 @@ fn build_with_store_internal( )); // Read ChannelMonitor state from store - let channel_monitors = match persister.read_all_channel_monitors_with_updates() { + let monitor_read_result = + runtime.block_on(monitor_reader.read_all_channel_monitors_with_updates_parallel()); + let channel_monitors = match monitor_read_result { Ok(monitors) => monitors, Err(e) => { if e.kind() == lightning::io::ErrorKind::NotFound { @@ -1284,6 +1288,16 @@ fn build_with_store_internal( }, }; + let persister = Arc::new(Persister::new( + Arc::clone(&kv_store), + Arc::clone(&logger), + PERSISTER_MAX_PENDING_UPDATES, + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + )); + // Initialize the ChainMonitor let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( Some(Arc::clone(&chain_source)), diff --git a/src/types.rs b/src/types.rs index 96b9a9866..614efd90e 100644 --- a/src/types.rs +++ b/src/types.rs @@ -23,7 +23,9 @@ use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{CombinedScorer, ProbabilisticScoringFeeParameters}; use lightning::sign::InMemorySigner; -use lightning::util::persist::{KVStore, KVStoreSync, MonitorUpdatingPersister}; +use lightning::util::persist::{ + KVStore, KVStoreSync, MonitorUpdatingPersister, MonitorUpdatingPersisterAsync, +}; use lightning::util::ser::{Readable, Writeable, Writer}; use lightning::util::sweep::OutputSweeper; use lightning_block_sync::gossip::GossipVerifier; @@ -185,6 +187,16 @@ impl DynStoreTrait for DynStoreWrapper } } +pub(crate) type AsyncPersister = MonitorUpdatingPersisterAsync< + Arc, + RuntimeSpawner, + Arc, + Arc, + Arc, + Arc, + Arc, +>; + pub type Persister = MonitorUpdatingPersister< Arc, Arc, From 78842ad3ac12c4d5be0ff46361b411cad7199088 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 8 Jan 2026 19:51:14 +0000 Subject: [PATCH 33/75] Parallelize store reads in init Since I was editing the init logic anyway I couldn't resist going ahead and 
parallelizing various read calls.

Since we added support for an async `KVStore` in LDK 0.2/ldk-node 0.7, we can
now practically do initialization reads in parallel. Thus, rather than making
a long series of read calls in `build`, we use `tokio::join` to reduce the
number of round-trips to our backing store, which should be a very large win
for initialization cost for those using remote storage (e.g. VSS).

Sadly we can't trivially do all our reads in one go: we need the payment
history to initialize the BDK wallet, which is used in the `Wallet` object,
which is referenced in our `KeysManager`. Thus we first read the payment
store and node metrics before moving on.

Then, we need a reference to the `NetworkGraph` when we build the scorer.
While we could/eventually should move to reading the *bytes* for the scorer
while reading the graph and only building the scorer later, that's a larger
refactor we leave for later.

In the end, we end up with:
 * 1 round-trip to load the payment history and node metrics,
 * 2 round-trips to load ChannelMonitors and NetworkGraph (where there's an
   internal extra round-trip after listing the monitor updates for a monitor),
 * 1 round-trip to validate bitcoind RPC/REST access for those using bitcoind
   as a chain source,
 * 1 round-trip to load various smaller LDK and ldk-node objects,
 * and 1 additional round-trip to drop the rgs snapshot timestamp for nodes
   using P2P network gossip syncing
for a total of 4 round-trips in the common case and 6 for nodes using less
common chain source and gossip sync sources.

We then have additional round-trips to our storage and chain source during
node start, but those are in many cases already async.
---
 src/builder.rs | 135 +++++++++++++++++++++++++++++--------------------
 1 file changed, 80 insertions(+), 55 deletions(-)

diff --git a/src/builder.rs b/src/builder.rs
index 6348c26ba..2046956e0 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -33,7 +33,7 @@ use lightning::routing::scoring::{
 };
 use lightning::sign::{EntropySource, NodeSigner};
 use lightning::util::persist::{
-	KVStoreSync, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+	KVStore, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
 	CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
 };
 use lightning::util::ser::ReadableArgs;
@@ -1052,10 +1052,20 @@ fn build_with_store_internal(
 		}
 	}
 
+	let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger)));
+	let fee_estimator = Arc::new(OnchainFeeEstimator::new());
+
+	let kv_store_ref = Arc::clone(&kv_store);
+	let logger_ref = Arc::clone(&logger);
+	let (payment_store_res, node_metris_res) = runtime.block_on(async move {
+		tokio::join!(
+			read_payments(&*kv_store_ref, Arc::clone(&logger_ref)),
+			read_node_metrics(&*kv_store_ref, Arc::clone(&logger_ref)),
+		)
+	});
+
 	// Initialize the status fields.
- let node_metrics = match runtime - .block_on(async { read_node_metrics(&*kv_store, Arc::clone(&logger)).await }) - { + let node_metrics = match node_metris_res { Ok(metrics) => Arc::new(RwLock::new(metrics)), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1066,23 +1076,20 @@ fn build_with_store_internal( } }, }; - let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger))); - let fee_estimator = Arc::new(OnchainFeeEstimator::new()); - let payment_store = - match runtime.block_on(async { read_payments(&*kv_store, Arc::clone(&logger)).await }) { - Ok(payments) => Arc::new(PaymentStore::new( - payments, - PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(), - PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE.to_string(), - Arc::clone(&kv_store), - Arc::clone(&logger), - )), - Err(e) => { - log_error!(logger, "Failed to read payment data from store: {}", e); - return Err(BuildError::ReadFailed); - }, - }; + let payment_store = match payment_store_res { + Ok(payments) => Arc::new(PaymentStore::new( + payments, + PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(), + PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE.to_string(), + Arc::clone(&kv_store), + Arc::clone(&logger), + )), + Err(e) => { + log_error!(logger, "Failed to read payment data from store: {}", e); + return Err(BuildError::ReadFailed); + }, + }; let (chain_source, chain_tip_opt) = match chain_data_source_config { Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }) => { @@ -1273,10 +1280,18 @@ fn build_with_store_internal( Arc::clone(&fee_estimator), )); + // Read ChannelMonitors and the NetworkGraph + let kv_store_ref = Arc::clone(&kv_store); + let logger_ref = Arc::clone(&logger); + let (monitor_read_res, network_graph_res) = runtime.block_on(async move { + tokio::join!( + monitor_reader.read_all_channel_monitors_with_updates_parallel(), + read_network_graph(&*kv_store_ref, logger_ref), + ) + }); + // Read ChannelMonitor state from store - let monitor_read_result = - runtime.block_on(monitor_reader.read_all_channel_monitors_with_updates_parallel()); - let channel_monitors = match monitor_read_result { + let channel_monitors = match monitor_read_res { Ok(monitors) => monitors, Err(e) => { if e.kind() == lightning::io::ErrorKind::NotFound { @@ -1310,9 +1325,7 @@ fn build_with_store_internal( )); // Initialize the network graph, scorer, and router - let network_graph = match runtime - .block_on(async { read_network_graph(&*kv_store, Arc::clone(&logger)).await }) - { + let network_graph = match network_graph_res { Ok(graph) => Arc::new(graph), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1324,9 +1337,42 @@ fn build_with_store_internal( }, }; - let local_scorer = match runtime.block_on(async { - read_scorer(&*kv_store, Arc::clone(&network_graph), Arc::clone(&logger)).await - }) { + // Read various smaller LDK and ldk-node objects from the store + let kv_store_ref = Arc::clone(&kv_store); + let logger_ref = Arc::clone(&logger); + let network_graph_ref = Arc::clone(&network_graph); + let output_sweeper_future = read_output_sweeper( + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + Arc::clone(&chain_source), + Arc::clone(&keys_manager), + Arc::clone(&kv_store_ref), + Arc::clone(&logger_ref), + ); + let ( + scorer_res, + external_scores_res, + channel_manager_bytes_res, + sweeper_bytes_res, + event_queue_res, + peer_info_res, + ) = runtime.block_on(async move { + tokio::join!( + read_scorer(&*kv_store_ref, network_graph_ref, Arc::clone(&logger_ref)), + 
read_external_pathfinding_scores_from_cache(&*kv_store_ref, Arc::clone(&logger_ref)), + KVStore::read( + &*kv_store_ref, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ), + output_sweeper_future, + read_event_queue(Arc::clone(&kv_store_ref), Arc::clone(&logger_ref)), + read_peer_info(Arc::clone(&kv_store_ref), Arc::clone(&logger_ref)), + ) + }); + + let local_scorer = match scorer_res { Ok(scorer) => scorer, Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1342,9 +1388,7 @@ fn build_with_store_internal( let scorer = Arc::new(Mutex::new(CombinedScorer::new(local_scorer))); // Restore external pathfinding scores from cache if possible. - match runtime.block_on(async { - read_external_pathfinding_scores_from_cache(&*kv_store, Arc::clone(&logger)).await - }) { + match external_scores_res { Ok(external_scores) => { scorer.lock().unwrap().merge(external_scores, cur_time); log_trace!(logger, "External scores from cache merged successfully"); @@ -1397,12 +1441,7 @@ fn build_with_store_internal( // Initialize the ChannelManager let channel_manager = { - if let Ok(reader) = KVStoreSync::read( - &*kv_store, - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_KEY, - ) { + if let Ok(reader) = channel_manager_bytes_res { let channel_monitor_references = channel_monitors.iter().map(|(_, chanmon)| chanmon).collect(); let read_args = ChannelManagerReadArgs::new( @@ -1627,17 +1666,7 @@ fn build_with_store_internal( let connection_manager = Arc::new(ConnectionManager::new(Arc::clone(&peer_manager), Arc::clone(&logger))); - let output_sweeper = match runtime.block_on(async { - read_output_sweeper( - Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - Arc::clone(&chain_source), - Arc::clone(&keys_manager), - Arc::clone(&kv_store), - Arc::clone(&logger), - ) - .await - }) { + let output_sweeper = match sweeper_bytes_res { Ok(output_sweeper) => Arc::new(output_sweeper), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1658,9 +1687,7 @@ fn build_with_store_internal( }, }; - let event_queue = match runtime - .block_on(async { read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger)).await }) - { + let event_queue = match event_queue_res { Ok(event_queue) => Arc::new(event_queue), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1672,9 +1699,7 @@ fn build_with_store_internal( }, }; - let peer_store = match runtime - .block_on(async { read_peer_info(Arc::clone(&kv_store), Arc::clone(&logger)).await }) - { + let peer_store = match peer_info_res { Ok(peer_store) => Arc::new(peer_store), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { From f0447e1bee03abe3a12de78106a042d5923fc796 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 14 Jan 2026 12:48:07 +0100 Subject: [PATCH 34/75] Fix typos in comments and documentation - Fix "the the" -> "the" in event.rs - Fix "to to" -> "to" in builder.rs, ffi/types.rs, and migrations.rs - Fix "openend" -> "opened" in ffi/types.rs - Fix "unnannounced" -> "unannounced" in builder.rs Co-Authored-By: HAL 9000 --- src/builder.rs | 4 ++-- src/event.rs | 2 +- src/ffi/types.rs | 4 ++-- src/io/sqlite_store/migrations.rs | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 08ac123fa..0c8df2aa3 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1352,11 +1352,11 @@ fn build_with_store_internal( let mut 
user_config = default_user_config(&config);
 
 	if liquidity_source_config.and_then(|lsc| lsc.lsps2_service.as_ref()).is_some() {
-		// If we act as an LSPS2 service, we need to to be able to intercept HTLCs and forward the
+		// If we act as an LSPS2 service, we need to be able to intercept HTLCs and forward the
 		// information to the service handler.
 		user_config.accept_intercept_htlcs = true;
 
-		// If we act as an LSPS2 service, we allow forwarding to unnannounced channels.
+		// If we act as an LSPS2 service, we allow forwarding to unannounced channels.
 		user_config.accept_forwards_to_priv_channels = true;
 
 		// If we act as an LSPS2 service, set the HTLC-value-in-flight to 100% of the channel value

diff --git a/src/event.rs b/src/event.rs
index 75270bf53..6f0ed8e09 100644
--- a/src/event.rs
+++ b/src/event.rs
@@ -165,7 +165,7 @@ pub enum Event {
 	///
 	/// This needs to be manually claimed by supplying the correct preimage to [`claim_for_hash`].
 	///
-	/// If the the provided parameters don't match the expectations or the preimage can't be
+	/// If the provided parameters don't match the expectations or the preimage can't be
 	/// retrieved in time, should be failed-back via [`fail_for_hash`].
 	///
 	/// Note claiming will necessarily fail after the `claim_deadline` has been reached.

diff --git a/src/ffi/types.rs b/src/ffi/types.rs
index a5ff8372f..033f0387d 100644
--- a/src/ffi/types.rs
+++ b/src/ffi/types.rs
@@ -1169,9 +1169,9 @@ pub struct LSPS1OnchainPaymentInfo {
 	pub expires_at: LSPSDateTime,
 	/// The total fee the LSP will charge to open this channel in satoshi.
 	pub fee_total_sat: u64,
-	/// The amount the client needs to pay to have the requested channel openend.
+	/// The amount the client needs to pay to have the requested channel opened.
 	pub order_total_sat: u64,
-	/// An on-chain address the client can send [`Self::order_total_sat`] to to have the channel
+	/// An on-chain address the client can send [`Self::order_total_sat`] to have the channel
 	/// opened.
 	pub address: bitcoin::Address,
 	/// The minimum number of block confirmations that are required for the on-chain payment to be

diff --git a/src/io/sqlite_store/migrations.rs b/src/io/sqlite_store/migrations.rs
index abfbdf6ef..ea809be08 100644
--- a/src/io/sqlite_store/migrations.rs
+++ b/src/io/sqlite_store/migrations.rs
@@ -124,7 +124,7 @@ mod tests {
 
 		connection.execute(&sql, []).unwrap();
 
-		// We write some data to to the table
+		// We write some data to the table
 		let sql = format!(
 			"INSERT OR REPLACE INTO {} (namespace, key, value) VALUES (:namespace, :key, :value);",
 			kv_table_name

From 7bb147fc6e2dff66e4ce5b329625ab6af2488c33 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 1 Jan 2026 00:15:34 +0000
Subject: [PATCH 35/75] Switch from `reqwest` to `bitreq`

`reqwest` is one of the largest contributors of code size and dependencies
(including single-author dependencies) to much of the rust-bitcoin ecosystem,
including ldk-node.

Thus, Tobin took the time to (ask an LLM to) fork `minreq` and add async
support to it, including async `rustls` support. As it's now a functional
HTTP(s) client, it's time for the ecosystem to start switching over.

Luckily, it's ~trivial to do.
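For a sense of what the translation looks like, here is a sketch of the `bitreq` request pattern adopted below (the URL, size cap, and error handling are illustrative placeholders, not part of this patch):

	// Build the request, cap the decoded body size, set a timeout in seconds,
	// then send asynchronously. Unlike `reqwest::Response::error_for_status`,
	// bitreq does not error on non-2xx statuses, so we check by hand.
	async fn fetch(url: &str) -> Result<Vec<u8>, String> {
		let response = bitreq::get(url)
			.with_max_body_size(Some(15 * 1024 * 1024)) // cap the decoded body
			.with_timeout(5) // seconds
			.send_async()
			.await
			.map_err(|e| format!("request failed: {e}"))?;
		if response.status_code != 200 {
			return Err(format!("unexpected HTTP status {}", response.status_code));
		}
		Ok(response.as_bytes().to_vec())
	}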
--- Cargo.toml | 2 +- src/config.rs | 8 ++++++++ src/gossip.rs | 36 ++++++++++++------------------------ src/scoring.rs | 40 +++++++++++++++++----------------------- 4 files changed, 38 insertions(+), 48 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e0ace123f..9881b5308 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,7 +56,7 @@ bdk_esplora = { version = "0.22.0", default-features = false, features = ["async bdk_electrum = { version = "0.23.0", default-features = false, features = ["use-rustls-ring"]} bdk_wallet = { version = "2.2.0", default-features = false, features = ["std", "keys-bip39"]} -reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } +bitreq = { version = "0.3", default-features = false, features = ["async-https"] } rustls = { version = "0.23", default-features = false } rusqlite = { version = "0.31.0", features = ["bundled"] } bitcoin = "0.32.7" diff --git a/src/config.rs b/src/config.rs index 1b71d0d4e..6c9d1640a 100644 --- a/src/config.rs +++ b/src/config.rs @@ -87,12 +87,20 @@ pub(crate) const FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS: u64 = 5; // The timeout after which we abort a transaction broadcast operation. pub(crate) const TX_BROADCAST_TIMEOUT_SECS: u64 = 5; +// The maximum encoded size of an RGS snapshot we'll accept. +// In practice the maximum we see is around 4MiB. +pub(crate) const RGS_SNAPSHOT_MAX_SIZE: usize = 15 * 1024 * 1024; + // The timeout after which we abort a RGS sync operation. pub(crate) const RGS_SYNC_TIMEOUT_SECS: u64 = 5; /// The length in bytes of our wallets' keys seed. pub const WALLET_KEYS_SEED_LEN: usize = 64; +// The maximum encoded size of external scores we'll accept. +// In practice we see scores files in the 5MiB range. +pub(crate) const EXTERNAL_PATHFINDING_SCORES_MAX_SIZE: usize = 20 * 1024 * 1024; + // The timeout after which we abort a external scores sync operation. pub(crate) const EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS: u64 = 5; diff --git a/src/gossip.rs b/src/gossip.rs index f42b4602c..4ef280273 100644 --- a/src/gossip.rs +++ b/src/gossip.rs @@ -7,13 +7,12 @@ use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::Arc; -use std::time::Duration; use lightning_block_sync::gossip::GossipVerifier; use crate::chain::ChainSource; -use crate::config::RGS_SYNC_TIMEOUT_SECS; -use crate::logger::{log_trace, LdkLogger, Logger}; +use crate::config::{RGS_SNAPSHOT_MAX_SIZE, RGS_SYNC_TIMEOUT_SECS}; +use crate::logger::{log_error, log_trace, LdkLogger, Logger}; use crate::runtime::{Runtime, RuntimeSpawner}; use crate::types::{GossipSync, Graph, P2PGossipSync, RapidGossipSync}; use crate::Error; @@ -70,29 +69,18 @@ impl GossipSource { let query_timestamp = latest_sync_timestamp.load(Ordering::Acquire); let query_url = format!("{}/{}", server_url, query_timestamp); - let response = tokio::time::timeout( - Duration::from_secs(RGS_SYNC_TIMEOUT_SECS), - reqwest::get(query_url), - ) - .await - .map_err(|e| { - log_trace!(logger, "Retrieving RGS gossip update timed out: {}", e); + let query = bitreq::get(query_url) + .with_max_body_size(Some(RGS_SNAPSHOT_MAX_SIZE)) + .with_timeout(RGS_SYNC_TIMEOUT_SECS); + let response = query.send_async().await.map_err(|e| { + log_error!(logger, "Failed to retrieve RGS gossip update: {e}"); Error::GossipUpdateTimeout - })? 
- .map_err(|e| { - log_trace!(logger, "Failed to retrieve RGS gossip update: {}", e); - Error::GossipUpdateFailed })?; - match response.error_for_status() { - Ok(res) => { - let update_data = res.bytes().await.map_err(|e| { - log_trace!(logger, "Failed to retrieve RGS gossip update: {}", e); - Error::GossipUpdateFailed - })?; - + match response.status_code { + 200 => { let new_latest_sync_timestamp = - gossip_sync.update_network_graph(&update_data).map_err(|e| { + gossip_sync.update_network_graph(response.as_bytes()).map_err(|e| { log_trace!( logger, "Failed to update network graph with RGS data: {:?}", @@ -103,8 +91,8 @@ impl GossipSource { latest_sync_timestamp.store(new_latest_sync_timestamp, Ordering::Release); Ok(new_latest_sync_timestamp) }, - Err(e) => { - log_trace!(logger, "Failed to retrieve RGS gossip update: {}", e); + code => { + log_trace!(logger, "Failed to retrieve RGS gossip update: HTTP {}", code); Err(Error::GossipUpdateFailed) }, } diff --git a/src/scoring.rs b/src/scoring.rs index 2e0d226ff..3ed7b9d1e 100644 --- a/src/scoring.rs +++ b/src/scoring.rs @@ -1,12 +1,13 @@ use std::sync::{Arc, Mutex, RwLock}; -use std::time::{Duration, SystemTime}; +use std::time::SystemTime; use lightning::routing::scoring::ChannelLiquidities; use lightning::util::ser::Readable; use lightning::{log_error, log_info, log_trace}; use crate::config::{ - EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL, EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS, + EXTERNAL_PATHFINDING_SCORES_MAX_SIZE, EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL, + EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS, }; use crate::io::utils::write_external_pathfinding_scores_to_cache; use crate::logger::LdkLogger; @@ -53,34 +54,27 @@ async fn sync_external_scores( logger: &Logger, scorer: &Mutex, node_metrics: &RwLock, kv_store: Arc, url: &String, ) -> () { - let response = tokio::time::timeout( - Duration::from_secs(EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS), - reqwest::get(url), - ) - .await; + let request = bitreq::get(url) + .with_timeout(EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS) + .with_max_body_size(Some(EXTERNAL_PATHFINDING_SCORES_MAX_SIZE)); - let response = match response { - Ok(resp) => resp, - Err(e) => { - log_error!(logger, "Retrieving external scores timed out: {}", e); - return; - }, - }; - let response = match response { + let response = match request.send_async().await { Ok(resp) => resp, Err(e) => { log_error!(logger, "Failed to retrieve external scores update: {}", e); return; }, }; - let reader = match response.bytes().await { - Ok(bytes) => bytes, - Err(e) => { - log_error!(logger, "Failed to read external scores update: {}", e); - return; - }, - }; - match ChannelLiquidities::read(&mut &*reader) { + if response.status_code != 200 { + log_error!( + logger, + "Failed to retrieve external scores update: HTTP {}", + response.status_code + ); + return; + } + let mut reader = response.as_bytes(); + match ChannelLiquidities::read(&mut reader) { Ok(liquidities) => { if let Err(e) = write_external_pathfinding_scores_to_cache(&*kv_store, &liquidities, logger).await From 6b6e523b9d773019a95bb2275211e1549724f19d Mon Sep 17 00:00:00 2001 From: Fmt Bot Date: Sun, 18 Jan 2026 01:43:45 +0000 Subject: [PATCH 36/75] 2026-01-18 automated rustfmt nightly --- src/logger.rs | 3 ++- src/runtime.rs | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/logger.rs b/src/logger.rs index e38a5f1fc..f2b53a1dc 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -304,9 +304,10 @@ impl LdkLogger for Logger { #[cfg(test)] mod 
tests { - use super::*; use std::sync::Mutex; + use super::*; + /// A minimal log facade logger that captures log output for testing. struct TestLogger { log: Arc>, diff --git a/src/runtime.rs b/src/runtime.rs index f43cbb9f0..39a34ddfe 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -10,7 +10,6 @@ use std::sync::{Arc, Mutex}; use std::time::Duration; use lightning::util::native_async::FutureSpawner; - use tokio::task::{JoinHandle, JoinSet}; use crate::config::{ From 351b5c6c5f7a47fe4368b72f3e5a1fec8107dda8 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Mon, 19 Jan 2026 04:35:13 +0000 Subject: [PATCH 37/75] Move most logs in the chain module to the debug level When the node is idle, the info level was quite spammy. It is now quiet. --- src/chain/bitcoind.rs | 6 +++--- src/chain/electrum.rs | 12 ++++++------ src/chain/esplora.rs | 12 ++++++------ 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index b0cf69395..1c8cf16ba 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -38,7 +38,7 @@ use crate::fee_estimator::{ ConfirmationTarget, OnchainFeeEstimator, }; use crate::io::utils::write_node_metrics; -use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::logger::{log_bytes, log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; @@ -364,7 +364,7 @@ impl BitcoindChainSource { }; if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); + log_debug!(self.logger, "Sync in progress, skipping."); return sync_receiver.recv().await.map_err(|e| { debug_assert!(false, "Failed to receive wallet polling result: {:?}", e); log_error!(self.logger, "Failed to receive wallet polling result: {:?}", e); @@ -558,7 +558,7 @@ impl BitcoindChainSource { if self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache) { // We only log if the values changed, as it might be very spammy otherwise. 
- log_info!( + log_debug!( self.logger, "Fee rate cache update finished in {}ms.", now.elapsed().as_millis() diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index df96dfddb..21e66f3a6 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -34,7 +34,7 @@ use crate::fee_estimator::{ ConfirmationTarget, OnchainFeeEstimator, }; use crate::io::utils::write_node_metrics; -use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::logger::{log_bytes, log_debug, log_error, log_trace, LdkLogger, Logger}; use crate::runtime::Runtime; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::NodeMetrics; @@ -100,7 +100,7 @@ impl ElectrumChainSource { status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); + log_debug!(self.logger, "Sync in progress, skipping."); return sync_receiver.recv().await.map_err(|e| { debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); @@ -135,7 +135,7 @@ impl ElectrumChainSource { |update_res: Result, now: Instant| match update_res { Ok(update) => match onchain_wallet.apply_update(update) { Ok(()) => { - log_info!( + log_debug!( self.logger, "{} of on-chain wallet finished in {}ms.", if incremental_sync { "Incremental sync" } else { "Sync" }, @@ -191,7 +191,7 @@ impl ElectrumChainSource { status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); + log_debug!(self.logger, "Sync in progress, skipping."); return sync_receiver.recv().await.map_err(|e| { debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); @@ -261,7 +261,7 @@ impl ElectrumChainSource { let new_fee_rate_cache = electrum_client.get_fee_rate_cache_update().await?; self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache); - log_info!( + log_debug!( self.logger, "Fee rate cache update finished in {}ms.", now.elapsed().as_millis() @@ -437,7 +437,7 @@ impl ElectrumRuntimeClient { Error::TxSyncFailed })?; - log_info!( + log_debug!( self.logger, "Sync of Lightning wallet finished in {}ms.", now.elapsed().as_millis() diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index 4d9f051cf..8ab941888 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -27,7 +27,7 @@ use crate::fee_estimator::{ OnchainFeeEstimator, }; use crate::io::utils::write_node_metrics; -use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::logger::{log_bytes, log_debug, log_error, log_trace, LdkLogger, Logger}; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; @@ -85,7 +85,7 @@ impl EsploraChainSource { status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); + log_debug!(self.logger, "Sync in progress, skipping."); return sync_receiver.recv().await.map_err(|e| { debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); @@ -113,7 +113,7 @@ impl EsploraChainSource { Ok(res) => match res { Ok(update) => match onchain_wallet.apply_update(update) { Ok(()) => { - log_info!( + log_debug!( 
self.logger,
 						"{} of on-chain wallet finished in {}ms.",
 						if incremental_sync { "Incremental sync" } else { "Sync" },
@@ -210,7 +210,7 @@ impl EsploraChainSource {
 			status_lock.register_or_subscribe_pending_sync()
 		};
 		if let Some(mut sync_receiver) = receiver_res {
-			log_info!(self.logger, "Sync in progress, skipping.");
+			log_debug!(self.logger, "Sync in progress, skipping.");
 			return sync_receiver.recv().await.map_err(|e| {
 				debug_assert!(false, "Failed to receive wallet sync result: {:?}", e);
 				log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e);
@@ -247,7 +247,7 @@
 		match timeout_fut.await {
 			Ok(res) => match res {
 				Ok(()) => {
-					log_info!(
+					log_debug!(
 						self.logger,
 						"Sync of Lightning wallet finished in {}ms.",
 						now.elapsed().as_millis()
@@ -331,7 +331,7 @@
 		self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache);
 
-		log_info!(
+		log_debug!(
 			self.logger,
 			"Fee rate cache update finished in {}ms.",
 			now.elapsed().as_millis()

From 6328e1b1a01a238729a88a879e19e7f0d6b5f840 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 21 Jan 2026 10:54:25 +0100
Subject: [PATCH 38/75] Add commented-out `patch` section for LDK Git
 dependencies

For convenience we added ready-to-go `patch` sections to `Cargo.toml`, which,
however, only allowed us to quickly override official LDK releases. Here we
also add the corresponding counterparts for LDK `git` dependencies.

---
 Cargo.toml | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/Cargo.toml b/Cargo.toml
index 9881b5308..a602340c3 100755
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -168,3 +168,16 @@ harness = false
 #vss-client-ng = { path = "../vss-client" }
 #vss-client-ng = { git = "https://github.com/lightningdevkit/vss-client", branch = "main" }
+#
+#[patch."https://github.com/lightningdevkit/rust-lightning"]
+#lightning = { path = "../rust-lightning/lightning" }
+#lightning-types = { path = "../rust-lightning/lightning-types" }
+#lightning-invoice = { path = "../rust-lightning/lightning-invoice" }
+#lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" }
+#lightning-persister = { path = "../rust-lightning/lightning-persister" }
+#lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" }
+#lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" }
+#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync" }
+#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" }
+#lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" }
+#lightning-macros = { path = "../rust-lightning/lightning-macros" }

From 77a7c26534136170c6ee776d2281fcc3dbd38e90 Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Tue, 20 Jan 2026 17:16:26 -0600
Subject: [PATCH 39/75] Bump LDK dependency for minor splicing API changes

---
 Cargo.toml | 26 +++++++++++++-------------
 src/lib.rs | 18 ++++++++----------
 2 files changed, 21 insertions(+), 23 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 9881b5308..f99b164a2 100755
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -39,17 +39,17 @@ default = []
 #lightning-liquidity = { version = "0.2.0", features = ["std"] }
 #lightning-macros = { version = "0.2.0" }
 
-lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["std"] }
-lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256" }
-lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["std"] } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -78,13 +78,13 @@ log = { version = "0.4.22", default-features = false, features = ["std"]} vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} #bitcoin-payment-instructions = { version = 
"0.6" } -bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "ce9ff5281ae9bb05526981f6f9df8f8d929c7c44" } +bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "6796e87525d6c564e1332354a808730e2ba2ebf8" } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "c9f022bcccb33964604159e6bdb4722020b4d256", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } diff --git a/src/lib.rs b/src/lib.rs index 405e26779..d2222d949 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1335,11 +1335,11 @@ impl Node { let change_address = self.wallet.get_new_internal_address()?; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_amount_sats), + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_amount_sats), inputs, - change_script: Some(change_address.script_pubkey()), - }; + Some(change_address.script_pubkey()), + ); let funding_feerate_per_kw: u32 = match fee_rate.to_sat_per_kwu().try_into() { Ok(fee_rate) => fee_rate, @@ -1411,12 +1411,10 @@ impl Node { self.wallet.parse_and_validate_address(address)?; - let contribution = SpliceContribution::SpliceOut { - outputs: vec![bitcoin::TxOut { - value: Amount::from_sat(splice_amount_sats), - script_pubkey: address.script_pubkey(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![bitcoin::TxOut { + value: Amount::from_sat(splice_amount_sats), + script_pubkey: address.script_pubkey(), + }]); let fee_rate = self.fee_estimator.estimate_fee_rate(ConfirmationTarget::ChannelFunding); let funding_feerate_per_kw: u32 = match fee_rate.to_sat_per_kwu().try_into() { From 69cd02d631c06cf274ed9745347e96388c79c3a9 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Thu, 22 Jan 2026 14:19:24 -0500 Subject: [PATCH 40/75] Update to latest rust-lightning UtxoSource trait API --- src/chain/bitcoind.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 1c8cf16ba..d9f43ee17 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -626,13 +626,6 @@ pub(crate) enum UtxoSourceClient { Rest(Arc), } -impl std::ops::Deref for UtxoSourceClient { - type Target = Self; - fn deref(&self) -> &Self { - self - } -} - impl BlockSource for UtxoSourceClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, height_hint: Option, From dbc9edd963631f9c0533038361a274fffa98d0dc Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 23 Jan 2026 09:36:07 +0100 Subject: [PATCH 41/75] Drop unnecessary Rust install in SemVer CI In the last few days there was incompatibility of `cargo-semver-checks` with the new stable Rust 1.93.0. While this should fixed by today's release of `cargo-semver-checks`, we take the opportunity to drop an unnecessary install step from the CI workflow, as the action will bring their own Rust version if not configured otherwise. 
--- .github/workflows/semver.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml index 8472cbd2a..2a3b14ef8 100644 --- a/.github/workflows/semver.yml +++ b/.github/workflows/semver.yml @@ -7,9 +7,5 @@ jobs: steps: - name: Checkout source code uses: actions/checkout@v4 - - name: Install Rust stable toolchain - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable - rustup override set stable - name: Check SemVer uses: obi1kenobi/cargo-semver-checks-action@v2 From f9834f3a8100a7734e83c14f1829132b5b0e60df Mon Sep 17 00:00:00 2001 From: Camillarhi Date: Tue, 9 Dec 2025 19:26:51 +0100 Subject: [PATCH 42/75] refactor: Extract payment creation logic into `create_payment_from_tx` --- src/wallet/mod.rs | 81 ++++++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 36 deletions(-) diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 5fd7b3d8e..0540c5a0a 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -192,45 +192,14 @@ impl Wallet { (PaymentStatus::Pending, ConfirmationStatus::Unconfirmed) }, }; - // TODO: It would be great to introduce additional variants for - // `ChannelFunding` and `ChannelClosing`. For the former, we could just - // take a reference to `ChannelManager` here and check against - // `list_channels`. But for the latter the best approach is much less - // clear: for force-closes/HTLC spends we should be good querying - // `OutputSweeper::tracked_spendable_outputs`, but regular channel closes - // (i.e., `SpendableOutputDescriptor::StaticOutput` variants) are directly - // spent to a wallet address. The only solution I can come up with is to - // create and persist a list of 'static pending outputs' that we could use - // here to determine the `PaymentKind`, but that's not really satisfactory, so - // we're punting on it until we can come up with a better solution. - let kind = crate::payment::PaymentKind::Onchain { txid, status: confirmation_status }; - let fee = locked_wallet.calculate_fee(&wtx.tx_node.tx).unwrap_or(Amount::ZERO); - let (sent, received) = locked_wallet.sent_and_received(&wtx.tx_node.tx); - let (direction, amount_msat) = if sent > received { - let direction = PaymentDirection::Outbound; - let amount_msat = Some( - sent.to_sat().saturating_sub(fee.to_sat()).saturating_sub(received.to_sat()) - * 1000, - ); - (direction, amount_msat) - } else { - let direction = PaymentDirection::Inbound; - let amount_msat = Some( - received.to_sat().saturating_sub(sent.to_sat().saturating_sub(fee.to_sat())) - * 1000, - ); - (direction, amount_msat) - }; - let fee_paid_msat = Some(fee.to_sat() * 1000); - - let payment = PaymentDetails::new( + let payment = self.create_payment_from_tx( + locked_wallet, + txid, id, - kind, - amount_msat, - fee_paid_msat, - direction, + &wtx.tx_node.tx, payment_status, + confirmation_status, ); self.payment_store.insert_or_update(payment)?; @@ -806,6 +775,46 @@ impl Wallet { Ok(tx) } + + fn create_payment_from_tx( + &self, locked_wallet: &PersistedWallet, txid: Txid, + payment_id: PaymentId, tx: &Transaction, payment_status: PaymentStatus, + confirmation_status: ConfirmationStatus, + ) -> PaymentDetails { + // TODO: It would be great to introduce additional variants for + // `ChannelFunding` and `ChannelClosing`. For the former, we could just + // take a reference to `ChannelManager` here and check against + // `list_channels`. 
But for the latter the best approach is much less + // clear: for force-closes/HTLC spends we should be good querying + // `OutputSweeper::tracked_spendable_outputs`, but regular channel closes + // (i.e., `SpendableOutputDescriptor::StaticOutput` variants) are directly + // spent to a wallet address. The only solution I can come up with is to + // create and persist a list of 'static pending outputs' that we could use + // here to determine the `PaymentKind`, but that's not really satisfactory, so + // we're punting on it until we can come up with a better solution. + + let kind = crate::payment::PaymentKind::Onchain { txid, status: confirmation_status }; + + let fee = locked_wallet.calculate_fee(tx).unwrap_or(Amount::ZERO); + let (sent, received) = locked_wallet.sent_and_received(tx); + let (direction, amount_msat) = if sent > received { + let direction = PaymentDirection::Outbound; + let amount_msat = Some( + sent.to_sat().saturating_sub(fee.to_sat()).saturating_sub(received.to_sat()) * 1000, + ); + (direction, amount_msat) + } else { + let direction = PaymentDirection::Inbound; + let amount_msat = Some( + received.to_sat().saturating_sub(sent.to_sat().saturating_sub(fee.to_sat())) * 1000, + ); + (direction, amount_msat) + }; + + let fee_paid_msat = Some(fee.to_sat() * 1000); + + PaymentDetails::new(payment_id, kind, amount_msat, fee_paid_msat, direction, payment_status) + } } impl Listen for Wallet { From 9c081ce97a3ed15c523f5be1e459a42d6cd6d2ca Mon Sep 17 00:00:00 2001 From: Camillarhi Date: Tue, 9 Dec 2025 15:44:43 +0100 Subject: [PATCH 43/75] Bump BDK_Wallet to 2.3.0 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index f99b164a2..04f9e2c9e 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,7 +54,7 @@ lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} bdk_electrum = { version = "0.23.0", default-features = false, features = ["use-rustls-ring"]} -bdk_wallet = { version = "2.2.0", default-features = false, features = ["std", "keys-bip39"]} +bdk_wallet = { version = "2.3.0", default-features = false, features = ["std", "keys-bip39"]} bitreq = { version = "0.3", default-features = false, features = ["async-https"] } rustls = { version = "0.23", default-features = false } From 08cbd8b7a3d959d81520f4dafac3d071b2ecb0ce Mon Sep 17 00:00:00 2001 From: Camillarhi Date: Mon, 26 Jan 2026 14:04:59 +0100 Subject: [PATCH 44/75] Add `contains_key` method to DataStore --- src/data_store.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/data_store.rs b/src/data_store.rs index d295ece51..ff09d9902 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -167,6 +167,10 @@ where })?; Ok(()) } + + pub(crate) fn contains_key(&self, id: &SO::Id) -> bool { + self.objects.lock().unwrap().contains_key(id) + } } #[cfg(test)] From 17f80ba739c19ebb8e729200dd01cb3c7afabb9a Mon Sep 17 00:00:00 2001 From: Camillarhi Date: Mon, 26 Jan 2026 14:14:59 +0100 Subject: [PATCH 45/75] Add PendingPaymentStore to track pending payments and replaced txids Track pending payments with their replaced/conflicting transaction IDs in a separate store. Pending payments are created here on WalletEvent::TxUnconfirmed, then removed once they reach ANTI_REORG_DELAY confirmations. This avoids scanning the entire payment store and enables efficient cleanup. 
--- src/io/mod.rs | 4 ++ src/io/utils.rs | 78 +++++++++++++++++++++++ src/payment/mod.rs | 2 + src/payment/pending_payment_store.rs | 93 ++++++++++++++++++++++++++++ src/types.rs | 4 +- 5 files changed, 180 insertions(+), 1 deletion(-) create mode 100644 src/payment/pending_payment_store.rs diff --git a/src/io/mod.rs b/src/io/mod.rs index 7afd5bd40..e080d39f7 100644 --- a/src/io/mod.rs +++ b/src/io/mod.rs @@ -78,3 +78,7 @@ pub(crate) const BDK_WALLET_INDEXER_KEY: &str = "indexer"; /// /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice pub(crate) const STATIC_INVOICE_STORE_PRIMARY_NAMESPACE: &str = "static_invoices"; + +/// The pending payment information will be persisted under this prefix. +pub(crate) const PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE: &str = "pending_payments"; +pub(crate) const PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; diff --git a/src/io/utils.rs b/src/io/utils.rs index 4ddc03b07..d2f70377b 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -46,6 +46,7 @@ use crate::io::{ NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, }; use crate::logger::{log_error, LdkLogger, Logger}; +use crate::payment::PendingPaymentDetails; use crate::peer_store::PeerStore; use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; @@ -626,6 +627,83 @@ pub(crate) fn read_bdk_wallet_change_set( Ok(Some(change_set)) } +/// Read previously persisted pending payments information from the store. +pub(crate) async fn read_pending_payments<L: Deref>( + kv_store: &DynStore, logger: L, +) -> Result<Vec<PendingPaymentDetails>, std::io::Error> +where + L::Target: LdkLogger, +{ + let mut res = Vec::new(); + + let mut stored_keys = KVStore::list( + &*kv_store, + PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + ) + .await?; + + const BATCH_SIZE: usize = 50; + + let mut set = tokio::task::JoinSet::new(); + + // Fill JoinSet with tasks if possible + while set.len() < BATCH_SIZE && !stored_keys.is_empty() { + if let Some(next_key) = stored_keys.pop() { + let fut = KVStore::read( + &*kv_store, + PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + &next_key, + ); + set.spawn(fut); + debug_assert!(set.len() <= BATCH_SIZE); + } + } + + while let Some(read_res) = set.join_next().await { + // Exit early if we get an IO error. + let reader = read_res + .map_err(|e| { + log_error!(logger, "Failed to read PendingPaymentDetails: {}", e); + set.abort_all(); + e + })? + .map_err(|e| { + log_error!(logger, "Failed to read PendingPaymentDetails: {}", e); + set.abort_all(); + e + })?; + + // Refill set for every finished future, if we still have something to do. + if let Some(next_key) = stored_keys.pop() { + let fut = KVStore::read( + &*kv_store, + PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + &next_key, + ); + set.spawn(fut); + debug_assert!(set.len() <= BATCH_SIZE); + } + + // Handle result.
+ let pending_payment = PendingPaymentDetails::read(&mut &*reader).map_err(|e| { + log_error!(logger, "Failed to deserialize PendingPaymentDetails: {}", e); + std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Failed to deserialize PendingPaymentDetails", + ) + })?; + res.push(pending_payment); + } + + debug_assert!(set.is_empty()); + debug_assert!(stored_keys.is_empty()); + + Ok(res) +} + #[cfg(test)] mod tests { use super::read_or_generate_seed_file; diff --git a/src/payment/mod.rs b/src/payment/mod.rs index c82f35c8f..42b5aff3b 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -11,6 +11,7 @@ pub(crate) mod asynchronous; mod bolt11; mod bolt12; mod onchain; +pub(crate) mod pending_payment_store; mod spontaneous; pub(crate) mod store; mod unified; @@ -18,6 +19,7 @@ mod unified; pub use bolt11::Bolt11Payment; pub use bolt12::Bolt12Payment; pub use onchain::OnchainPayment; +pub use pending_payment_store::PendingPaymentDetails; pub use spontaneous::SpontaneousPayment; pub use store::{ ConfirmationStatus, LSPFeeLimits, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, diff --git a/src/payment/pending_payment_store.rs b/src/payment/pending_payment_store.rs new file mode 100644 index 000000000..580bdcbcc --- /dev/null +++ b/src/payment/pending_payment_store.rs @@ -0,0 +1,93 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use bitcoin::Txid; +use lightning::{impl_writeable_tlv_based, ln::channelmanager::PaymentId}; + +use crate::{ + data_store::{StorableObject, StorableObjectUpdate}, + payment::{store::PaymentDetailsUpdate, PaymentDetails}, +}; + +/// Represents a pending payment +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct PendingPaymentDetails { + /// The full payment details + pub details: PaymentDetails, + /// Transaction IDs that have replaced or conflict with this payment. + pub conflicting_txids: Vec<Txid>, +} + +impl PendingPaymentDetails { + pub(crate) fn new(details: PaymentDetails, conflicting_txids: Vec<Txid>) -> Self { + Self { details, conflicting_txids } + } + + /// Convert to finalized payment for the main payment store + pub fn into_payment_details(self) -> PaymentDetails { + self.details + } +} + +impl_writeable_tlv_based!(PendingPaymentDetails, { + (0, details, required), + (2, conflicting_txids, optional_vec), +}); + +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct PendingPaymentDetailsUpdate { + pub id: PaymentId, + pub payment_update: Option<PaymentDetailsUpdate>, + pub conflicting_txids: Option<Vec<Txid>>, +} + +impl StorableObject for PendingPaymentDetails { + type Id = PaymentId; + type Update = PendingPaymentDetailsUpdate; + + fn id(&self) -> Self::Id { + self.details.id + } + + fn update(&mut self, update: &Self::Update) -> bool { + let mut updated = false; + + // Update the underlying payment details if present + if let Some(payment_update) = &update.payment_update { + updated |= self.details.update(payment_update); + } + + if let Some(new_conflicting_txids) = &update.conflicting_txids { + if &self.conflicting_txids != new_conflicting_txids { + self.conflicting_txids = new_conflicting_txids.clone(); + updated = true; + } + } + + updated + } + + fn to_update(&self) -> Self::Update { + self.into() + } +} + +impl StorableObjectUpdate<PendingPaymentDetails> for PendingPaymentDetailsUpdate { + fn id(&self) -> <PendingPaymentDetails as StorableObject>::Id { + self.id + } +} + +impl From<&PendingPaymentDetails> for PendingPaymentDetailsUpdate { + fn from(value: &PendingPaymentDetails) -> Self { + Self { + id: value.id(), + payment_update: Some(value.details.to_update()), + conflicting_txids: Some(value.conflicting_txids.clone()), + } + } +} diff --git a/src/types.rs b/src/types.rs index 614efd90e..b5b1ffed7 100644 --- a/src/types.rs +++ b/src/types.rs @@ -39,7 +39,7 @@ use crate::data_store::DataStore; use crate::fee_estimator::OnchainFeeEstimator; use crate::logger::Logger; use crate::message_handler::NodeCustomMessageHandler; -use crate::payment::PaymentDetails; +use crate::payment::{PaymentDetails, PendingPaymentDetails}; use crate::runtime::RuntimeSpawner; /// A supertrait that requires that a type implements both [`KVStore`] and [`KVStoreSync`] at the @@ -621,3 +621,5 @@ impl From<&(u64, Vec<u8>)> for CustomTlvRecord { CustomTlvRecord { type_num: tlv.0, value: tlv.1.clone() } } } + +pub(crate) type PendingPaymentStore = DataStore<PendingPaymentDetails, Arc<Logger>>; From 9f02ec030d2b80eff89b6424cd8b70eda78c6025 Mon Sep 17 00:00:00 2001 From: Camillarhi Date: Mon, 26 Jan 2026 14:16:23 +0100 Subject: [PATCH 46/75] Use BDK events in `update_payment_store` instead of scanning all transactions Replace the full transaction list scan in `update_payment_store` with handling of BDK's `WalletEvent` stream during sync.
This leverages the new events in BDK 2.2, reduces redundant work, and prepares the foundation for reliable RBF/CPFP tracking via `WalletEvent::TxReplaced` --- src/builder.rs | 39 +++++-- src/wallet/mod.rs | 253 +++++++++++++++++++++++++++++++++++++--------- 2 files changed, 237 insertions(+), 55 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 0e8e8c166..bedaba4b5 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -55,12 +55,14 @@ use crate::gossip::GossipSource; use crate::io::sqlite_store::SqliteStore; use crate::io::utils::{ read_event_queue, read_external_pathfinding_scores_from_cache, read_network_graph, - read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_scorer, - write_node_metrics, + read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_pending_payments, + read_scorer, write_node_metrics, }; use crate::io::vss_store::VssStoreBuilder; use crate::io::{ self, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, }; use crate::liquidity::{ LSPS1ClientConfig, LSPS2ClientConfig, LSPS2ServiceConfig, LiquiditySourceBuilder, @@ -73,8 +75,8 @@ use crate::runtime::{Runtime, RuntimeSpawner}; use crate::tx_broadcaster::TransactionBroadcaster; use crate::types::{ AsyncPersister, ChainMonitor, ChannelManager, DynStore, DynStoreWrapper, GossipSync, Graph, - KeysManager, MessageRouter, OnionMessenger, PaymentStore, PeerManager, Persister, - SyncAndAsyncKVStore, + KeysManager, MessageRouter, OnionMessenger, PaymentStore, PeerManager, PendingPaymentStore, + Persister, SyncAndAsyncKVStore, }; use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; @@ -1057,12 +1059,14 @@ fn build_with_store_internal( let kv_store_ref = Arc::clone(&kv_store); let logger_ref = Arc::clone(&logger); - let (payment_store_res, node_metris_res) = runtime.block_on(async move { - tokio::join!( - read_payments(&*kv_store_ref, Arc::clone(&logger_ref)), - read_node_metrics(&*kv_store_ref, Arc::clone(&logger_ref)), - ) - }); + let (payment_store_res, node_metris_res, pending_payment_store_res) = + runtime.block_on(async move { + tokio::join!( + read_payments(&*kv_store_ref, Arc::clone(&logger_ref)), + read_node_metrics(&*kv_store_ref, Arc::clone(&logger_ref)), + read_pending_payments(&*kv_store_ref, Arc::clone(&logger_ref)) + ) + }); // Initialize the status fields. 
let node_metrics = match node_metris_res { @@ -1243,6 +1247,20 @@ fn build_with_store_internal( }, }; + let pending_payment_store = match pending_payment_store_res { + Ok(pending_payments) => Arc::new(PendingPaymentStore::new( + pending_payments, + PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(), + PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE.to_string(), + Arc::clone(&kv_store), + Arc::clone(&logger), + )), + Err(e) => { + log_error!(logger, "Failed to read pending payment data from store: {}", e); + return Err(BuildError::ReadFailed); + }, + }; + let wallet = Arc::new(Wallet::new( bdk_wallet, wallet_persister, @@ -1251,6 +1269,7 @@ fn build_with_store_internal( Arc::clone(&payment_store), Arc::clone(&config), Arc::clone(&logger), + Arc::clone(&pending_payment_store), )); // Initialize the KeysManager diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 0540c5a0a..05c743bd9 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -12,6 +12,7 @@ use std::sync::{Arc, Mutex}; use bdk_chain::spk_client::{FullScanRequest, SyncRequest}; use bdk_wallet::descriptor::ExtendedDescriptor; +use bdk_wallet::event::WalletEvent; #[allow(deprecated)] use bdk_wallet::SignOptions; use bdk_wallet::{Balance, KeychainKind, PersistedWallet, Update}; @@ -49,8 +50,10 @@ use crate::config::Config; use crate::fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; use crate::payment::store::ConfirmationStatus; -use crate::payment::{PaymentDetails, PaymentDirection, PaymentStatus}; -use crate::types::{Broadcaster, PaymentStore}; +use crate::payment::{ + PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, PendingPaymentDetails, +}; +use crate::types::{Broadcaster, PaymentStore, PendingPaymentStore}; use crate::Error; pub(crate) enum OnchainSendAmount { @@ -71,6 +74,7 @@ pub(crate) struct Wallet { payment_store: Arc, config: Arc, logger: Arc, + pending_payment_store: Arc, } impl Wallet { @@ -78,11 +82,20 @@ impl Wallet { wallet: bdk_wallet::PersistedWallet, wallet_persister: KVStoreWalletPersister, broadcaster: Arc, fee_estimator: Arc, payment_store: Arc, - config: Arc, logger: Arc, + config: Arc, logger: Arc, pending_payment_store: Arc, ) -> Self { let inner = Mutex::new(wallet); let persister = Mutex::new(wallet_persister); - Self { inner, persister, broadcaster, fee_estimator, payment_store, config, logger } + Self { + inner, + persister, + broadcaster, + fee_estimator, + payment_store, + config, + logger, + pending_payment_store, + } } pub(crate) fn get_full_scan_request(&self) -> FullScanRequest { @@ -114,15 +127,15 @@ impl Wallet { pub(crate) fn apply_update(&self, update: impl Into) -> Result<(), Error> { let mut locked_wallet = self.inner.lock().unwrap(); - match locked_wallet.apply_update(update) { - Ok(()) => { + match locked_wallet.apply_update_events(update) { + Ok(events) => { let mut locked_persister = self.persister.lock().unwrap(); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet: {}", e); Error::PersistenceFailed })?; - self.update_payment_store(&mut *locked_wallet).map_err(|e| { + self.update_payment_store(&mut *locked_wallet, events).map_err(|e| { log_error!(self.logger, "Failed to update payment store: {}", e); Error::PersistenceFailed })?; @@ -167,42 +180,159 @@ impl Wallet { fn update_payment_store<'a>( &self, locked_wallet: &'a mut PersistedWallet, + mut events: Vec, ) -> Result<(), Error> { - for wtx 
in locked_wallet.transactions() { - let id = PaymentId(wtx.tx_node.txid.to_byte_array()); - let txid = wtx.tx_node.txid; - let (payment_status, confirmation_status) = match wtx.chain_position { - bdk_chain::ChainPosition::Confirmed { anchor, .. } => { - let confirmation_height = anchor.block_id.height; + if events.is_empty() { + return Ok(()); + } + + // Sort events to ensure proper sequencing for data consistency: + // 1. TxReplaced (0) before TxUnconfirmed (1) - Critical for RBF handling + // When a transaction is replaced via RBF, both events fire. Processing + // TxReplaced first stores the replaced transaction, allowing TxUnconfirmed + // to detect and skip duplicate payment record creation. + // 2. TxConfirmed (2) before ChainTipChanged (3) - Ensures height accuracy + // ChainTipChanged updates block height. Processing TxConfirmed first ensures + // it references the correct height for confirmation depth calculations. + // 3. Other events follow in deterministic order for predictable processing + if events.len() > 1 { + events.sort_by_key(|e| match e { + WalletEvent::TxReplaced { .. } => 0, + WalletEvent::TxUnconfirmed { .. } => 1, + WalletEvent::TxConfirmed { .. } => 2, + WalletEvent::ChainTipChanged { .. } => 3, + WalletEvent::TxDropped { .. } => 4, + _ => 5, + }); + } + + for event in events { + match event { + WalletEvent::TxConfirmed { txid, tx, block_time, .. } => { let cur_height = locked_wallet.latest_checkpoint().height(); + let confirmation_height = block_time.block_id.height; let payment_status = if cur_height >= confirmation_height + ANTI_REORG_DELAY - 1 { PaymentStatus::Succeeded } else { PaymentStatus::Pending }; + let confirmation_status = ConfirmationStatus::Confirmed { - block_hash: anchor.block_id.hash, + block_hash: block_time.block_id.hash, height: confirmation_height, - timestamp: anchor.confirmation_time, + timestamp: block_time.confirmation_time, }; - (payment_status, confirmation_status) + + let payment_id = self + .find_payment_by_txid(txid) + .unwrap_or_else(|| PaymentId(txid.to_byte_array())); + + let payment = self.create_payment_from_tx( + locked_wallet, + txid, + payment_id, + &tx, + payment_status, + confirmation_status, + ); + + let pending_payment = + self.create_pending_payment_from_tx(payment.clone(), Vec::new()); + + self.payment_store.insert_or_update(payment)?; + self.pending_payment_store.insert_or_update(pending_payment)?; }, - bdk_chain::ChainPosition::Unconfirmed { .. } => { - (PaymentStatus::Pending, ConfirmationStatus::Unconfirmed) + WalletEvent::ChainTipChanged { new_tip, .. } => { + // Get all payments that are Pending with Confirmed status + let pending_payments: Vec<PendingPaymentDetails> = + self.pending_payment_store.list_filter(|p| { + p.details.status == PaymentStatus::Pending + && matches!( + p.details.kind, + PaymentKind::Onchain { + status: ConfirmationStatus::Confirmed { .. }, + .. + } + ) + }); + + for mut payment in pending_payments { + if let PaymentKind::Onchain { + status: ConfirmationStatus::Confirmed { height, .. }, + .. + } = payment.details.kind + { + let payment_id = payment.details.id; + if new_tip.height >= height + ANTI_REORG_DELAY - 1 { + payment.details.status = PaymentStatus::Succeeded; + self.payment_store.insert_or_update(payment.details)?; + self.pending_payment_store.remove(&payment_id)?; + } + } + } }, - WalletEvent::TxUnconfirmed { txid, tx, old_block_time: None } => { + let payment_id = self + .find_payment_by_txid(txid) + .unwrap_or_else(|| PaymentId(txid.to_byte_array())); + + let payment = self.create_payment_from_tx( + locked_wallet, + txid, + payment_id, + &tx, + PaymentStatus::Pending, + ConfirmationStatus::Unconfirmed, + ); + let pending_payment = + self.create_pending_payment_from_tx(payment.clone(), Vec::new()); + self.payment_store.insert_or_update(payment)?; + self.pending_payment_store.insert_or_update(pending_payment)?; + }, + WalletEvent::TxReplaced { txid, conflicts, tx, .. } => { + let payment_id = self + .find_payment_by_txid(txid) + .unwrap_or_else(|| PaymentId(txid.to_byte_array())); + + // Collect all conflict txids + let conflict_txids: Vec<Txid> = + conflicts.iter().map(|(_, conflict_txid)| *conflict_txid).collect(); + + let payment = self.create_payment_from_tx( + locked_wallet, + txid, + payment_id, + &tx, + PaymentStatus::Pending, + ConfirmationStatus::Unconfirmed, + ); + let pending_payment_details = self + .create_pending_payment_from_tx(payment.clone(), conflict_txids.clone()); - self.payment_store.insert_or_update(payment)?; + self.pending_payment_store.insert_or_update(pending_payment_details)?; + }, + WalletEvent::TxDropped { txid, tx } => { + let payment_id = self + .find_payment_by_txid(txid) + .unwrap_or_else(|| PaymentId(txid.to_byte_array())); + let payment = self.create_payment_from_tx( + locked_wallet, + txid, + payment_id, + &tx, + PaymentStatus::Pending, + ConfirmationStatus::Unconfirmed, + ); + let pending_payment = + self.create_pending_payment_from_tx(payment.clone(), Vec::new()); + self.payment_store.insert_or_update(payment)?; + self.pending_payment_store.insert_or_update(pending_payment)?; + }, + _ => { + continue; + }, + }; } Ok(()) @@ -793,27 +923,60 @@ impl Wallet { // here to determine the `PaymentKind`, but that's not really satisfactory, so // we're punting on it until we can come up with a better solution. - let kind = crate::payment::PaymentKind::Onchain { txid, status: confirmation_status }; + let kind = PaymentKind::Onchain { txid, status: confirmation_status }; let fee = locked_wallet.calculate_fee(tx).unwrap_or(Amount::ZERO); let (sent, received) = locked_wallet.sent_and_received(tx); + let fee_sat = fee.to_sat(); + let (direction, amount_msat) = if sent > received { - let direction = PaymentDirection::Outbound; - let amount_msat = Some( - sent.to_sat().saturating_sub(fee.to_sat()).saturating_sub(received.to_sat()) * 1000, - ); - (direction, amount_msat) + ( + PaymentDirection::Outbound, + Some( + (sent.to_sat().saturating_sub(fee_sat).saturating_sub(received.to_sat())) + * 1000, + ), + ) } else { - let direction = PaymentDirection::Inbound; - let amount_msat = Some( - received.to_sat().saturating_sub(sent.to_sat().saturating_sub(fee.to_sat())) * 1000, - ); - (direction, amount_msat) + ( + PaymentDirection::Inbound, + Some( + received.to_sat().saturating_sub(sent.to_sat().saturating_sub(fee_sat)) * 1000, + ), + ) }; - let fee_paid_msat = Some(fee.to_sat() * 1000); + PaymentDetails::new( + payment_id, + kind, + amount_msat, + Some(fee_sat * 1000), + direction, + payment_status, + ) + } + + fn create_pending_payment_from_tx( + &self, payment: PaymentDetails, conflicting_txids: Vec<Txid>, + ) -> PendingPaymentDetails { + PendingPaymentDetails::new(payment, conflicting_txids) + } + + fn find_payment_by_txid(&self, target_txid: Txid) -> Option<PaymentId> { + let direct_payment_id = PaymentId(target_txid.to_byte_array()); + if self.pending_payment_store.contains_key(&direct_payment_id) { + return Some(direct_payment_id); + } + + if let Some(replaced_details) = self + .pending_payment_store + .list_filter(|p| p.conflicting_txids.contains(&target_txid)) + .first() + { + return Some(replaced_details.details.id); + } - PaymentDetails::new(payment_id, kind, amount_msat, fee_paid_msat, direction, payment_status) + None } } @@ -843,9 +1006,9 @@ impl Listen for Wallet { ); } - match locked_wallet.apply_block(block, height) { - Ok(()) => { - if let Err(e) = self.update_payment_store(&mut *locked_wallet) { + match locked_wallet.apply_block_events(block, height) { + Ok(events) => { + if let Err(e) = self.update_payment_store(&mut *locked_wallet, events) { log_error!(self.logger, "Failed to update payment store: {}", e); return; } From c99e9648fbf1db9bcad3d7925ce4c0653bea3e9d Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 27 Jan 2026 12:52:36 +0100 Subject: [PATCH 47/75] Add basic `CLAUDE.md` file --- CLAUDE.md | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 CLAUDE.md diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 000000000..75dcec108 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,11 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Development Rules + +- Always ensure tests pass before committing. +- Run `cargo fmt --all` after every code change. +- Never add new dependencies unless explicitly requested. +- Please always disclose the use of any AI tools in commit messages and PR descriptions. +- When adding new `.rs` files, please ensure you always add the licensing header as found in all other files.
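For reference, the licensing header that the last rule refers to, as it appears at the top of the repository's existing `.rs` files (compare e.g. `src/payment/pending_payment_store.rs` introduced earlier in this series):

    // This file is Copyright its original authors, visible in version control history.
    //
    // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
    // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
    // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
    // accordance with one or both of these licenses.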
From 3e9f834fe163794d6d012d9319ae29ed17efd210 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 28 Jan 2026 11:26:53 +0100 Subject: [PATCH 48/75] Add architecture overview section to `CLAUDE.md` Document the project architecture including core components, module organization, key design patterns, lifecycle management, and important type aliases. This provides context for AI assistants working with the codebase. Generated with Claude Code (AI-assisted) Co-Authored-By: HAL 9000 Signed-off-by: Elias Rohrer --- CLAUDE.md | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/CLAUDE.md b/CLAUDE.md index 75dcec108..300342771 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -9,3 +9,59 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co - Never add new dependencies unless explicitly requested - Please always disclose the use of any AI tools in commit messages and PR descriptions - When adding new `.rs` files, please ensure to always add the licensing header as found in all other files. + +## Architecture Overview + +LDK-Node is a self-custodial Lightning Network node library built on top of **LDK** (Lightning Development Kit) for Lightning functionality and **BDK** (Bitcoin Development Kit) for on-chain wallet operations. It provides a simple, ready-to-go interface for building Lightning applications with language bindings for Swift, Kotlin, and Python via UniFFI. + +### Core Components + +| Component | Location | Responsibility | +|-----------|----------|----------------| +| `Node` | `src/lib.rs` | Central abstraction containing all subsystems; entry point for API | +| `Builder` | `src/builder.rs` | Fluent configuration interface for constructing `Node` instances | +| `Wallet` | `src/wallet/` | BDK-based on-chain wallet with SQLite persistence | +| `ChainSource` | `src/chain/` | Chain data abstraction (Esplora, Electrum, Bitcoin Core) | +| `EventHandler` | `src/event.rs` | Translates LDK events to user-facing `Node` events | +| `PaymentStore` | `src/payment/store.rs` | Persistent payment tracking with status and metadata | + +### Module Organization + +| Module | Purpose | +|--------|---------| +| `payment/` | Payment processing (BOLT11, BOLT12, on-chain, spontaneous, unified) | +| `wallet/` | On-chain wallet abstraction, serialization, persistence | +| `chain/` | Chain data sources, wallet syncing, transaction broadcasting | +| `io/` | Persistence layer (`SQLiteStore`, `VssStore`, KV-store utilities) | +| `liquidity.rs` | Liquidity provider integration (LSPS1/LSPS2) | +| `connection.rs` | Peer connection management and reconnection logic | +| `gossip.rs` | Gossip data source management (RGS, P2P) | +| `graph.rs` | Network graph querying and channel/node information | +| `types.rs` | Type aliases for LDK components (`ChannelManager`, `PeerManager`, etc.) 
| +| `ffi/` | UniFFI bindings for cross-language support | + +### Key Design Patterns + +- **Arc-based Shared Ownership**: Extensive use of `Arc` for thread-safe shared components enabling background task spawning +- **Event-Driven Architecture**: Events flow from LDK → `EventHandler` → `EventQueue` → User application +- **Trait-based Abstraction**: `KVStore`/`KVStoreSync` for storage, `ChainSource` for chain backends, `StorableObject` for persistence +- **Builder Pattern**: Fluent configuration with sensible defaults and validation during build phase +- **Background Tasks**: Multiple categories (wallet sync, gossip updates, peer reconnection, fee updates, event processing) + +### Lifecycle + +**Startup (`node.start()`)**: Acquires lock → starts chain source → updates fee rates → spawns background sync tasks → sets up gossip/listeners/peer reconnection → starts event processor → marks running + +**Shutdown (`node.stop()`)**: Acquires lock → signals stop → aborts cancellable tasks → waits for background tasks → disconnects peers → persists final state + +### Type Aliases (from `types.rs`) + +Key LDK type aliases used throughout the codebase: +- `ChannelManager` - LDK channel management +- `ChainMonitor` - LDK chain monitoring +- `PeerManager` - LDK peer connections +- `OnionMessenger` - LDK onion messaging +- `Router` - LDK pathfinding (`DefaultRouter`) +- `Scorer` - Combined probabilistic + external scoring +- `Graph` - `NetworkGraph` +- `Sweeper` - `OutputSweeper` From 760e5e7f6a8f329109daff52e8f6427882927b97 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 29 Jan 2026 11:09:40 +0100 Subject: [PATCH 49/75] Make sync timeouts configurable Previously, the timeouts applied to chain syncing weren't configurable to users. While historically there were good reasons for this (mostly to avoid leaving the Node in a blocked state for extended periods during chain syncing), by now we should be good to let the users configure timeouts ~freely, if they deem it fit. Here we allow for exactly that. Signed-off-by: Elias Rohrer --- bindings/ldk_node.udl | 10 ++++ src/chain/bitcoind.rs | 7 +-- src/chain/electrum.rs | 60 ++++++++++++++++-------- src/chain/esplora.rs | 27 ++++++----- src/config.rs | 81 ++++++++++++++++++++++++++------- src/ffi/types.rs | 2 +- tests/common/mod.rs | 6 ++- tests/integration_tests_rust.rs | 15 ++++-- 8 files changed, 149 insertions(+), 59 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index c881dbe09..6bd031379 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -26,12 +26,22 @@ dictionary BackgroundSyncConfig { u64 fee_rate_cache_update_interval_secs; }; +dictionary SyncTimeoutsConfig { + u64 onchain_wallet_sync_timeout_secs; + u64 lightning_wallet_sync_timeout_secs; + u64 fee_rate_cache_update_timeout_secs; + u64 tx_broadcast_timeout_secs; + u8 per_request_timeout_secs; +}; + dictionary EsploraSyncConfig { BackgroundSyncConfig? background_sync_config; + SyncTimeoutsConfig timeouts_config; }; dictionary ElectrumSyncConfig { BackgroundSyncConfig? 
background_sync_config; + SyncTimeoutsConfig timeouts_config; }; dictionary LSPS2ServiceConfig { diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index d9f43ee17..8a7167022 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -31,7 +31,8 @@ use serde::Serialize; use super::WalletSyncStatus; use crate::config::{ - BitcoindRestClientConfig, Config, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, + BitcoindRestClientConfig, Config, DEFAULT_FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, + DEFAULT_TX_BROADCAST_TIMEOUT_SECS, }; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, @@ -466,7 +467,7 @@ impl BitcoindChainSource { macro_rules! get_fee_rate_update { ($estimation_fut:expr) => {{ let update_res = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + Duration::from_secs(DEFAULT_FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), $estimation_fut, ) .await @@ -584,7 +585,7 @@ impl BitcoindChainSource { for tx in &package { let txid = tx.compute_txid(); let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + Duration::from_secs(DEFAULT_TX_BROADCAST_TIMEOUT_SECS), self.api_client.broadcast_transaction(tx), ); match timeout_fut.await { diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 21e66f3a6..7b08c3845 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -24,10 +24,7 @@ use lightning::util::ser::Writeable; use lightning_transaction_sync::ElectrumSyncClient; use super::WalletSyncStatus; -use crate::config::{ - Config, ElectrumSyncConfig, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, - FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, -}; +use crate::config::{Config, ElectrumSyncConfig, BDK_CLIENT_STOP_GAP}; use crate::error::Error; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, @@ -41,7 +38,6 @@ use crate::NodeMetrics; const BDK_ELECTRUM_CLIENT_BATCH_SIZE: usize = 5; const ELECTRUM_CLIENT_NUM_RETRIES: u8 = 3; -const ELECTRUM_CLIENT_TIMEOUT_SECS: u8 = 10; pub(super) struct ElectrumChainSource { server_url: String, @@ -82,6 +78,7 @@ impl ElectrumChainSource { pub(super) fn start(&self, runtime: Arc) -> Result<(), Error> { self.electrum_runtime_status.write().unwrap().start( self.server_url.clone(), + self.sync_config.clone(), Arc::clone(&runtime), Arc::clone(&self.config), Arc::clone(&self.logger), @@ -318,13 +315,14 @@ impl ElectrumRuntimeStatus { } pub(super) fn start( - &mut self, server_url: String, runtime: Arc, config: Arc, - logger: Arc, + &mut self, server_url: String, sync_config: ElectrumSyncConfig, runtime: Arc, + config: Arc, logger: Arc, ) -> Result<(), Error> { match self { Self::Stopped { pending_registered_txs, pending_registered_outputs } => { let client = Arc::new(ElectrumRuntimeClient::new( - server_url.clone(), + server_url, + sync_config, runtime, config, logger, @@ -380,6 +378,7 @@ impl ElectrumRuntimeStatus { struct ElectrumRuntimeClient { electrum_client: Arc, + sync_config: ElectrumSyncConfig, bdk_electrum_client: Arc>>, tx_sync: Arc>>, runtime: Arc, @@ -389,11 +388,12 @@ struct ElectrumRuntimeClient { impl ElectrumRuntimeClient { fn new( - server_url: String, runtime: Arc, config: Arc, logger: Arc, + server_url: String, sync_config: ElectrumSyncConfig, runtime: Arc, + config: Arc, logger: Arc, ) -> Result { let electrum_config = ElectrumConfigBuilder::new() 
.retry(ELECTRUM_CLIENT_NUM_RETRIES) - .timeout(Some(ELECTRUM_CLIENT_TIMEOUT_SECS)) + .timeout(Some(sync_config.timeouts_config.per_request_timeout_secs)) .build(); let electrum_client = Arc::new( @@ -409,7 +409,15 @@ impl ElectrumRuntimeClient { Error::ConnectionFailed })?, ); - Ok(Self { electrum_client, bdk_electrum_client, tx_sync, runtime, config, logger }) + Ok(Self { + electrum_client, + sync_config, + bdk_electrum_client, + tx_sync, + runtime, + config, + logger, + }) } async fn sync_confirmables( @@ -419,8 +427,12 @@ impl ElectrumRuntimeClient { let tx_sync = Arc::clone(&self.tx_sync); let spawn_fut = self.runtime.spawn_blocking(move || tx_sync.sync(confirmables)); - let timeout_fut = - tokio::time::timeout(Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), spawn_fut); + let timeout_fut = tokio::time::timeout( + Duration::from_secs( + self.sync_config.timeouts_config.lightning_wallet_sync_timeout_secs, + ), + spawn_fut, + ); let res = timeout_fut .await @@ -461,8 +473,10 @@ impl ElectrumRuntimeClient { true, ) }); - let wallet_sync_timeout_fut = - tokio::time::timeout(Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), spawn_fut); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(self.sync_config.timeouts_config.onchain_wallet_sync_timeout_secs), + spawn_fut, + ); wallet_sync_timeout_fut .await @@ -490,8 +504,10 @@ impl ElectrumRuntimeClient { let spawn_fut = self.runtime.spawn_blocking(move || { bdk_electrum_client.sync(request, BDK_ELECTRUM_CLIENT_BATCH_SIZE, true) }); - let wallet_sync_timeout_fut = - tokio::time::timeout(Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), spawn_fut); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(self.sync_config.timeouts_config.onchain_wallet_sync_timeout_secs), + spawn_fut, + ); wallet_sync_timeout_fut .await @@ -517,8 +533,10 @@ impl ElectrumRuntimeClient { let spawn_fut = self.runtime.spawn_blocking(move || electrum_client.transaction_broadcast(&tx)); - let timeout_fut = - tokio::time::timeout(Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), spawn_fut); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(self.sync_config.timeouts_config.tx_broadcast_timeout_secs), + spawn_fut, + ); match timeout_fut.await { Ok(res) => match res { @@ -565,7 +583,9 @@ impl ElectrumRuntimeClient { let spawn_fut = self.runtime.spawn_blocking(move || electrum_client.batch_call(&batch)); let timeout_fut = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + Duration::from_secs( + self.sync_config.timeouts_config.fee_rate_cache_update_timeout_secs, + ), spawn_fut, ); diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index 8ab941888..245db72f6 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -17,11 +17,7 @@ use lightning::util::ser::Writeable; use lightning_transaction_sync::EsploraSyncClient; use super::WalletSyncStatus; -use crate::config::{ - Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, - BDK_WALLET_SYNC_TIMEOUT_SECS, DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, - FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, -}; +use crate::config::{Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP}; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, OnchainFeeEstimator, @@ -51,7 +47,8 @@ impl EsploraChainSource { logger: Arc, node_metrics: Arc>, ) -> Self { let mut client_builder = 
esplora_client::Builder::new(&server_url); - client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + client_builder = + client_builder.timeout(sync_config.timeouts_config.per_request_timeout_secs as u64); for (header_name, header_value) in &headers { client_builder = client_builder.header(header_name, header_value); @@ -183,14 +180,18 @@ impl EsploraChainSource { if incremental_sync { let sync_request = onchain_wallet.get_incremental_sync_request(); let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + Duration::from_secs( + self.sync_config.timeouts_config.onchain_wallet_sync_timeout_secs, + ), self.esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), ); get_and_apply_wallet_update!(wallet_sync_timeout_fut) } else { let full_scan_request = onchain_wallet.get_full_scan_request(); let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + Duration::from_secs( + self.sync_config.timeouts_config.onchain_wallet_sync_timeout_secs, + ), self.esplora_client.full_scan( full_scan_request, BDK_CLIENT_STOP_GAP, @@ -240,7 +241,9 @@ impl EsploraChainSource { ]; let timeout_fut = tokio::time::timeout( - Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), + Duration::from_secs( + self.sync_config.timeouts_config.lightning_wallet_sync_timeout_secs as u64, + ), self.tx_sync.sync(confirmables), ); let now = Instant::now(); @@ -278,7 +281,9 @@ impl EsploraChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { let now = Instant::now(); let estimates = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + Duration::from_secs( + self.sync_config.timeouts_config.fee_rate_cache_update_timeout_secs, + ), self.esplora_client.get_fee_estimates(), ) .await @@ -351,7 +356,7 @@ impl EsploraChainSource { for tx in &package { let txid = tx.compute_txid(); let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + Duration::from_secs(self.sync_config.timeouts_config.tx_broadcast_timeout_secs), self.esplora_client.broadcast(tx), ); match timeout_fut.await { diff --git a/src/config.rs b/src/config.rs index 6c9d1640a..47493bca0 100644 --- a/src/config.rs +++ b/src/config.rs @@ -29,6 +29,21 @@ const DEFAULT_FEE_RATE_CACHE_UPDATE_INTERVAL_SECS: u64 = 60 * 10; const DEFAULT_PROBING_LIQUIDITY_LIMIT_MULTIPLIER: u64 = 3; const DEFAULT_ANCHOR_PER_CHANNEL_RESERVE_SATS: u64 = 25_000; +// The default timeout after which we abort a wallet syncing operation. +const DEFAULT_BDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 20; + +// The default timeout after which we abort a wallet syncing operation. +const DEFAULT_LDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 10; + +// The default timeout after which we abort a fee rate cache update operation. +pub(crate) const DEFAULT_FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS: u64 = 5; + +// The default timeout after which we abort a transaction broadcast operation. +pub(crate) const DEFAULT_TX_BROADCAST_TIMEOUT_SECS: u64 = 5; + +// The default {Esplora,Electrum} client timeout we're using. +const DEFAULT_PER_REQUEST_TIMEOUT_SECS: u8 = 10; + /// The default log level. pub const DEFAULT_LOG_LEVEL: LogLevel = LogLevel::Debug; @@ -41,9 +56,6 @@ pub const DEFAULT_STORAGE_DIR_PATH: &str = "/tmp/ldk_node"; // The default Esplora server we're using. pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; -// The default Esplora client timeout we're using. 
-pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10; - // The 'stop gap' parameter used by BDK's wallet sync. This seems to configure the threshold // number of derivation indexes after which BDK stops looking for new scripts belonging to the wallet. pub(crate) const BDK_CLIENT_STOP_GAP: usize = 20; @@ -69,24 +81,12 @@ pub(crate) const NODE_ANN_BCAST_INTERVAL: Duration = Duration::from_secs(60 * 60 // The lower limit which we apply to any configured wallet sync intervals. pub(crate) const WALLET_SYNC_INTERVAL_MINIMUM_SECS: u64 = 10; -// The timeout after which we abort a wallet syncing operation. -pub(crate) const BDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 20; - -// The timeout after which we abort a wallet syncing operation. -pub(crate) const LDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 10; - // The timeout after which we give up waiting on LDK's event handler to exit on shutdown. pub(crate) const LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS: u64 = 30; // The timeout after which we give up waiting on a background task to exit on shutdown. pub(crate) const BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS: u64 = 5; -// The timeout after which we abort a fee rate cache update operation. -pub(crate) const FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS: u64 = 5; - -// The timeout after which we abort a transaction broadcast operation. -pub(crate) const TX_BROADCAST_TIMEOUT_SECS: u64 = 5; - // The maximum encoded size of an RGS snapshot we'll accept. // In practice the maximum we see is around 4MiB. pub(crate) const RGS_SNAPSHOT_MAX_SIZE: usize = 15 * 1024 * 1024; @@ -381,6 +381,43 @@ impl Default for BackgroundSyncConfig { } } +/// Timeout-related parameters for syncing the Lightning and on-chain wallets. +/// +/// ### Defaults +/// +/// | Parameter | Value | +/// |----------------------------------------|--------------------| +/// | `onchain_wallet_sync_timeout_secs` | 20 | +/// | `lightning_wallet_sync_timeout_secs` | 10 | +/// | `fee_rate_cache_update_timeout_secs` | 5 | +/// | `tx_broadcast_timeout_secs` | 5 | +/// | `per_request_timeout_secs` | 10 | +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct SyncTimeoutsConfig { + /// The timeout after which we abort syncing the onchain wallet. + pub onchain_wallet_sync_timeout_secs: u64, + /// The timeout after which we abort syncing the LDK wallet. + pub lightning_wallet_sync_timeout_secs: u64, + /// The timeout after which we abort updating the fee rate cache. + pub fee_rate_cache_update_timeout_secs: u64, + /// The timeout after which we abort broadcasting a transaction. + pub tx_broadcast_timeout_secs: u64, + /// The per-request timeout after which we abort a single Electrum or Esplora API request. + pub per_request_timeout_secs: u8, +} + +impl Default for SyncTimeoutsConfig { + fn default() -> Self { + Self { + onchain_wallet_sync_timeout_secs: DEFAULT_BDK_WALLET_SYNC_TIMEOUT_SECS, + lightning_wallet_sync_timeout_secs: DEFAULT_LDK_WALLET_SYNC_TIMEOUT_SECS, + fee_rate_cache_update_timeout_secs: DEFAULT_FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, + tx_broadcast_timeout_secs: DEFAULT_TX_BROADCAST_TIMEOUT_SECS, + per_request_timeout_secs: DEFAULT_PER_REQUEST_TIMEOUT_SECS, + } + } +} + /// Configuration for syncing with an Esplora backend. /// /// Background syncing is enabled by default, using the default values specified in @@ -394,11 +431,16 @@ pub struct EsploraSyncConfig { /// /// [`Node::sync_wallets`]: crate::Node::sync_wallets pub background_sync_config: Option, + /// Sync timeouts configuration. 
+ pub timeouts_config: SyncTimeoutsConfig, } impl Default for EsploraSyncConfig { fn default() -> Self { - Self { background_sync_config: Some(BackgroundSyncConfig::default()) } + Self { + background_sync_config: Some(BackgroundSyncConfig::default()), + timeouts_config: SyncTimeoutsConfig::default(), + } } } @@ -415,11 +457,16 @@ pub struct ElectrumSyncConfig { /// /// [`Node::sync_wallets`]: crate::Node::sync_wallets pub background_sync_config: Option, + /// Sync timeouts configuration. + pub timeouts_config: SyncTimeoutsConfig, } impl Default for ElectrumSyncConfig { fn default() -> Self { - Self { background_sync_config: Some(BackgroundSyncConfig::default()) } + Self { + background_sync_config: Some(BackgroundSyncConfig::default()), + timeouts_config: SyncTimeoutsConfig::default(), + } } } diff --git a/src/ffi/types.rs b/src/ffi/types.rs index f63a715e1..22dc47c7b 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -46,7 +46,7 @@ pub use vss_client::headers::{VssHeaderProvider, VssHeaderProviderError}; use crate::builder::sanitize_alias; pub use crate::config::{ default_config, AnchorChannelsConfig, BackgroundSyncConfig, ElectrumSyncConfig, - EsploraSyncConfig, MaxDustHTLCExposure, + EsploraSyncConfig, MaxDustHTLCExposure, SyncTimeoutsConfig, }; pub use crate::entropy::{generate_entropy_mnemonic, EntropyError, NodeEntropy, WordCount}; use crate::error::Error; diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 96f58297c..5f6657260 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -370,12 +370,14 @@ pub(crate) fn setup_node_for_async_payments( match chain_source { TestChainSource::Esplora(electrsd) => { let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); }, TestChainSource::Electrum(electrsd) => { let electrum_url = format!("tcp://{}", electrsd.electrum_url); - let sync_config = ElectrumSyncConfig { background_sync_config: None }; + let mut sync_config = ElectrumSyncConfig::default(); + sync_config.background_sync_config = None; builder.set_chain_source_electrum(electrum_url.clone(), Some(sync_config)); }, TestChainSource::BitcoindRpcSync(bitcoind) => { diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 4e94dd044..605dd0613 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -158,7 +158,8 @@ async fn multi_hop_sending() { let mut nodes = Vec::new(); for _ in 0..5 { let config = random_config(true); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; setup_builder!(builder, config.node_config); builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); let node = builder.build(config.node_entropy.into()).unwrap(); @@ -256,7 +257,8 @@ async fn start_stop_reinit() { let test_sync_store = TestSyncStore::new(config.node_config.storage_dir_path.clone().into()); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; setup_builder!(builder, config.node_config); builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); @@ -1709,7 +1711,8 @@ async fn 
do_lsps2_client_service_integration(client_trusts_lsp: bool) { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; // Setup three nodes: service, client, and payer let channel_opening_fee_ppm = 10_000; @@ -2026,7 +2029,8 @@ async fn lsps2_client_trusts_lsp() { let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; // Setup three nodes: service, client, and payer let channel_opening_fee_ppm = 10_000; @@ -2199,7 +2203,8 @@ async fn lsps2_lsp_trusts_client_but_client_does_not_claim() { let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; // Setup three nodes: service, client, and payer let channel_opening_fee_ppm = 10_000; From 40c4b099efffcce9dc8181f4d75f9106480c9893 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 29 Jan 2026 11:21:23 +0100 Subject: [PATCH 50/75] Bump default wallet syncing timeouts It seems that users often hit these timeouts when running in production, especially when run in sub-optimal network conditions. Here we considerably bump the timeouts, in the hope that users shouldn't normally hit them anymore. Signed-off-by: Elias Rohrer --- src/config.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/config.rs b/src/config.rs index 47493bca0..103b74657 100644 --- a/src/config.rs +++ b/src/config.rs @@ -30,16 +30,16 @@ const DEFAULT_PROBING_LIQUIDITY_LIMIT_MULTIPLIER: u64 = 3; const DEFAULT_ANCHOR_PER_CHANNEL_RESERVE_SATS: u64 = 25_000; // The default timeout after which we abort a wallet syncing operation. -const DEFAULT_BDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 20; +const DEFAULT_BDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 60; // The default timeout after which we abort a wallet syncing operation. -const DEFAULT_LDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 10; +const DEFAULT_LDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 30; // The default timeout after which we abort a fee rate cache update operation. -pub(crate) const DEFAULT_FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS: u64 = 5; +pub(crate) const DEFAULT_FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS: u64 = 10; // The default timeout after which we abort a transaction broadcast operation. -pub(crate) const DEFAULT_TX_BROADCAST_TIMEOUT_SECS: u64 = 5; +pub(crate) const DEFAULT_TX_BROADCAST_TIMEOUT_SECS: u64 = 10; // The default {Esplora,Electrum} client timeout we're using.
const DEFAULT_PER_REQUEST_TIMEOUT_SECS: u8 = 10; @@ -387,10 +387,10 @@ impl Default for BackgroundSyncConfig { /// /// | Parameter | Value | /// |----------------------------------------|--------------------| -/// | `onchain_wallet_sync_timeout_secs` | 20 | -/// | `lightning_wallet_sync_timeout_secs` | 10 | -/// | `fee_rate_cache_update_timeout_secs` | 5 | -/// | `tx_broadcast_timeout_secs` | 5 | +/// | `onchain_wallet_sync_timeout_secs` | 60 | +/// | `lightning_wallet_sync_timeout_secs` | 30 | +/// | `fee_rate_cache_update_timeout_secs` | 10 | +/// | `tx_broadcast_timeout_secs` | 10 | /// | `per_request_timeout_secs` | 10 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct SyncTimeoutsConfig { From 83fc0a7debc0abcce634d6f27f533a4ea85688c5 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Thu, 29 Jan 2026 13:32:24 +0100 Subject: [PATCH 51/75] Bump LDK to latest `main` We bump our LDK dependency to commit 7fe3268475551b0664d315bfbc860416ca8fc774. Signed-off-by: Elias Rohrer --- Cargo.toml | 26 +++++++++++++------------- src/builder.rs | 3 ++- src/ffi/types.rs | 7 ++----- src/payment/bolt11.rs | 12 ++++++------ 4 files changed, 23 insertions(+), 25 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6996f59f0..a2f4f1984 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,17 +39,17 @@ default = [] #lightning-liquidity = { version = "0.2.0", features = ["std"] } #lightning-macros = { version = "0.2.0" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["std"] } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std"] } 
+lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -78,13 +78,13 @@ log = { version = "0.4.22", default-features = false, features = ["std"]} vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} #bitcoin-payment-instructions = { version = "0.6" } -bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "6796e87525d6c564e1332354a808730e2ba2ebf8" } +bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "b9f9991b42e9d71b3ca966818a93b158cf8f6c40" } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "e9ce486a425933041b319ac72512227353310dc5", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } diff --git a/src/builder.rs b/src/builder.rs index bedaba4b5..5d8a5a7a9 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -32,6 +32,7 @@ use lightning::routing::scoring::{ ProbabilisticScoringFeeParameters, }; use lightning::sign::{EntropySource, NodeSigner}; +use lightning::util::config::HTLCInterceptionFlags; use lightning::util::persist::{ KVStore, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, @@ -1434,7 +1435,7 @@ fn build_with_store_internal( if liquidity_source_config.and_then(|lsc| lsc.lsps2_service.as_ref()).is_some() { // If we act as an LSPS2 service, we need to be able to intercept HTLCs and forward the // information to the service handler. - user_config.accept_intercept_htlcs = true; + user_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs.into(); // If we act as an LSPS2 service, we allow forwarding to unannounced channels. 
user_config.accept_forwards_to_priv_channels = true; diff --git a/src/ffi/types.rs b/src/ffi/types.rs index f63a715e1..63870893f 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -1016,7 +1016,7 @@ impl Bolt11Invoice { /// Returns the hash to which we will receive the preimage on completion of the payment pub fn payment_hash(&self) -> PaymentHash { - PaymentHash(self.inner.payment_hash().to_byte_array()) + self.inner.payment_hash() } /// Get the payment secret if one was included in the invoice @@ -1434,10 +1434,7 @@ mod tests { let invoice_str = wrapped_invoice.to_string(); let parsed_invoice: LdkBolt11Invoice = invoice_str.parse().unwrap(); - assert_eq!( - ldk_invoice.payment_hash().to_byte_array().to_vec(), - parsed_invoice.payment_hash().to_byte_array().to_vec() - ); + assert_eq!(ldk_invoice.payment_hash(), parsed_invoice.payment_hash(),); } #[test] diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index 60c313381..41597bfcc 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -98,8 +98,8 @@ impl Bolt11Payment { } let invoice = maybe_deref(invoice); - let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); - let payment_id = PaymentId(invoice.payment_hash().to_byte_array()); + let payment_hash = invoice.payment_hash(); + let payment_id = PaymentId(invoice.payment_hash().0); if let Some(payment) = self.payment_store.get(&payment_id) { if payment.status == PaymentStatus::Pending || payment.status == PaymentStatus::Succeeded @@ -204,8 +204,8 @@ impl Bolt11Payment { } } - let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); - let payment_id = PaymentId(invoice.payment_hash().to_byte_array()); + let payment_hash = invoice.payment_hash(); + let payment_id = PaymentId(invoice.payment_hash().0); if let Some(payment) = self.payment_store.get(&payment_id) { if payment.status == PaymentStatus::Pending || payment.status == PaymentStatus::Succeeded @@ -494,7 +494,7 @@ impl Bolt11Payment { } }; - let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); + let payment_hash = invoice.payment_hash(); let payment_secret = invoice.payment_secret(); let id = PaymentId(payment_hash.0); let preimage = if manual_claim_payment_hash.is_none() { @@ -712,7 +712,7 @@ impl Bolt11Payment { })?; // Register payment in payment store. - let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); + let payment_hash = invoice.payment_hash(); let payment_secret = invoice.payment_secret(); let lsp_fee_limits = LSPFeeLimits { max_total_opening_fee_msat: lsp_total_opening_fee, From 9d81ac6873feb49cf821ac31eb25382f8a7e5a74 Mon Sep 17 00:00:00 2001 From: Fmt Bot Date: Sun, 1 Feb 2026 02:07:32 +0000 Subject: [PATCH 52/75] 2026-02-01 automated rustfmt nightly --- src/payment/pending_payment_store.rs | 186 +++++++++++++-------------- 1 file changed, 93 insertions(+), 93 deletions(-) diff --git a/src/payment/pending_payment_store.rs b/src/payment/pending_payment_store.rs index 580bdcbcc..5fef8d14a 100644 --- a/src/payment/pending_payment_store.rs +++ b/src/payment/pending_payment_store.rs @@ -1,93 +1,93 @@ -// This file is Copyright its original authors, visible in version control history. -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in -// accordance with one or both of these licenses. 
- -use bitcoin::Txid; -use lightning::{impl_writeable_tlv_based, ln::channelmanager::PaymentId}; - -use crate::{ - data_store::{StorableObject, StorableObjectUpdate}, - payment::{store::PaymentDetailsUpdate, PaymentDetails}, -}; - -/// Represents a pending payment -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct PendingPaymentDetails { - /// The full payment details - pub details: PaymentDetails, - /// Transaction IDs that have replaced or conflict with this payment. - pub conflicting_txids: Vec, -} - -impl PendingPaymentDetails { - pub(crate) fn new(details: PaymentDetails, conflicting_txids: Vec) -> Self { - Self { details, conflicting_txids } - } - - /// Convert to finalized payment for the main payment store - pub fn into_payment_details(self) -> PaymentDetails { - self.details - } -} - -impl_writeable_tlv_based!(PendingPaymentDetails, { - (0, details, required), - (2, conflicting_txids, optional_vec), -}); - -#[derive(Clone, Debug, PartialEq, Eq)] -pub(crate) struct PendingPaymentDetailsUpdate { - pub id: PaymentId, - pub payment_update: Option, - pub conflicting_txids: Option>, -} - -impl StorableObject for PendingPaymentDetails { - type Id = PaymentId; - type Update = PendingPaymentDetailsUpdate; - - fn id(&self) -> Self::Id { - self.details.id - } - - fn update(&mut self, update: &Self::Update) -> bool { - let mut updated = false; - - // Update the underlying payment details if present - if let Some(payment_update) = &update.payment_update { - updated |= self.details.update(payment_update); - } - - if let Some(new_conflicting_txids) = &update.conflicting_txids { - if &self.conflicting_txids != new_conflicting_txids { - self.conflicting_txids = new_conflicting_txids.clone(); - updated = true; - } - } - - updated - } - - fn to_update(&self) -> Self::Update { - self.into() - } -} - -impl StorableObjectUpdate for PendingPaymentDetailsUpdate { - fn id(&self) -> ::Id { - self.id - } -} - -impl From<&PendingPaymentDetails> for PendingPaymentDetailsUpdate { - fn from(value: &PendingPaymentDetails) -> Self { - Self { - id: value.id(), - payment_update: Some(value.details.to_update()), - conflicting_txids: Some(value.conflicting_txids.clone()), - } - } -} +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use bitcoin::Txid; +use lightning::impl_writeable_tlv_based; +use lightning::ln::channelmanager::PaymentId; + +use crate::data_store::{StorableObject, StorableObjectUpdate}; +use crate::payment::store::PaymentDetailsUpdate; +use crate::payment::PaymentDetails; + +/// Represents a pending payment +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct PendingPaymentDetails { + /// The full payment details + pub details: PaymentDetails, + /// Transaction IDs that have replaced or conflict with this payment. 
+	pub conflicting_txids: Vec<Txid>,
+}
+
+impl PendingPaymentDetails {
+	pub(crate) fn new(details: PaymentDetails, conflicting_txids: Vec<Txid>) -> Self {
+		Self { details, conflicting_txids }
+	}
+
+	/// Convert to finalized payment for the main payment store
+	pub fn into_payment_details(self) -> PaymentDetails {
+		self.details
+	}
+}
+
+impl_writeable_tlv_based!(PendingPaymentDetails, {
+	(0, details, required),
+	(2, conflicting_txids, optional_vec),
+});
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) struct PendingPaymentDetailsUpdate {
+	pub id: PaymentId,
+	pub payment_update: Option<PaymentDetailsUpdate>,
+	pub conflicting_txids: Option<Vec<Txid>>,
+}
+
+impl StorableObject for PendingPaymentDetails {
+	type Id = PaymentId;
+	type Update = PendingPaymentDetailsUpdate;
+
+	fn id(&self) -> Self::Id {
+		self.details.id
+	}
+
+	fn update(&mut self, update: &Self::Update) -> bool {
+		let mut updated = false;
+
+		// Update the underlying payment details if present
+		if let Some(payment_update) = &update.payment_update {
+			updated |= self.details.update(payment_update);
+		}
+
+		if let Some(new_conflicting_txids) = &update.conflicting_txids {
+			if &self.conflicting_txids != new_conflicting_txids {
+				self.conflicting_txids = new_conflicting_txids.clone();
+				updated = true;
+			}
+		}
+
+		updated
+	}
+
+	fn to_update(&self) -> Self::Update {
+		self.into()
+	}
+}
+
+impl StorableObjectUpdate<PendingPaymentDetails> for PendingPaymentDetailsUpdate {
+	fn id(&self) -> <PendingPaymentDetails as StorableObject>::Id {
+		self.id
+	}
+}
+
+impl From<&PendingPaymentDetails> for PendingPaymentDetailsUpdate {
+	fn from(value: &PendingPaymentDetails) -> Self {
+		Self {
+			id: value.id(),
+			payment_update: Some(value.details.to_update()),
+			conflicting_txids: Some(value.conflicting_txids.clone()),
+		}
+	}
+}

From 43ebe0eff465716422702ad397f520a16341a316 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Tue, 3 Feb 2026 10:58:48 +0100
Subject: [PATCH 53/75] Update to upstream LDK after 4263 and 4370

lightningdevkit/rust-lightning#4263 and lightningdevkit/rust-lightning#4370
changed the `lightning` API, which we update to here.
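As a rough illustration of the new call shape (not part of this patch): retry and routing overrides that were previously passed to `pay_for_bolt11_invoice` as separate arguments are now bundled into a single `OptionalBolt11PaymentParams` struct. The struct, field, and import names below are taken from the hunks in this patch; the wrapper function and the ten-second timeout are illustrative only.

```rust
use std::time::Duration;

use lightning::ln::channelmanager::OptionalBolt11PaymentParams;
use lightning::ln::outbound_payment::Retry;
use lightning::routing::router::RouteParametersConfig;

// Bundle the optional payment knobs that used to be separate arguments.
fn build_optional_params(
	route_params_config: RouteParametersConfig,
) -> OptionalBolt11PaymentParams {
	OptionalBolt11PaymentParams {
		retry_strategy: Retry::Timeout(Duration::from_secs(10)),
		route_params_config,
		// Leave any remaining optional fields at their defaults.
		..Default::default()
	}
}
```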
--- Cargo.toml | 26 +++++++++++++------------- src/payment/bolt11.rs | 23 ++++++++++++++++------- src/payment/bolt12.rs | 3 ++- src/payment/spontaneous.rs | 23 +++++++++++++++-------- 4 files changed, 46 insertions(+), 29 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a2f4f1984..3dcad31a5 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,17 +39,17 @@ default = [] #lightning-liquidity = { version = "0.2.0", features = ["std"] } #lightning-macros = { version = "0.2.0" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std"] } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["esplora-async-https", "time", 
"electrum-rustls-ring"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -78,13 +78,13 @@ log = { version = "0.4.22", default-features = false, features = ["std"]} vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} #bitcoin-payment-instructions = { version = "0.6" } -bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "b9f9991b42e9d71b3ca966818a93b158cf8f6c40" } +bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "7ee3958834aba575e04b91a862f790d798a7d578" } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index 41597bfcc..ee449c44c 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -14,8 +14,9 @@ use std::sync::{Arc, RwLock}; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; use lightning::ln::channelmanager::{ - Bolt11InvoiceParameters, Bolt11PaymentError, PaymentId, Retry, RetryableSendFailure, + Bolt11InvoiceParameters, OptionalBolt11PaymentParams, PaymentId, }; +use lightning::ln::outbound_payment::{Bolt11PaymentError, Retry, RetryableSendFailure}; use lightning::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; use lightning_invoice::{ Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescription as LdkBolt11InvoiceDescription, @@ -109,17 +110,21 @@ impl Bolt11Payment { } } - let route_parameters = + let route_params_config = route_parameters.or(self.config.route_parameters).unwrap_or_default(); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); let payment_secret = Some(*invoice.payment_secret()); + let optional_params = OptionalBolt11PaymentParams { + retry_strategy, + route_params_config, + ..Default::default() + }; match self.channel_manager.pay_for_bolt11_invoice( invoice, payment_id, None, - route_parameters, - retry_strategy, + optional_params, ) { Ok(()) => { let payee_pubkey = invoice.recover_payee_pub_key(); @@ -215,17 +220,21 @@ impl Bolt11Payment { } } - let route_parameters = + let route_params_config = route_parameters.or(self.config.route_parameters).unwrap_or_default(); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); let payment_secret = Some(*invoice.payment_secret()); + let optional_params = OptionalBolt11PaymentParams { + retry_strategy, + route_params_config, + ..Default::default() + }; match self.channel_manager.pay_for_bolt11_invoice( invoice, payment_id, Some(amount_msat), - route_parameters, - retry_strategy, + optional_params, ) { Ok(()) => { let payee_pubkey 
= invoice.recover_payee_pub_key(); diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 98f1d21ef..ada4cd7e2 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -14,7 +14,8 @@ use std::sync::{Arc, RwLock}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use lightning::blinded_path::message::BlindedMessagePath; -use lightning::ln::channelmanager::{OptionalOfferPaymentParams, PaymentId, Retry}; +use lightning::ln::channelmanager::{OptionalOfferPaymentParams, PaymentId}; +use lightning::ln::outbound_payment::Retry; use lightning::offers::offer::{Amount, Offer as LdkOffer, OfferFromHrn, Quantity}; use lightning::offers::parse::Bolt12SemanticError; use lightning::routing::router::RouteParametersConfig; diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 6c074f308..84f6c6412 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -10,7 +10,10 @@ use std::sync::{Arc, RwLock}; use bitcoin::secp256k1::PublicKey; -use lightning::ln::channelmanager::{PaymentId, RecipientOnionFields, Retry, RetryableSendFailure}; +use lightning::ln::channelmanager::PaymentId; +use lightning::ln::outbound_payment::{ + RecipientCustomTlvs, RecipientOnionFields, Retry, RetryableSendFailure, +}; use lightning::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; use lightning::sign::EntropySource; use lightning_types::payment::{PaymentHash, PaymentPreimage}; @@ -125,15 +128,19 @@ impl SpontaneousPayment { *max_channel_saturation_power_of_half; } - let recipient_fields = match custom_tlvs { - Some(tlvs) => RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs(tlvs.into_iter().map(|tlv| (tlv.type_num, tlv.value)).collect()) - .map_err(|e| { - log_error!(self.logger, "Failed to send payment with custom TLVs: {:?}", e); + let mut recipient_fields = RecipientOnionFields::spontaneous_empty(); + if let Some(tlvs) = custom_tlvs { + let tlvs_vec = tlvs.into_iter().map(|tlv| (tlv.type_num, tlv.value)).collect(); + recipient_fields = recipient_fields.with_custom_tlvs( + RecipientCustomTlvs::new(tlvs_vec).map_err(|()| { + log_error!( + self.logger, + "Attempted to set payment custom TLVs to a spec-defined value" + ); Error::InvalidCustomTlvs })?, - None => RecipientOnionFields::spontaneous_empty(), - }; + ); + } match self.channel_manager.send_spontaneous_payment( Some(payment_preimage), From 1ffc0d2b859c829bc72cbb3c623b3d9f85c05ca3 Mon Sep 17 00:00:00 2001 From: Jon Date: Tue, 3 Feb 2026 12:02:52 -0600 Subject: [PATCH 54/75] Update rust-lightning dependencies & fix InvoiceBuilder breakage --- Cargo.toml | 26 +++++++++++++------------- src/liquidity.rs | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 3dcad31a5..67e492185 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,17 +39,17 @@ default = [] #lightning-liquidity = { version = "0.2.0", features = ["std"] } #lightning-macros = { version = "0.2.0" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099" } -lightning-persister = { git 
= "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["std"] } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -78,13 +78,13 @@ log = { version = "0.4.22", default-features = false, features = ["std"]} vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} #bitcoin-payment-instructions = { version = "0.6" } -bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "7ee3958834aba575e04b91a862f790d798a7d578" } +bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "e4d519b95b26916dc6efa22f8f1cc11a818ce7a7" } 
[target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "35ab03fbe0fe0927a9242754a0797553f6f7f099", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } diff --git a/src/liquidity.rs b/src/liquidity.rs index 2151110b6..cbfd7b109 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -1314,7 +1314,7 @@ where let currency = self.config.network.into(); let mut invoice_builder = InvoiceBuilder::new(currency) .invoice_description(description.clone()) - .payment_hash(payment_hash) + .payment_hash(lightning_invoice::PaymentHash(payment_hash.to_byte_array())) .payment_secret(payment_secret) .current_timestamp() .min_final_cltv_expiry_delta(min_final_cltv_expiry_delta.into()) From 84284f48b1efeb0524ce94e20fb1213fb2113616 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 11 Feb 2026 10:01:58 +0100 Subject: [PATCH 55/75] Update rust-lightning to b6c17c593a and fix breaking API changes Bump rust-lightning from 817ab5e583 to b6c17c593a and bitcoin-payment-instructions to ea50a9d2a8. Adapt to upstream breaking changes: - `UserConfig::manually_accept_inbound_channels` was removed; manual acceptance is now the default and only behavior, so the explicit assignment is dropped. - `InvoiceBuilder::payment_hash()` now takes the unified `PaymentHash` type directly, removing the need for the `lightning_invoice::PaymentHash` wrapper and the `sha256`/`Hash` imports in liquidity.rs. - `BroadcasterInterface::broadcast_transactions` now takes `&[(&Transaction, TransactionType)]` instead of `&[&Transaction]`. The `TransactionType` enum was redesigned with specific variants (Funding, CooperativeClose, Sweep, etc.) replacing the old `NonLightning` variant. Wallet-originated on-chain sends now use `TransactionType::Sweep { channels: vec![] }`. - `Event::SpendableOutputs` gained a `counterparty_node_id` field, which is forwarded to `OutputSweeper::track_spendable_outputs` so sweep transactions carry channel metadata. - The `impl_writeable_tlv_based` legacy field syntax gained an additional validation closure parameter. - `negotiate_anchors_zero_fee_htlc_tx` now defaults to `true` upstream, which changes force-close behavior (commitment transactions go through BumpTransactionEvent rather than direct broadcast). The KVStore persistence test is updated to use `test_legacy_channel_config()` to match the upstream persister test pattern. 
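For illustration (not part of this patch), a minimal implementor of the new `BroadcasterInterface` shape described above might look as follows. The trait path, method signature, and variant names come from the hunks in this patch; the toy broadcaster itself is hypothetical.

```rust
use bitcoin::Transaction;
use lightning::chain::chaininterface::{BroadcasterInterface, TransactionType};

// Hypothetical broadcaster that only logs; the real ldk-node implementation
// forwards the transactions to an async broadcast queue instead.
struct LoggingBroadcaster;

impl BroadcasterInterface for LoggingBroadcaster {
	fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]) {
		for (tx, _tx_type) in txs {
			// Each transaction now carries a type tag (Funding,
			// CooperativeClose, Sweep, ...) that implementations may use for
			// logging or rebroadcast policy; this sketch ignores it.
			println!("broadcasting {}", tx.compute_txid());
		}
	}
}
```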
Co-Authored-By: Claude Opus 4.6 --- Cargo.toml | 26 +++++++++++++------------- src/config.rs | 1 - src/event.rs | 4 ++-- src/io/test_utils.rs | 6 ++++-- src/lib.rs | 2 +- src/liquidity.rs | 8 +------- src/tx_broadcaster.rs | 6 +++--- src/wallet/mod.rs | 5 ++++- 8 files changed, 28 insertions(+), 30 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 67e492185..5c82d7d65 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,17 +39,17 @@ default = [] #lightning-liquidity = { version = "0.2.0", features = ["std"] } #lightning-macros = { version = "0.2.0" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["std"] } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = 
"b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -78,13 +78,13 @@ log = { version = "0.4.22", default-features = false, features = ["std"]} vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} #bitcoin-payment-instructions = { version = "0.6" } -bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "e4d519b95b26916dc6efa22f8f1cc11a818ce7a7" } +bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "ea50a9d2a8da524b69a2af43233706666cf2ffa5" } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "817ab5e583002df5e32b3a71e7ab093005a2a39a", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "b6c17c593a5d7bacb18fe3b9f69074a0596ae8f0", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } diff --git a/src/config.rs b/src/config.rs index 103b74657..1dfa66176 100644 --- a/src/config.rs +++ b/src/config.rs @@ -330,7 +330,6 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig { // will mostly be relevant for inbound channels. 
let mut user_config = UserConfig::default(); user_config.channel_handshake_limits.force_announced_channel_preference = false; - user_config.manually_accept_inbound_channels = true; user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = config.anchor_channels_config.is_some(); user_config.reject_inbound_splices = false; diff --git a/src/event.rs b/src/event.rs index 6f0ed8e09..742d99d2f 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1118,10 +1118,10 @@ where liquidity_source.handle_htlc_handling_failed(failure_type).await; } }, - LdkEvent::SpendableOutputs { outputs, channel_id } => { + LdkEvent::SpendableOutputs { outputs, channel_id, counterparty_node_id } => { match self .output_sweeper - .track_spendable_outputs(outputs, channel_id, true, None) + .track_spendable_outputs(outputs, channel_id, counterparty_node_id, true, None) .await { Ok(_) => return Ok(()), diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index cbcd90d29..9add2d6c1 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -15,7 +15,7 @@ use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::{ check_added_monitors, check_closed_event, connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, - create_node_chanmgrs, send_payment, TestChanMonCfg, + create_node_chanmgrs, send_payment, test_legacy_channel_config, TestChanMonCfg, }; use lightning::util::persist::{ KVStore, KVStoreSync, MonitorUpdatingPersister, KVSTORE_NAMESPACE_KEY_MAX_LEN, @@ -259,7 +259,9 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs); node_cfgs[0].chain_monitor = chain_mon_0; node_cfgs[1].chain_monitor = chain_mon_1; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let legacy_cfg = test_legacy_channel_config(); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(legacy_cfg.clone()), Some(legacy_cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); // Check that the persisted channel data is empty before any channels are diff --git a/src/lib.rs b/src/lib.rs index d2222d949..64a9ac1a2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1856,7 +1856,7 @@ impl_writeable_tlv_based!(NodeMetrics, { (6, latest_rgs_snapshot_timestamp, option), (8, latest_node_announcement_broadcast_timestamp, option), // 10 used to be latest_channel_monitor_archival_height - (10, _legacy_latest_channel_monitor_archival_height, (legacy, u32, |_: &NodeMetrics| None::> )), + (10, _legacy_latest_channel_monitor_archival_height, (legacy, u32, |_| Ok(()), |_: &NodeMetrics| None::> )), }); pub(crate) fn total_anchor_channels_reserve_sats( diff --git a/src/liquidity.rs b/src/liquidity.rs index cbfd7b109..30a8068ad 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -12,7 +12,6 @@ use std::ops::Deref; use std::sync::{Arc, Mutex, RwLock, Weak}; use std::time::Duration; -use bitcoin::hashes::{sha256, Hash}; use bitcoin::secp256k1::{PublicKey, Secp256k1}; use bitcoin::Transaction; use chrono::Utc; @@ -1306,15 +1305,10 @@ where htlc_maximum_msat: None, }]); - let payment_hash = sha256::Hash::from_slice(&payment_hash.0).map_err(|e| { - log_error!(self.logger, "Invalid payment hash: {:?}", e); - Error::InvoiceCreationFailed - })?; - let currency = self.config.network.into(); let mut invoice_builder = InvoiceBuilder::new(currency) .invoice_description(description.clone()) - .payment_hash(lightning_invoice::PaymentHash(payment_hash.to_byte_array())) + 
.payment_hash(payment_hash) .payment_secret(payment_secret) .current_timestamp() .min_final_cltv_expiry_delta(min_final_cltv_expiry_delta.into()) diff --git a/src/tx_broadcaster.rs b/src/tx_broadcaster.rs index 12a1fe650..7084135b0 100644 --- a/src/tx_broadcaster.rs +++ b/src/tx_broadcaster.rs @@ -8,7 +8,7 @@ use std::ops::Deref; use bitcoin::Transaction; -use lightning::chain::chaininterface::BroadcasterInterface; +use lightning::chain::chaininterface::{BroadcasterInterface, TransactionType}; use tokio::sync::{mpsc, Mutex, MutexGuard}; use crate::logger::{log_error, LdkLogger}; @@ -44,8 +44,8 @@ impl BroadcasterInterface for TransactionBroadcaster where L::Target: LdkLogger, { - fn broadcast_transactions(&self, txs: &[&Transaction]) { - let package = txs.iter().map(|&t| t.clone()).collect::>(); + fn broadcast_transactions(&self, txs: &[(&Transaction, TransactionType)]) { + let package = txs.iter().map(|(t, _)| (*t).clone()).collect::>(); self.queue_sender.try_send(package).unwrap_or_else(|e| { log_error!(self.logger, "Failed to broadcast transactions: {}", e); }); diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 05c743bd9..f808e9a3f 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -648,7 +648,10 @@ impl Wallet { })? }; - self.broadcaster.broadcast_transactions(&[&tx]); + self.broadcaster.broadcast_transactions(&[( + &tx, + lightning::chain::chaininterface::TransactionType::Sweep { channels: vec![] }, + )]); let txid = tx.compute_txid(); From 63e8b955008020188bc58c862e97da668db68723 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 11 Feb 2026 15:32:15 +0100 Subject: [PATCH 56/75] Take `StorableObjectUpdate` by value in `StorableObject::update` Change the `update` method on `StorableObject` to take the update by value rather than by reference. This avoids unnecessary clones when applying updates, since the caller typically constructs a fresh update struct that can simply be moved. 
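The motivation is easiest to see in a toy version of the pattern (illustrative only, not ldk-node code): once `update` owns its argument, implementors can move fields out of the update instead of cloning them out of a borrow.

```rust
trait Updatable {
	type Update;
	// Taking the update by value lets implementors move its fields into place.
	fn update(&mut self, update: Self::Update) -> bool;
}

struct Record {
	data: Vec<u8>,
}

struct RecordUpdate {
	data: Option<Vec<u8>>,
}

impl Updatable for Record {
	type Update = RecordUpdate;

	fn update(&mut self, update: Self::Update) -> bool {
		match update.data {
			// With `&Self::Update` this assignment would need a clone; owning
			// the update makes it a plain move.
			Some(data) if data != self.data => {
				self.data = data;
				true
			},
			_ => false,
		}
	}
}
```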
Co-Authored-By: HAL 9000 Signed-off-by: Elias Rohrer --- src/data_store.rs | 14 +++++++------- src/event.rs | 16 ++++++++-------- src/payment/bolt11.rs | 2 +- src/payment/pending_payment_store.rs | 10 +++++----- src/payment/store.rs | 2 +- 5 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/data_store.rs b/src/data_store.rs index ff09d9902..ac5c78fb7 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -21,7 +21,7 @@ pub(crate) trait StorableObject: Clone + Readable + Writeable { type Update: StorableObjectUpdate; fn id(&self) -> Self::Id; - fn update(&mut self, update: &Self::Update) -> bool; + fn update(&mut self, update: Self::Update) -> bool; fn to_update(&self) -> Self::Update; } @@ -79,7 +79,7 @@ where match locked_objects.entry(object.id()) { hash_map::Entry::Occupied(mut e) => { let update = object.to_update(); - updated = e.get_mut().update(&update); + updated = e.get_mut().update(update); if updated { self.persist(&e.get())?; } @@ -124,7 +124,7 @@ where self.objects.lock().unwrap().get(id).cloned() } - pub(crate) fn update(&self, update: &SO::Update) -> Result { + pub(crate) fn update(&self, update: SO::Update) -> Result { let mut locked_objects = self.objects.lock().unwrap(); if let Some(object) = locked_objects.get_mut(&update.id()) { @@ -219,7 +219,7 @@ mod tests { self.id } - fn update(&mut self, update: &Self::Update) -> bool { + fn update(&mut self, update: Self::Update) -> bool { if self.data != update.data { self.data = update.data; true @@ -276,17 +276,17 @@ mod tests { // Check update returns `Updated` let update = TestObjectUpdate { id, data: [25u8; 3] }; - assert_eq!(Ok(DataStoreUpdateResult::Updated), data_store.update(&update)); + assert_eq!(Ok(DataStoreUpdateResult::Updated), data_store.update(update)); assert_eq!(data_store.get(&id).unwrap().data, [25u8; 3]); // Check no-op update yields `Unchanged` let update = TestObjectUpdate { id, data: [25u8; 3] }; - assert_eq!(Ok(DataStoreUpdateResult::Unchanged), data_store.update(&update)); + assert_eq!(Ok(DataStoreUpdateResult::Unchanged), data_store.update(update)); // Check bogus update yields `NotFound` let bogus_id = TestObjectId { id: [84u8; 4] }; let update = TestObjectUpdate { id: bogus_id, data: [12u8; 3] }; - assert_eq!(Ok(DataStoreUpdateResult::NotFound), data_store.update(&update)); + assert_eq!(Ok(DataStoreUpdateResult::NotFound), data_store.update(update)); // Check `insert_or_update` inserts unknown objects let iou_id = TestObjectId { id: [55u8; 4] }; diff --git a/src/event.rs b/src/event.rs index 742d99d2f..e24059ec7 100644 --- a/src/event.rs +++ b/src/event.rs @@ -657,7 +657,7 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - match self.payment_store.update(&update) { + match self.payment_store.update(update) { Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to access payment store: {}", e); @@ -681,7 +681,7 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - match self.payment_store.update(&update) { + match self.payment_store.update(update) { Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to access payment store: {}", e); @@ -722,7 +722,7 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - match self.payment_store.update(&update) { + match self.payment_store.update(update) { Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to access payment store: {}", e); @@ -739,7 +739,7 @@ where 
counterparty_skimmed_fee_msat: Some(Some(counterparty_skimmed_fee_msat)), ..PaymentDetailsUpdate::new(payment_id) }; - match self.payment_store.update(&update) { + match self.payment_store.update(update) { Ok(_) => (), Err(e) => { log_error!(self.logger, "Failed to access payment store: {}", e); @@ -910,7 +910,7 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - match self.payment_store.update(&update) { + match self.payment_store.update(update) { Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to access payment store: {}", e); @@ -978,7 +978,7 @@ where }, }; - match self.payment_store.update(&update) { + match self.payment_store.update(update) { Ok(DataStoreUpdateResult::Updated) | Ok(DataStoreUpdateResult::Unchanged) => ( // No need to do anything if the idempotent update was applied, which might // be the result of a replayed event. @@ -1039,7 +1039,7 @@ where ..PaymentDetailsUpdate::new(payment_id) }; - match self.payment_store.update(&update) { + match self.payment_store.update(update) { Ok(_) => {}, Err(e) => { log_error!(self.logger, "Failed to access payment store: {}", e); @@ -1090,7 +1090,7 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - match self.payment_store.update(&update) { + match self.payment_store.update(update) { Ok(_) => {}, Err(e) => { log_error!(self.logger, "Failed to access payment store: {}", e); diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index ee449c44c..56eb2f20b 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -383,7 +383,7 @@ impl Bolt11Payment { ..PaymentDetailsUpdate::new(payment_id) }; - match self.payment_store.update(&update) { + match self.payment_store.update(update) { Ok(DataStoreUpdateResult::Updated) | Ok(DataStoreUpdateResult::Unchanged) => (), Ok(DataStoreUpdateResult::NotFound) => { log_error!( diff --git a/src/payment/pending_payment_store.rs b/src/payment/pending_payment_store.rs index 5fef8d14a..fea03c3e0 100644 --- a/src/payment/pending_payment_store.rs +++ b/src/payment/pending_payment_store.rs @@ -53,17 +53,17 @@ impl StorableObject for PendingPaymentDetails { self.details.id } - fn update(&mut self, update: &Self::Update) -> bool { + fn update(&mut self, update: Self::Update) -> bool { let mut updated = false; // Update the underlying payment details if present - if let Some(payment_update) = &update.payment_update { + if let Some(payment_update) = update.payment_update { updated |= self.details.update(payment_update); } - if let Some(new_conflicting_txids) = &update.conflicting_txids { - if &self.conflicting_txids != new_conflicting_txids { - self.conflicting_txids = new_conflicting_txids.clone(); + if let Some(new_conflicting_txids) = update.conflicting_txids { + if self.conflicting_txids != new_conflicting_txids { + self.conflicting_txids = new_conflicting_txids; updated = true; } } diff --git a/src/payment/store.rs b/src/payment/store.rs index 15e94190c..58b410894 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -170,7 +170,7 @@ impl StorableObject for PaymentDetails { self.id } - fn update(&mut self, update: &Self::Update) -> bool { + fn update(&mut self, update: Self::Update) -> bool { debug_assert_eq!( self.id, update.id, "We should only ever override payment data for the same payment id" From b476e05ad9944d51454ac8f0ae88f7e32f1d5ca4 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 23 Jan 2026 14:08:39 +0100 Subject: [PATCH 57/75] Move `async_payment_role` to `TestConfig` It's 
weird to have a special intermediary `setup_node` method if we have `TestConfig` for exactly that reason by now. So we move `async_payment_role` over. --- tests/common/mod.rs | 13 ++++--------- tests/integration_tests_rust.rs | 24 +++++++----------------- 2 files changed, 11 insertions(+), 26 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 5f6657260..f0065e89a 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -292,6 +292,7 @@ pub(crate) struct TestConfig { pub log_writer: TestLogWriter, pub store_type: TestStoreType, pub node_entropy: NodeEntropy, + pub async_payments_role: Option, } impl Default for TestConfig { @@ -302,7 +303,8 @@ impl Default for TestConfig { let mnemonic = generate_entropy_mnemonic(None); let node_entropy = NodeEntropy::from_bip39_mnemonic(mnemonic, None); - TestConfig { node_config, log_writer, store_type, node_entropy } + let async_payments_role = None; + TestConfig { node_config, log_writer, store_type, node_entropy, async_payments_role } } } @@ -359,13 +361,6 @@ pub(crate) fn setup_two_nodes_with_store( } pub(crate) fn setup_node(chain_source: &TestChainSource, config: TestConfig) -> TestNode { - setup_node_for_async_payments(chain_source, config, None) -} - -pub(crate) fn setup_node_for_async_payments( - chain_source: &TestChainSource, config: TestConfig, - async_payments_role: Option, -) -> TestNode { setup_builder!(builder, config.node_config); match chain_source { TestChainSource::Esplora(electrsd) => { @@ -419,7 +414,7 @@ pub(crate) fn setup_node_for_async_payments( }, } - builder.set_async_payments_role(async_payments_role).unwrap(); + builder.set_async_payments_role(config.async_payments_role).unwrap(); let node = match config.store_type { TestStoreType::TestSyncStore => { diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 605dd0613..a598b6879 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -23,8 +23,7 @@ use common::{ expect_splice_pending_event, generate_blocks_and_wait, open_channel, open_channel_push_amt, premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config, random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, setup_node, - setup_node_for_async_payments, setup_two_nodes, wait_for_tx, TestChainSource, TestStoreType, - TestSyncStore, + setup_two_nodes, wait_for_tx, TestChainSource, TestStoreType, TestSyncStore, }; use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig}; use ldk_node::entropy::NodeEntropy; @@ -1317,30 +1316,21 @@ async fn async_payment() { config_sender.node_config.node_alias = None; config_sender.log_writer = TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("sender ".to_string()))); - let node_sender = setup_node_for_async_payments( - &chain_source, - config_sender, - Some(AsyncPaymentsRole::Client), - ); + config_sender.async_payments_role = Some(AsyncPaymentsRole::Client); + let node_sender = setup_node(&chain_source, config_sender); let mut config_sender_lsp = random_config(true); config_sender_lsp.log_writer = TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("sender_lsp ".to_string()))); - let node_sender_lsp = setup_node_for_async_payments( - &chain_source, - config_sender_lsp, - Some(AsyncPaymentsRole::Server), - ); + config_sender_lsp.async_payments_role = Some(AsyncPaymentsRole::Server); + let node_sender_lsp = setup_node(&chain_source, config_sender_lsp); let mut config_receiver_lsp = random_config(true); config_receiver_lsp.log_writer = 
TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("receiver_lsp".to_string()))); + config_receiver_lsp.async_payments_role = Some(AsyncPaymentsRole::Server); - let node_receiver_lsp = setup_node_for_async_payments( - &chain_source, - config_receiver_lsp, - Some(AsyncPaymentsRole::Server), - ); + let node_receiver_lsp = setup_node(&chain_source, config_receiver_lsp); let mut config_receiver = random_config(true); config_receiver.node_config.listening_addresses = None; From 325c936f21c08601a09c45900ab432606980d771 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 23 Jan 2026 14:36:08 +0100 Subject: [PATCH 58/75] Randomize chain sources in tests .. all of our tests should be robust against switching chain sources. We here opt to pick a random one each time to considerably extend our test coverage, instead of just running some cases against non-Esplora chain sources. Signed-off-by: Elias Rohrer --- benches/payments.rs | 4 +- tests/common/mod.rs | 25 ++++++++ tests/integration_tests_rust.rs | 101 +++++++++++--------------------- tests/reorg_test.rs | 16 ++--- 4 files changed, 68 insertions(+), 78 deletions(-) diff --git a/benches/payments.rs b/benches/payments.rs index ba69e046d..52769d794 100644 --- a/benches/payments.rs +++ b/benches/payments.rs @@ -8,7 +8,7 @@ use bitcoin::hex::DisplayHex; use bitcoin::Amount; use common::{ expect_channel_ready_event, generate_blocks_and_wait, premine_and_distribute_funds, - setup_bitcoind_and_electrsd, setup_two_nodes_with_store, TestChainSource, + random_chain_source, setup_bitcoind_and_electrsd, setup_two_nodes_with_store, }; use criterion::{criterion_group, criterion_main, Criterion}; use ldk_node::{Event, Node}; @@ -119,7 +119,7 @@ async fn send_payments(node_a: Arc, node_b: Arc) -> std::time::Durat fn payment_benchmark(c: &mut Criterion) { // Set up two nodes. Because this is slow, we reuse the same nodes for each sample. 
let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes_with_store( &chain_source, diff --git a/tests/common/mod.rs b/tests/common/mod.rs index f0065e89a..c743ec120 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -206,6 +206,31 @@ pub(crate) fn setup_bitcoind_and_electrsd() -> (BitcoinD, ElectrsD) { (bitcoind, electrsd) } +pub(crate) fn random_chain_source<'a>( + bitcoind: &'a BitcoinD, electrsd: &'a ElectrsD, +) -> TestChainSource<'a> { + let r = rand::random_range(0..3); + match r { + 0 => { + println!("Randomly setting up Esplora chain syncing..."); + TestChainSource::Esplora(electrsd) + }, + 1 => { + println!("Randomly setting up Electrum chain syncing..."); + TestChainSource::Electrum(electrsd) + }, + 2 => { + println!("Randomly setting up Bitcoind RPC chain syncing..."); + TestChainSource::BitcoindRpcSync(bitcoind) + }, + 3 => { + println!("Randomly setting up Bitcoind REST chain syncing..."); + TestChainSource::BitcoindRestSync(bitcoind) + }, + _ => unreachable!(), + } +} + pub(crate) fn random_storage_path() -> PathBuf { let mut temp_path = std::env::temp_dir(); let mut rng = rng(); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index a598b6879..2152ce5fd 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -21,7 +21,7 @@ use common::{ expect_channel_pending_event, expect_channel_ready_event, expect_event, expect_payment_claimable_event, expect_payment_received_event, expect_payment_successful_event, expect_splice_pending_event, generate_blocks_and_wait, open_channel, open_channel_push_amt, - premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config, + premine_and_distribute_funds, premine_blocks, prepare_rbf, random_chain_source, random_config, random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, setup_node, setup_two_nodes, wait_for_tx, TestChainSource, TestStoreType, TestSyncStore, }; @@ -43,34 +43,7 @@ use log::LevelFilter; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); - let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) - .await; -} - -#[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn channel_full_cycle_electrum() { - let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Electrum(&electrsd); - let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) - .await; -} - -#[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn channel_full_cycle_bitcoind_rpc_sync() { - let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::BitcoindRpcSync(&bitcoind); - let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) - .await; -} - -#[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn channel_full_cycle_bitcoind_rest_sync() { - let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let 
chain_source = TestChainSource::BitcoindRestSync(&bitcoind); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) .await; @@ -79,7 +52,7 @@ async fn channel_full_cycle_bitcoind_rest_sync() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_full_cycle_force_close() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true) .await; @@ -88,7 +61,7 @@ async fn channel_full_cycle_force_close() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_full_cycle_force_close_trusted_no_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, true); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true) .await; @@ -97,7 +70,7 @@ async fn channel_full_cycle_force_close_trusted_no_reserve() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_full_cycle_0conf() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, true, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, true, true, false) .await; @@ -106,7 +79,7 @@ async fn channel_full_cycle_0conf() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_full_cycle_legacy_staticremotekey() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, false, false) .await; @@ -115,7 +88,7 @@ async fn channel_full_cycle_legacy_staticremotekey() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_open_fails_when_funds_insufficient() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let addr_a = node_a.onchain_payment().new_address().unwrap(); @@ -322,7 +295,7 @@ async fn start_stop_reinit() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn onchain_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let addr_a = node_a.onchain_payment().new_address().unwrap(); @@ -523,7 +496,7 @@ async fn onchain_send_receive() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn 
onchain_send_all_retains_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); // Setup nodes @@ -608,7 +581,7 @@ async fn onchain_send_all_retains_reserve() { async fn onchain_wallet_recovery() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let original_config = random_config(true); let original_node_entropy = original_config.node_entropy; @@ -823,9 +796,9 @@ async fn run_rbf_test(is_insert_block: bool) { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn sign_verify_msg() { - let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let config = random_config(true); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let node = setup_node(&chain_source, config); // Tests arbitrary message signing and later verification @@ -837,8 +810,8 @@ async fn sign_verify_msg() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn connection_multi_listen() { - let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); let node_id_b = node_b.node_id(); @@ -857,8 +830,8 @@ async fn connection_restart_behavior() { } async fn do_connection_restart_behavior(persist: bool) { - let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); let node_id_a = node_a.node_id(); @@ -904,8 +877,8 @@ async fn do_connection_restart_behavior(persist: bool) { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn concurrent_connections_succeed() { - let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let node_a = Arc::new(node_a); @@ -929,13 +902,11 @@ async fn concurrent_connections_succeed() { } } -async fn run_splice_channel_test(bitcoind_chain_source: bool) { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn splice_channel() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = if bitcoind_chain_source { - TestChainSource::BitcoindRpcSync(&bitcoind) - } else { - TestChainSource::Esplora(&electrsd) - }; + let chain_source = random_chain_source(&bitcoind, &electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); @@ -1071,16 +1042,10 @@ async fn run_splice_channel_test(bitcoind_chain_source: bool) { ); } -#[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn splice_channel() { - 
run_splice_channel_test(false).await; - run_splice_channel_test(true).await; -} - #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn simple_bolt12_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); @@ -1309,7 +1274,7 @@ async fn simple_bolt12_send_receive() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn async_payment() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let mut config_sender = random_config(true); config_sender.node_config.listening_addresses = None; @@ -1435,7 +1400,7 @@ async fn async_payment() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_node_announcement_propagation() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); // Node A will use both listening and announcement addresses let mut config_a = random_config(true); @@ -1527,7 +1492,7 @@ async fn test_node_announcement_propagation() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn generate_bip21_uri() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); @@ -1582,7 +1547,7 @@ async fn generate_bip21_uri() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn unified_send_receive_bip21_uri() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); @@ -1921,8 +1886,8 @@ async fn do_lsps2_client_service_integration(client_trusts_lsp: bool) { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn facade_logging() { - let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); let logger = init_log_logger(LevelFilter::Trace); let mut config = random_config(false); @@ -1940,7 +1905,7 @@ async fn facade_logging() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn spontaneous_send_with_custom_preimage() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); @@ -2006,8 +1971,8 @@ async fn spontaneous_send_with_custom_preimage() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn drop_in_async_context() { - let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let 
chain_source = random_chain_source(&bitcoind, &electrsd);
 	let config = random_config(true);
 	let node = setup_node(&chain_source, config);
 	node.stop().unwrap();
@@ -2318,7 +2283,7 @@ async fn lsps2_lsp_trusts_client_but_client_does_not_claim() {
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
 async fn payment_persistence_after_restart() {
 	let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
-	let chain_source = TestChainSource::Esplora(&electrsd);
+	let chain_source = random_chain_source(&bitcoind, &electrsd);
 
 	// Setup nodes manually so we can restart node_a with the same config
 	println!("== Node A ==");
diff --git a/tests/reorg_test.rs b/tests/reorg_test.rs
index 89660a407..295d9fdd2 100644
--- a/tests/reorg_test.rs
+++ b/tests/reorg_test.rs
@@ -9,8 +9,8 @@ use proptest::proptest;
 
 use crate::common::{
 	expect_event, generate_blocks_and_wait, invalidate_blocks, open_channel,
-	premine_and_distribute_funds, random_config, setup_bitcoind_and_electrsd, setup_node,
-	wait_for_outpoint_spend, TestChainSource,
+	premine_and_distribute_funds, random_chain_source, random_config, setup_bitcoind_and_electrsd,
+	setup_node, wait_for_outpoint_spend,
 };
 
 proptest! {
@@ -24,9 +24,9 @@ proptest! {
 	rt.block_on(async {
 		let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
 
-		let chain_source_bitcoind = TestChainSource::BitcoindRpcSync(&bitcoind);
-		let chain_source_electrsd = TestChainSource::Electrum(&electrsd);
-		let chain_source_esplora = TestChainSource::Esplora(&electrsd);
+		let chain_source_a = random_chain_source(&bitcoind, &electrsd);
+		let chain_source_b = random_chain_source(&bitcoind, &electrsd);
+		let chain_source_c = random_chain_source(&bitcoind, &electrsd);
 
 		macro_rules! config_node {
			($chain_source: expr, $anchor_channels: expr) => {{
@@ -37,9 +37,9 @@ proptest! {
 		}
 		let anchor_channels = true;
 		let nodes = vec![
-			config_node!(chain_source_electrsd, anchor_channels),
-			config_node!(chain_source_bitcoind, anchor_channels),
-			config_node!(chain_source_esplora, anchor_channels),
+			config_node!(chain_source_a, anchor_channels),
+			config_node!(chain_source_b, anchor_channels),
+			config_node!(chain_source_c, anchor_channels),
 		];
 
 		let (bitcoind, electrs) = (&bitcoind.client, &electrsd.client);

From 48e9b54b5f65970707aed7d4af56eb1116089e77 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Fri, 23 Jan 2026 16:19:49 +0100
Subject: [PATCH 59/75] Fix: Also update the payment store for mempool
 transactions

When we initially implemented `bitcoind` syncing, polling the mempool was
very frequent and rather inefficient, so we chose not to unnecessarily
update the payment store for mempool changes, especially since we only
consider transactions `Succeeded` after `ANTI_REORG_DELAY` anyways.

However, since then we made quite a few performance improvements to the
mempool syncing, and by now we should just update the payment store, as not
doing so will lead to rather unexpected behavior, making some tests fail
for `TestChainSource::Bitcoind`, e.g., `channel_full_cycle_0conf`, which we
fix here.

We recently switched to updating the payment store based on BDK's
`WalletEvent`s, but BDK currently doesn't offer an API returning such
events when applying mempool transactions, so we copy over the respective
method for generating events from `bdk_wallet`, with the intention of
dropping it again once they do.
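In outline, the event derivation snapshots the wallet's transactions before
and after applying the mempool data and diffs the two snapshots. A minimal,
self-contained sketch with simplified stand-in types (the method copied from
`bdk_wallet` below additionally tracks confirmation anchors and replacement
conflicts):

```rust
use std::collections::BTreeMap;

// Simplified stand-ins for bdk's `Txid` and `ChainPosition`, for illustration only.
type Txid = u32;

#[derive(Clone, Copy, PartialEq, Eq)]
enum Pos {
    Unconfirmed,
    Confirmed { height: u32 },
}

#[derive(Debug)]
enum Event {
    TxConfirmed(Txid),
    TxUnconfirmed(Txid),
    TxDropped(Txid),
}

// Derive events from the difference between a pre- and post-update snapshot.
fn diff_events(before: &BTreeMap<Txid, Pos>, after: &BTreeMap<Txid, Pos>) -> Vec<Event> {
    let mut events = Vec::new();
    for (txid, pos) in after {
        match (before.get(txid), pos) {
            // Transitioned into a block, or appeared already confirmed.
            (Some(Pos::Unconfirmed), Pos::Confirmed { .. }) | (None, Pos::Confirmed { .. }) => {
                events.push(Event::TxConfirmed(*txid))
            },
            // Dropped back into (or newly appeared in) the mempool.
            (Some(Pos::Confirmed { .. }), Pos::Unconfirmed) | (None, Pos::Unconfirmed) => {
                events.push(Event::TxUnconfirmed(*txid))
            },
            _ => {},
        }
    }
    // Anything that disappeared entirely is no longer canonical (evicted or replaced).
    for txid in before.keys() {
        if !after.contains_key(txid) {
            events.push(Event::TxDropped(*txid));
        }
    }
    events
}
```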
Signed-off-by: Elias Rohrer
---
 src/wallet/mod.rs | 133 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 133 insertions(+)

diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs
index f808e9a3f..aa2cba13c 100644
--- a/src/wallet/mod.rs
+++ b/src/wallet/mod.rs
@@ -152,10 +152,41 @@ impl Wallet {
 	pub(crate) fn apply_mempool_txs(
 		&self, unconfirmed_txs: Vec<(Transaction, u64)>, evicted_txids: Vec<(Txid, u64)>,
 	) -> Result<(), Error> {
+		if unconfirmed_txs.is_empty() && evicted_txids.is_empty() {
+			return Ok(());
+		}
+
 		let mut locked_wallet = self.inner.lock().unwrap();
+
+		let chain_tip1 = locked_wallet.latest_checkpoint().block_id();
+		let wallet_txs1 = locked_wallet
+			.transactions()
+			.map(|wtx| (wtx.tx_node.txid, (wtx.tx_node.tx.clone(), wtx.chain_position)))
+			.collect::<std::collections::BTreeMap<
+				Txid,
+				(Arc<Transaction>, bdk_chain::ChainPosition<bdk_chain::ConfirmationBlockTime>),
+			>>();
+
 		locked_wallet.apply_unconfirmed_txs(unconfirmed_txs);
 		locked_wallet.apply_evicted_txs(evicted_txids);
 
+		let chain_tip2 = locked_wallet.latest_checkpoint().block_id();
+		let wallet_txs2 = locked_wallet
+			.transactions()
+			.map(|wtx| (wtx.tx_node.txid, (wtx.tx_node.tx.clone(), wtx.chain_position)))
+			.collect::<std::collections::BTreeMap<
+				Txid,
+				(Arc<Transaction>, bdk_chain::ChainPosition<bdk_chain::ConfirmationBlockTime>),
+			>>();
+
+		let events =
+			wallet_events(&mut *locked_wallet, chain_tip1, chain_tip2, wallet_txs1, wallet_txs2);
+
+		self.update_payment_store(&mut *locked_wallet, events).map_err(|e| {
+			log_error!(self.logger, "Failed to update payment store: {}", e);
+			Error::PersistenceFailed
+		})?;
+
 		let mut locked_persister = self.persister.lock().unwrap();
 		locked_wallet.persist(&mut locked_persister).map_err(|e| {
 			log_error!(self.logger, "Failed to persist wallet: {}", e);
@@ -1215,3 +1246,105 @@ impl ChangeDestinationSource for WalletKeysManager {
 		}
 	}
 }
+
+// FIXME/TODO: This is copied over from bdk_wallet and only used to generate `WalletEvent`s after
+// applying mempool transactions. We should drop this when BDK offers to generate events for
+// mempool transactions natively.
+pub(crate) fn wallet_events(
+	wallet: &mut bdk_wallet::Wallet, chain_tip1: bdk_chain::BlockId,
+	chain_tip2: bdk_chain::BlockId,
+	wallet_txs1: std::collections::BTreeMap<
+		Txid,
+		(Arc<Transaction>, bdk_chain::ChainPosition<bdk_chain::ConfirmationBlockTime>),
+	>,
+	wallet_txs2: std::collections::BTreeMap<
+		Txid,
+		(Arc<Transaction>, bdk_chain::ChainPosition<bdk_chain::ConfirmationBlockTime>),
+	>,
+) -> Vec<WalletEvent> {
+	let mut events: Vec<WalletEvent> = Vec::new();
+
+	if chain_tip1 != chain_tip2 {
+		events.push(WalletEvent::ChainTipChanged { old_tip: chain_tip1, new_tip: chain_tip2 });
+	}
+
+	wallet_txs2.iter().for_each(|(txid2, (tx2, cp2))| {
+		if let Some((tx1, cp1)) = wallet_txs1.get(txid2) {
+			assert_eq!(tx1.compute_txid(), *txid2);
+			match (cp1, cp2) {
+				(
+					bdk_chain::ChainPosition::Unconfirmed { .. },
+					bdk_chain::ChainPosition::Confirmed { anchor, .. },
+				) => {
+					events.push(WalletEvent::TxConfirmed {
+						txid: *txid2,
+						tx: tx2.clone(),
+						block_time: *anchor,
+						old_block_time: None,
+					});
+				},
+				(
+					bdk_chain::ChainPosition::Confirmed { anchor, .. },
+					bdk_chain::ChainPosition::Unconfirmed { .. },
+				) => {
+					events.push(WalletEvent::TxUnconfirmed {
+						txid: *txid2,
+						tx: tx2.clone(),
+						old_block_time: Some(*anchor),
+					});
+				},
+				(
+					bdk_chain::ChainPosition::Confirmed { anchor: anchor1, .. },
+					bdk_chain::ChainPosition::Confirmed { anchor: anchor2, .. },
+				) => {
+					if *anchor1 != *anchor2 {
+						events.push(WalletEvent::TxConfirmed {
+							txid: *txid2,
+							tx: tx2.clone(),
+							block_time: *anchor2,
+							old_block_time: Some(*anchor1),
+						});
+					}
+				},
+				(
+					bdk_chain::ChainPosition::Unconfirmed { .. },
+					bdk_chain::ChainPosition::Unconfirmed { .. },
+				) => {
+					// do nothing if still unconfirmed
+				},
+			}
+		} else {
+			match cp2 {
+				bdk_chain::ChainPosition::Confirmed { anchor, .. } => {
+					events.push(WalletEvent::TxConfirmed {
+						txid: *txid2,
+						tx: tx2.clone(),
+						block_time: *anchor,
+						old_block_time: None,
+					});
+				},
+				bdk_chain::ChainPosition::Unconfirmed { .. } => {
+					events.push(WalletEvent::TxUnconfirmed {
+						txid: *txid2,
+						tx: tx2.clone(),
+						old_block_time: None,
+					});
+				},
+			}
+		}
+	});
+
+	// find txs that are no longer canonical
+	wallet_txs1.iter().for_each(|(txid1, (tx1, _))| {
+		if !wallet_txs2.contains_key(txid1) {
+			let conflicts = wallet.tx_graph().direct_conflicts(tx1).collect::<Vec<_>>();
+			if !conflicts.is_empty() {
+				events.push(WalletEvent::TxReplaced { txid: *txid1, tx: tx1.clone(), conflicts });
+			} else {
+				events.push(WalletEvent::TxDropped { txid: *txid1, tx: tx1.clone() });
+			}
+		}
+	});
+
+	events
+}

From 9a90003d2d9981c96ddd94d4ecff0b56419636c4 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Fri, 23 Jan 2026 17:04:34 +0100
Subject: [PATCH 60/75] Fix: Add (onchain) recovery mode

Previously, we fixed that a fresh node syncing via `bitcoind` RPC would
resync all chain data back to genesis. However, while introducing a wallet
birthday is great, it disallowed discovery of historical funds when a
wallet was imported from seed.

Here, we add a recovery mode flag to the builder that explicitly allows
re-enabling resyncing from genesis in such a scenario. Going forward, we
intend to reuse that API for an upcoming Lightning recovery flow, too.
---
 bindings/ldk_node.udl           |  1 +
 src/builder.rs                  | 57 ++++++++++++++++++++++++---------
 tests/common/mod.rs             | 15 ++++++++++++++-
 tests/integration_tests_rust.rs |  1 +
 4 files changed, 58 insertions(+), 16 deletions(-)

diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl
index 6bd031379..92622fda4 100644
--- a/bindings/ldk_node.udl
+++ b/bindings/ldk_node.udl
@@ -129,6 +129,7 @@ interface Builder {
 	void set_node_alias(string node_alias);
 	[Throws=BuildError]
 	void set_async_payments_role(AsyncPaymentsRole? role);
+	void set_wallet_recovery_mode();
 	[Throws=BuildError]
 	Node build(NodeEntropy node_entropy);
 	[Throws=BuildError]
diff --git a/src/builder.rs b/src/builder.rs
index 5d8a5a7a9..ecfe4878c 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -244,6 +244,7 @@ pub struct NodeBuilder {
 	async_payments_role: Option<AsyncPaymentsRole>,
 	runtime_handle: Option<tokio::runtime::Handle>,
 	pathfinding_scores_sync_config: Option<PathfindingScoresSyncConfig>,
+	recovery_mode: bool,
 }
 
 impl NodeBuilder {
@@ -261,6 +262,7 @@ impl NodeBuilder {
 		let log_writer_config = None;
 		let runtime_handle = None;
 		let pathfinding_scores_sync_config = None;
+		let recovery_mode = false;
 		Self {
 			config,
 			chain_data_source_config,
@@ -270,6 +272,7 @@ impl NodeBuilder {
 			runtime_handle,
 			async_payments_role: None,
 			pathfinding_scores_sync_config,
+			recovery_mode,
 		}
 	}
 
@@ -544,6 +547,16 @@ impl NodeBuilder {
 		Ok(self)
 	}
 
+	/// Configures the [`Node`] to resync chain data from genesis on first startup, recovering any
+	/// historical wallet funds.
+	///
+	/// This should only be set on first startup when importing an older wallet from a previously
+	/// used [`NodeEntropy`].
+	pub fn set_wallet_recovery_mode(&mut self) -> &mut Self {
+		self.recovery_mode = true;
+		self
+	}
+
 	/// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
 	/// previously configured.
 	pub fn build(&self, node_entropy: NodeEntropy) -> Result<Node, BuildError> {
@@ -679,6 +692,7 @@ impl NodeBuilder {
 			self.liquidity_source_config.as_ref(),
 			self.pathfinding_scores_sync_config.as_ref(),
 			self.async_payments_role,
+			self.recovery_mode,
 			seed_bytes,
 			runtime,
 			logger,
@@ -919,6 +933,15 @@ impl ArcedNodeBuilder {
 		self.inner.write().unwrap().set_async_payments_role(role).map(|_| ())
 	}
 
+	/// Configures the [`Node`] to resync chain data from genesis on first startup, recovering any
+	/// historical wallet funds.
+	///
+	/// This should only be set on first startup when importing an older wallet from a previously
+	/// used [`NodeEntropy`].
+	pub fn set_wallet_recovery_mode(&self) {
+		self.inner.write().unwrap().set_wallet_recovery_mode();
+	}
+
 	/// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
 	/// previously configured.
 	pub fn build(&self, node_entropy: Arc<NodeEntropy>) -> Result<Arc<Node>, BuildError> {
@@ -1033,8 +1056,8 @@ fn build_with_store_internal(
 	gossip_source_config: Option<&GossipSourceConfig>,
 	liquidity_source_config: Option<&LiquiditySourceConfig>,
 	pathfinding_scores_sync_config: Option<&PathfindingScoresSyncConfig>,
-	async_payments_role: Option<AsyncPaymentsRole>, seed_bytes: [u8; 64], runtime: Arc<Runtime>,
-	logger: Arc<Logger>, kv_store: Arc<DynStore>,
+	async_payments_role: Option<AsyncPaymentsRole>, recovery_mode: bool, seed_bytes: [u8; 64],
+	runtime: Arc<Runtime>, logger: Arc<Logger>, kv_store: Arc<DynStore>,
 ) -> Result<Node, BuildError> {
 	optionally_install_rustls_cryptoprovider();
 
@@ -1230,19 +1253,23 @@ fn build_with_store_internal(
 				BuildError::WalletSetupFailed
 			})?;
 
-			if let Some(best_block) = chain_tip_opt {
-				// Insert the first checkpoint if we have it, to avoid resyncing from genesis.
-				// TODO: Use a proper wallet birthday once BDK supports it.
-				let mut latest_checkpoint = wallet.latest_checkpoint();
-				let block_id =
-					bdk_chain::BlockId { height: best_block.height, hash: best_block.block_hash };
-				latest_checkpoint = latest_checkpoint.insert(block_id);
-				let update =
-					bdk_wallet::Update { chain: Some(latest_checkpoint), ..Default::default() };
-				wallet.apply_update(update).map_err(|e| {
-					log_error!(logger, "Failed to apply checkpoint during wallet setup: {}", e);
-					BuildError::WalletSetupFailed
-				})?;
+			if !recovery_mode {
+				if let Some(best_block) = chain_tip_opt {
+					// Insert the first checkpoint if we have it, to avoid resyncing from genesis.
+					// TODO: Use a proper wallet birthday once BDK supports it.
+					let mut latest_checkpoint = wallet.latest_checkpoint();
+					let block_id = bdk_chain::BlockId {
+						height: best_block.height,
+						hash: best_block.block_hash,
+					};
+					latest_checkpoint = latest_checkpoint.insert(block_id);
+					let update =
+						bdk_wallet::Update { chain: Some(latest_checkpoint), ..Default::default() };
+					wallet.apply_update(update).map_err(|e| {
+						log_error!(logger, "Failed to apply checkpoint during wallet setup: {}", e);
+						BuildError::WalletSetupFailed
+					})?;
+				}
 			}
 			wallet
 		},
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index c743ec120..41d22c690 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -318,6 +318,7 @@ pub(crate) struct TestConfig {
 	pub store_type: TestStoreType,
 	pub node_entropy: NodeEntropy,
 	pub async_payments_role: Option<AsyncPaymentsRole>,
+	pub recovery_mode: bool,
 }
 
 impl Default for TestConfig {
@@ -329,7 +330,15 @@ impl Default for TestConfig {
 		let mnemonic = generate_entropy_mnemonic(None);
 		let node_entropy = NodeEntropy::from_bip39_mnemonic(mnemonic, None);
 		let async_payments_role = None;
-		TestConfig { node_config, log_writer, store_type, node_entropy, async_payments_role }
+		let recovery_mode = false;
+		TestConfig {
+			node_config,
+			log_writer,
+			store_type,
+			node_entropy,
+			async_payments_role,
+			recovery_mode,
+		}
 	}
 }
 
@@ -441,6 +450,10 @@ pub(crate) fn setup_node(chain_source: &TestChainSource, config: TestConfig) ->
 	builder.set_async_payments_role(config.async_payments_role).unwrap();
 
+	if config.recovery_mode {
+		builder.set_wallet_recovery_mode();
+	}
+
 	let node = match config.store_type {
 		TestStoreType::TestSyncStore => {
 			let kv_store = TestSyncStore::new(config.node_config.storage_dir_path.into());
diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs
index 2152ce5fd..9ea05aa1e 100644
--- a/tests/integration_tests_rust.rs
+++ b/tests/integration_tests_rust.rs
@@ -626,6 +626,7 @@ async fn onchain_wallet_recovery() {
 	// Now we start from scratch, only the seed remains the same.
 	let mut recovered_config = random_config(true);
 	recovered_config.node_entropy = original_node_entropy;
+	recovered_config.recovery_mode = true;
 	let recovered_node = setup_node(&chain_source, recovered_config);
 
 	recovered_node.sync_wallets().unwrap();

From 788055f6748b1bb7e3356950008b34b5d6891cef Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 12 Feb 2026 10:55:06 +0100
Subject: [PATCH 61/75] Fix: Insert all LDK-registered transaction outputs
 into wallet

Previously, we'd selectively insert the funding outputs into the onchain
wallet to later allow calculating `fees_paid` when creating payment store
entries (mostly only for splicing). However, this didn't always work, and
we might for example end up with a missing funding output (and hence would
fall back to `fees_paid: Some(0)`) if it was a counterparty-initiated
channel and we synced via `bitcoind` RPC.

Here, we fix this by tracking all LDK-registered `txids` in `ChainSource`
and then, in the `Wallet`'s `Listen` implementation, inserting all outputs
of all registered transactions into the `Wallet`, ensuring we always have
sufficient data available for `calculate_fee`.
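A condensed sketch of that flow (a hypothetical free-standing helper for
illustration; the real change below threads a shared `ChainSource` handle
into the `Wallet` instead):

```rust
use std::sync::Mutex;

use bitcoin::{Block, OutPoint, TxOut, Txid};

// Hypothetical stand-alone helper mirroring the mechanism described above.
struct RegisteredTxids(Mutex<Vec<Txid>>);

impl RegisteredTxids {
    // Record every txid LDK hands us via `Filter::register_tx`.
    fn register(&self, txid: Txid) {
        self.0.lock().unwrap().push(txid);
    }

    // On each connected block, hand all outputs of registered transactions to the
    // wallet (via the supplied callback) so `calculate_fee` can resolve the prevouts
    // of any later spend, e.g., a splice spending the channel funding output.
    fn insert_registered_outputs(
        &self, block: &Block, mut insert_txout: impl FnMut(OutPoint, TxOut),
    ) {
        let registered = self.0.lock().unwrap();
        for tx in &block.txdata {
            let txid = tx.compute_txid();
            if registered.contains(&txid) {
                for (vout, txout) in tx.output.iter().enumerate() {
                    insert_txout(OutPoint { txid, vout: vout as u32 }, txout.clone());
                }
            }
        }
    }
}
```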
Thereby, we also fix the `onchain_send_receive` test, which previously
failed when using `TestChainSource::Bitcoind`.

Signed-off-by: Elias Rohrer
---
 src/builder.rs    |  1 +
 src/chain/mod.rs  | 20 +++++++++++++++-----
 src/lib.rs        | 16 ----------------
 src/wallet/mod.rs | 41 +++++++++++++++++++++--------------------
 4 files changed, 41 insertions(+), 37 deletions(-)

diff --git a/src/builder.rs b/src/builder.rs
index ecfe4878c..7a285876f 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -1294,6 +1294,7 @@ fn build_with_store_internal(
 		wallet_persister,
 		Arc::clone(&tx_broadcaster),
 		Arc::clone(&fee_estimator),
+		Arc::clone(&chain_source),
 		Arc::clone(&payment_store),
 		Arc::clone(&config),
 		Arc::clone(&logger),
diff --git a/src/chain/mod.rs b/src/chain/mod.rs
index afd502363..49c011a78 100644
--- a/src/chain/mod.rs
+++ b/src/chain/mod.rs
@@ -10,7 +10,7 @@ mod electrum;
 mod esplora;
 
 use std::collections::HashMap;
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, Mutex, RwLock};
 use std::time::Duration;
 
 use bitcoin::{Script, Txid};
@@ -84,6 +84,7 @@ impl WalletSyncStatus {
 
 pub(crate) struct ChainSource {
 	kind: ChainSourceKind,
+	registered_txids: Mutex<Vec<Txid>>,
 	tx_broadcaster: Arc<Broadcaster>,
 	logger: Arc<Logger>,
 }
@@ -112,7 +113,8 @@ impl ChainSource {
 			node_metrics,
 		);
 		let kind = ChainSourceKind::Esplora(esplora_chain_source);
-		(Self { kind, tx_broadcaster, logger }, None)
+		let registered_txids = Mutex::new(Vec::new());
+		(Self { kind, registered_txids, tx_broadcaster, logger }, None)
 	}
 
 	pub(crate) fn new_electrum(
@@ -131,7 +133,8 @@ impl ChainSource {
 			node_metrics,
 		);
 		let kind = ChainSourceKind::Electrum(electrum_chain_source);
-		(Self { kind, tx_broadcaster, logger }, None)
+		let registered_txids = Mutex::new(Vec::new());
+		(Self { kind, registered_txids, tx_broadcaster, logger }, None)
 	}
 
 	pub(crate) async fn new_bitcoind_rpc(
@@ -153,7 +156,8 @@ impl ChainSource {
 		);
 		let best_block = bitcoind_chain_source.poll_best_block().await.ok();
 		let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source);
-		(Self { kind, tx_broadcaster, logger }, best_block)
+		let registered_txids = Mutex::new(Vec::new());
+		(Self { kind, registered_txids, tx_broadcaster, logger }, best_block)
 	}
 
 	pub(crate) async fn new_bitcoind_rest(
@@ -176,7 +180,8 @@ impl ChainSource {
 		);
 		let best_block = bitcoind_chain_source.poll_best_block().await.ok();
 		let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source);
-		(Self { kind, tx_broadcaster, logger }, best_block)
+		let registered_txids = Mutex::new(Vec::new());
+		(Self { kind, registered_txids, tx_broadcaster, logger }, best_block)
 	}
 
 	pub(crate) fn start(&self, runtime: Arc<Runtime>) -> Result<(), Error> {
@@ -209,6 +214,10 @@ impl ChainSource {
 		}
 	}
 
+	pub(crate) fn registered_txids(&self) -> Vec<Txid> {
+		self.registered_txids.lock().unwrap().clone()
+	}
+
 	pub(crate) fn is_transaction_based(&self) -> bool {
 		match &self.kind {
 			ChainSourceKind::Esplora(_) => true,
@@ -463,6 +472,7 @@ impl ChainSource {
 
 impl Filter for ChainSource {
 	fn register_tx(&self, txid: &Txid, script_pubkey: &Script) {
+		self.registered_txids.lock().unwrap().push(*txid);
 		match &self.kind {
 			ChainSourceKind::Esplora(esplora_chain_source) => {
 				esplora_chain_source.register_tx(txid, script_pubkey)
diff --git a/src/lib.rs b/src/lib.rs
index 64a9ac1a2..2b60307b0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1329,10 +1329,6 @@ impl Node {
 			Error::ChannelSplicingFailed
 		})?;
 
-		// insert channel's funding utxo into the wallet so we can later calculate fees
-		// correctly when viewing this splice-in.
-		self.wallet.insert_txo(funding_txo.into_bitcoin_outpoint(), funding_output)?;
-
 		let change_address = self.wallet.get_new_internal_address()?;
 
 		let contribution = SpliceContribution::splice_in(
@@ -1426,18 +1422,6 @@ impl Node {
 			},
 		};
 
-		let funding_txo = channel_details.funding_txo.ok_or_else(|| {
-			log_error!(self.logger, "Failed to splice channel: channel not yet ready",);
-			Error::ChannelSplicingFailed
-		})?;
-
-		let funding_output = channel_details.get_funding_output().ok_or_else(|| {
-			log_error!(self.logger, "Failed to splice channel: channel not yet ready");
-			Error::ChannelSplicingFailed
-		})?;
-
-		self.wallet.insert_txo(funding_txo.into_bitcoin_outpoint(), funding_output)?;
-
 		self.channel_manager
 			.splice_channel(
 				&channel_details.channel_id,
diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs
index aa2cba13c..8bd4f4951 100644
--- a/src/wallet/mod.rs
+++ b/src/wallet/mod.rs
@@ -54,7 +54,7 @@ use crate::payment::{
 	PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, PendingPaymentDetails,
 };
 use crate::types::{Broadcaster, PaymentStore, PendingPaymentStore};
-use crate::Error;
+use crate::{ChainSource, Error};
 
 pub(crate) enum OnchainSendAmount {
 	ExactRetainingReserve { amount_sats: u64, cur_anchor_reserve_sats: u64 },
@@ -71,6 +71,7 @@ pub(crate) struct Wallet {
 	persister: Mutex<KVStoreWalletPersister>,
 	broadcaster: Arc<Broadcaster>,
 	fee_estimator: Arc<OnchainFeeEstimator>,
+	chain_source: Arc<ChainSource>,
 	payment_store: Arc<PaymentStore>,
 	config: Arc<Config>,
 	logger: Arc<Logger>,
@@ -81,8 +82,9 @@ impl Wallet {
 	pub(crate) fn new(
 		wallet: bdk_wallet::PersistedWallet<KVStoreWalletPersister>,
 		wallet_persister: KVStoreWalletPersister, broadcaster: Arc<Broadcaster>,
-		fee_estimator: Arc<OnchainFeeEstimator>, payment_store: Arc<PaymentStore>,
-		config: Arc<Config>, logger: Arc<Logger>, pending_payment_store: Arc<PendingPaymentStore>,
+		fee_estimator: Arc<OnchainFeeEstimator>, chain_source: Arc<ChainSource>,
+		payment_store: Arc<PaymentStore>, config: Arc<Config>, logger: Arc<Logger>,
+		pending_payment_store: Arc<PendingPaymentStore>,
 	) -> Self {
 		let inner = Mutex::new(wallet);
 		let persister = Mutex::new(wallet_persister);
@@ -91,6 +93,7 @@ impl Wallet {
 			persister,
 			broadcaster,
 			fee_estimator,
+			chain_source,
 			payment_store,
 			config,
 			logger,
@@ -196,19 +199,6 @@ impl Wallet {
 		Ok(())
 	}
 
-	pub(crate) fn insert_txo(&self, outpoint: OutPoint, txout: TxOut) -> Result<(), Error> {
-		let mut locked_wallet = self.inner.lock().unwrap();
-		locked_wallet.insert_txout(outpoint, txout);
-
-		let mut locked_persister = self.persister.lock().unwrap();
-		locked_wallet.persist(&mut locked_persister).map_err(|e| {
-			log_error!(self.logger, "Failed to persist wallet: {}", e);
-			Error::PersistenceFailed
-		})?;
-
-		Ok(())
-	}
-
 	fn update_payment_store<'a>(
 		&self, locked_wallet: &'a mut PersistedWallet<KVStoreWalletPersister>,
 		mut events: Vec<WalletEvent>,
@@ -1040,6 +1030,25 @@ impl Listen for Wallet {
 			);
 		}
 
+		// In order to be able to reliably calculate fees, the `Wallet` needs access to the
+		// previous output data. To this end, we here insert the outputs of any transactions
+		// that LDK is interested in (e.g., funding transaction outputs) into the wallet's
+		// transaction graph when we see them, so it is reliably able to calculate fees for
+		// subsequent spends.
+		//
+		// FIXME: Technically, we should also do this for mempool transactions. However, at the
+		// current time fixing that edge case doesn't seem worth the additional complexity /
+		// overhead.
+ let registered_txids = self.chain_source.registered_txids(); + for tx in &block.txdata { + let txid = tx.compute_txid(); + if registered_txids.contains(&txid) { + for (vout, txout) in tx.output.iter().enumerate() { + let outpoint = OutPoint { txid, vout: vout as u32 }; + locked_wallet.insert_txout(outpoint, txout.clone()); + } + } + } + match locked_wallet.apply_block_events(block, height) { Ok(events) => { if let Err(e) = self.update_payment_store(&mut *locked_wallet, events) { From 594e1fbb36cbdfca86db6d339e3321519d078104 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 16 Feb 2026 10:07:36 +0100 Subject: [PATCH 62/75] Update payment store before wallet persistence Previously, we'd update the payment store after persisting the wallet in some cases. This was fine as long as we iterated all wallet transactions anyways (hence idempotent). However, now that we use the event-based flow we should persist the payment store(s) first, so that wallet events get replayed if there was a crash in-between some of the persistence operations. --- src/wallet/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 8bd4f4951..87b544566 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -132,14 +132,14 @@ impl Wallet { let mut locked_wallet = self.inner.lock().unwrap(); match locked_wallet.apply_update_events(update) { Ok(events) => { - let mut locked_persister = self.persister.lock().unwrap(); - locked_wallet.persist(&mut locked_persister).map_err(|e| { - log_error!(self.logger, "Failed to persist wallet: {}", e); + self.update_payment_store(&mut *locked_wallet, events).map_err(|e| { + log_error!(self.logger, "Failed to update payment store: {}", e); Error::PersistenceFailed })?; - self.update_payment_store(&mut *locked_wallet, events).map_err(|e| { - log_error!(self.logger, "Failed to update payment store: {}", e); + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); Error::PersistenceFailed })?; From 2148fb6e04645f48534420ebffd6055d60b37bf5 Mon Sep 17 00:00:00 2001 From: Enigbe Date: Mon, 6 Oct 2025 08:04:45 +0100 Subject: [PATCH 63/75] Introduce and configure node with tiered KVStore Introduces TierStore, a KVStore implementation that manages data across three storage layers: - Primary: Main/remote data store - Ephemeral: Secondary store for non-critical, easily-rebuildable data (e.g., network graph) with fast local access - Backup: Tertiary store for disaster recovery with async/lazy operations to avoid blocking primary store Adds four configuration methods to NodeBuilder: - set_tier_store_backup: Configure backup data store - set_tier_store_ephemeral: Configure ephemeral data store - set_tier_store_retry_config: Configure retry parameters with exponential backoff - build_with_tier_store: Build node with primary data store These methods are exposed to the foreign interface via additions in ffi/types.rs: - ForeignDynStoreTrait: An FFI-safe version of DynStoreTrait - FfiDynStore: A concrete wrapper over foreign language stores that implement ForeignDynStoreTrait. 
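In Rust, wiring this up looks roughly as follows (a sketch against the
builder API added in this patch; `config`, `node_entropy`, and the
`primary`/`ephemeral`/`backup` store instances are assumed to be
constructed elsewhere):

```rust
// Sketch only: the store instances are placeholder `Arc`ed stores
// implementing the required KVStore traits.
let mut builder = NodeBuilder::from_config(config);
builder
    .set_tier_store_ephemeral(ephemeral) // local: network graph, scorer
    .set_tier_store_backup(backup) // local, lazy copy of critical data
    .set_tier_store_retry_config(RetryConfig::default());
let node = builder.build_with_tier_store(node_entropy, primary)?;
```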
---
 Cargo.toml                      |    1 +
 bindings/ldk_node.udl           |   58 ++
 src/builder.rs                  |  216 +++++++
 src/ffi/types.rs                |  244 ++++++++
 src/io/mod.rs                   |    1 +
 src/io/tier_store.rs            | 1267 +++++++++++++++++++++++++++++++++++++
 src/io/utils.rs                 |   14 +
 src/lib.rs                      |   10 +-
 src/types.rs                    |    5 +-
 9 files changed, 1810 insertions(+), 6 deletions(-)
 create mode 100644 src/io/tier_store.rs

diff --git a/Cargo.toml b/Cargo.toml
index 5c82d7d65..dd96f0439 100755
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -63,6 +63,7 @@ bitcoin = "0.32.7"
 bip39 = { version = "2.0.0", features = ["rand"] }
 bip21 = { version = "0.5", features = ["std"], default-features = false }
 
+async-trait = { version = "0.1.89" }
 base64 = { version = "0.22.1", default-features = false, features = ["std"] }
 rand = { version = "0.9.2", default-features = false, features = ["std", "thread_rng", "os_rng"] }
 chrono = { version = "0.4", default-features = false, features = ["clock"] }
diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl
index 92622fda4..b479924b6 100644
--- a/bindings/ldk_node.udl
+++ b/bindings/ldk_node.udl
@@ -79,6 +79,12 @@ enum WordCount {
 	"Words24",
 };
 
+dictionary RetryConfig {
+	u16 initial_retry_delay_ms;
+	u16 maximum_delay_ms;
+	f32 backoff_multiplier;
+};
+
 enum LogLevel {
 	"Gossip",
 	"Trace",
@@ -103,6 +109,53 @@ interface LogWriter {
 	void log(LogRecord record);
 };
 
+interface FfiDynStore {
+	[Name=from_store]
+	constructor(ForeignDynStoreTrait store);
+};
+
+[Trait, WithForeign]
+interface ForeignDynStoreTrait {
+	[Throws=IOError]
+	sequence<u8> read(string primary_namespace, string secondary_namespace, string key);
+	[Throws=IOError]
+	void write(string primary_namespace, string secondary_namespace, string key, sequence<u8> buf);
+	[Throws=IOError]
+	void remove(string primary_namespace, string secondary_namespace, string key, boolean lazy);
+	[Throws=IOError]
+	sequence<string> list(string primary_namespace, string secondary_namespace);
+	[Throws=IOError, Async]
+	sequence<u8> read_async(string primary_namespace, string secondary_namespace, string key);
+	[Throws=IOError, Async]
+	void write_async(string primary_namespace, string secondary_namespace, string key, sequence<u8> buf);
+	[Throws=IOError, Async]
+	void remove_async(string primary_namespace, string secondary_namespace, string key, boolean lazy);
+	[Throws=IOError, Async]
+	sequence<string> list_async(string primary_namespace, string secondary_namespace);
+};
+
+[Error]
+enum IOError {
+	"NotFound",
+	"PermissionDenied",
+	"ConnectionRefused",
+	"ConnectionReset",
+	"ConnectionAborted",
+	"NotConnected",
+	"AddrInUse",
+	"AddrNotAvailable",
+	"BrokenPipe",
+	"AlreadyExists",
+	"WouldBlock",
+	"InvalidInput",
+	"InvalidData",
+	"TimedOut",
+	"WriteZero",
+	"Interrupted",
+	"UnexpectedEof",
+	"Other",
+};
+
 interface Builder {
 	constructor();
 	[Name=from_config]
@@ -127,6 +180,9 @@ interface Builder {
 	void set_announcement_addresses(sequence<SocketAddress> announcement_addresses);
 	[Throws=BuildError]
 	void set_node_alias(string node_alias);
+	void set_tier_store_retry_config(RetryConfig retry_config);
+	void set_tier_store_backup(FfiDynStore backup_store);
+	void set_tier_store_ephemeral(FfiDynStore ephemeral_store);
 	[Throws=BuildError]
 	void set_async_payments_role(AsyncPaymentsRole? role);
 	void set_wallet_recovery_mode();
@@ -140,6 +196,8 @@ interface Builder {
 	Node build_with_vss_store_and_fixed_headers(NodeEntropy node_entropy, string vss_url, string store_id, record<string, string> fixed_headers);
 	[Throws=BuildError]
 	Node build_with_vss_store_and_header_provider(NodeEntropy node_entropy, string vss_url, string store_id, VssHeaderProvider header_provider);
+	[Throws=BuildError]
+	Node build_with_tier_store(NodeEntropy node_entropy, FfiDynStore primary_store);
 };
 
 interface Node {
diff --git a/src/builder.rs b/src/builder.rs
index 7a285876f..d4f4252b3 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -52,8 +52,11 @@ use crate::connection::ConnectionManager;
 use crate::entropy::NodeEntropy;
 use crate::event::EventQueue;
 use crate::fee_estimator::OnchainFeeEstimator;
+#[cfg(feature = "uniffi")]
+use crate::ffi::FfiDynStore;
 use crate::gossip::GossipSource;
 use crate::io::sqlite_store::SqliteStore;
+use crate::io::tier_store::{RetryConfig, TierStore};
 use crate::io::utils::{
 	read_event_queue, read_external_pathfinding_scores_from_cache, read_network_graph,
 	read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_pending_payments,
@@ -150,6 +153,23 @@ impl std::fmt::Debug for LogWriterConfig {
 	}
 }
 
+#[derive(Default)]
+struct TierStoreConfig {
+	ephemeral: Option<Arc<DynStore>>,
+	backup: Option<Arc<DynStore>>,
+	retry: Option<RetryConfig>,
+}
+
+impl std::fmt::Debug for TierStoreConfig {
+	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+		f.debug_struct("TierStoreConfig")
+			.field("ephemeral", &self.ephemeral.as_ref().map(|_| "Arc<DynStore>"))
+			.field("backup", &self.backup.as_ref().map(|_| "Arc<DynStore>"))
+			.field("retry", &self.retry)
+			.finish()
+	}
+}
+
 /// An error encountered during building a [`Node`].
 ///
 /// [`Node`]: crate::Node
@@ -242,6 +262,7 @@ pub struct NodeBuilder {
 	liquidity_source_config: Option<LiquiditySourceConfig>,
 	log_writer_config: Option<LogWriterConfig>,
 	async_payments_role: Option<AsyncPaymentsRole>,
+	tier_store_config: Option<TierStoreConfig>,
 	runtime_handle: Option<tokio::runtime::Handle>,
 	pathfinding_scores_sync_config: Option<PathfindingScoresSyncConfig>,
 	recovery_mode: bool,
@@ -260,6 +281,7 @@ impl NodeBuilder {
 		let gossip_source_config = None;
 		let liquidity_source_config = None;
 		let log_writer_config = None;
+		let tier_store_config = None;
 		let runtime_handle = None;
 		let pathfinding_scores_sync_config = None;
 		let recovery_mode = false;
@@ -269,6 +291,7 @@ impl NodeBuilder {
 			gossip_source_config,
 			liquidity_source_config,
 			log_writer_config,
+			tier_store_config,
 			runtime_handle,
 			async_payments_role: None,
 			pathfinding_scores_sync_config,
 			recovery_mode,
 		}
 	}
 
@@ -557,6 +580,51 @@ impl NodeBuilder {
 		self
 	}
 
+	/// Configures retry behavior for transient errors when accessing the primary store.
+	///
+	/// When building with [`build_with_tier_store`], controls the exponential backoff parameters
+	/// used when retrying failed operations on the primary store due to transient errors
+	/// (network issues, timeouts, etc.).
+	///
+	/// If not set, default retry parameters are used. See [`RetryConfig`] for details.
+	///
+	/// [`build_with_tier_store`]: Self::build_with_tier_store
+	pub fn set_tier_store_retry_config(&mut self, config: RetryConfig) -> &mut Self {
+		let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default());
+		tier_store_config.retry = Some(config);
+		self
+	}
+
+	/// Configures the backup store for local disaster recovery.
+	///
+	/// When building with [`build_with_tier_store`], this store receives asynchronous copies
+	/// of all critical data written to the primary store. If the primary store becomes
+	/// unavailable, reads will fall back to this backup store.
+	///
+	/// Backup writes are non-blocking and do not affect primary store operation performance.
+	///
+	/// [`build_with_tier_store`]: Self::build_with_tier_store
+	pub fn set_tier_store_backup(&mut self, backup_store: Arc<DynStore>) -> &mut Self {
+		let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default());
+		tier_store_config.backup = Some(backup_store);
+		self
+	}
+
+	/// Configures the ephemeral store for non-critical, frequently-accessed data.
+	///
+	/// When building with [`build_with_tier_store`], this store is used for data like
+	/// the network graph and scorer data to reduce latency for reads. Data stored here
+	/// can be rebuilt if lost.
+	///
+	/// If not set, non-critical data will be stored in the primary store.
+	///
+	/// [`build_with_tier_store`]: Self::build_with_tier_store
+	pub fn set_tier_store_ephemeral(&mut self, ephemeral_store: Arc<DynStore>) -> &mut Self {
+		let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default());
+		tier_store_config.ephemeral = Some(ephemeral_store);
+		self
+	}
+
 	/// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
 	/// previously configured.
 	pub fn build(&self, node_entropy: NodeEntropy) -> Result<Node, BuildError> {
@@ -569,6 +637,7 @@ impl NodeBuilder {
 			Some(io::sqlite_store::KV_TABLE_NAME.to_string()),
 		)
 		.map_err(|_| BuildError::KVStoreSetupFailed)?;
+
 		self.build_with_store(node_entropy, kv_store)
 	}
 
@@ -581,6 +650,7 @@ impl NodeBuilder {
 		fs::create_dir_all(storage_dir_path.clone())
 			.map_err(|_| BuildError::StoragePathAccessFailed)?;
 		let kv_store = FilesystemStore::new(storage_dir_path);
+
 		self.build_with_store(node_entropy, kv_store)
 	}
 
@@ -667,6 +737,91 @@ impl NodeBuilder {
 		self.build_with_store(node_entropy, vss_store)
 	}
 
+	/// Builds a [`Node`] instance with tiered storage for managing data across multiple storage layers.
+	///
+	/// This build method enables a three-tier storage architecture optimized for different data types
+	/// and access patterns:
+	///
+	/// ## Storage Tiers
+	///
+	/// - **Primary Store** (required): The authoritative store for critical channel state and payment data.
+	///   Typically a remote/cloud storage service for durability and accessibility across devices.
+	///
+	/// - **Ephemeral Store** (optional): Local storage for non-critical, frequently-accessed data like
+	///   the network graph and scorer. Improves performance by reducing latency for data that can be
+	///   rebuilt if lost. Configure with [`set_tier_store_ephemeral`].
+	///
+	/// - **Backup Store** (optional): Local backup of critical data for disaster recovery scenarios.
+	///   Provides a safety net if the primary store becomes temporarily unavailable. Writes are
+	///   asynchronous to avoid blocking primary operations. Configure with [`set_tier_store_backup`].
+	///
+	/// ## Configuration
+	///
+	/// Use the setter methods to configure optional stores and retry behavior:
+	/// - [`set_tier_store_ephemeral`] - Set local store for network graph and scorer
+	/// - [`set_tier_store_backup`] - Set local backup store for disaster recovery
+	/// - [`set_tier_store_retry_config`] - Configure retry delays and backoff for transient errors
+	///
+	/// ## Example
+	///
+	/// ```ignore
+	/// # use ldk_node::{Config, NodeBuilder};
+	/// # use ldk_node::io::tier_store::RetryConfig;
+	/// # use std::sync::Arc;
+	/// let config = Config::default();
+	/// let mut builder = NodeBuilder::from_config(config);
+	///
+	/// let primary = Arc::new(VssStore::new(...));
+	/// let ephemeral = Arc::new(FilesystemStore::new(...));
+	/// let backup = Arc::new(SqliteStore::new(...));
+	/// let retry_config = RetryConfig::default();
+	///
+	/// builder
+	///     .set_tier_store_ephemeral(ephemeral)
+	///     .set_tier_store_backup(backup)
+	///     .set_tier_store_retry_config(retry_config);
+	///
+	/// let node = builder.build_with_tier_store(node_entropy, primary)?;
+	/// # Ok::<(), ldk_node::BuildError>(())
+	/// ```
+	///
+	/// [`set_tier_store_ephemeral`]: Self::set_tier_store_ephemeral
+	/// [`set_tier_store_backup`]: Self::set_tier_store_backup
+	/// [`set_tier_store_retry_config`]: Self::set_tier_store_retry_config
+	#[cfg(not(feature = "uniffi"))]
+	pub fn build_with_tier_store(
+		&self, node_entropy: NodeEntropy, primary_store: Arc<DynStore>,
+	) -> Result<Node, BuildError> {
+		self.build_with_tier_store_internal(node_entropy, primary_store)
+	}
+
+	fn build_with_tier_store_internal(
+		&self, node_entropy: NodeEntropy, primary_store: Arc<DynStore>,
+	) -> Result<Node, BuildError> {
+		let logger = setup_logger(&self.log_writer_config, &self.config)?;
+		let runtime = if let Some(handle) = self.runtime_handle.as_ref() {
+			Arc::new(Runtime::with_handle(handle.clone(), Arc::clone(&logger)))
+		} else {
+			Arc::new(Runtime::new(Arc::clone(&logger)).map_err(|e| {
+				log_error!(logger, "Failed to setup tokio runtime: {}", e);
+				BuildError::RuntimeSetupFailed
+			})?)
+		};
+
+		let ts_config = self.tier_store_config.as_ref();
+		let retry_config = ts_config.and_then(|c| c.retry).unwrap_or_default();
+
+		let mut tier_store =
+			TierStore::new(primary_store, Arc::clone(&runtime), Arc::clone(&logger), retry_config);
+
+		if let Some(config) = ts_config {
+			config.ephemeral.as_ref().map(|s| tier_store.set_ephemeral_store(Arc::clone(s)));
+			config.backup.as_ref().map(|s| tier_store.set_backup_store(Arc::clone(s)));
+		}
+
+		self.build_with_store(node_entropy, tier_store)
+	}
+
 	/// Builds a [`Node`] instance according to the options previously configured.
 	pub fn build_with_store<S: KVStore + KVStoreSync + Send + Sync + 'static>(
 		&self, node_entropy: NodeEntropy, kv_store: S,
@@ -942,6 +1097,49 @@ impl ArcedNodeBuilder {
 		self.inner.write().unwrap().set_wallet_recovery_mode();
 	}
 
+	/// Configures retry behavior for transient errors when accessing the primary store.
+	///
+	/// When building with [`build_with_tier_store`], controls the exponential backoff parameters
+	/// used when retrying failed operations on the primary store due to transient errors
+	/// (network issues, timeouts, etc.).
+	///
+	/// If not set, default retry parameters are used. See [`RetryConfig`] for details.
+	///
+	/// [`build_with_tier_store`]: Self::build_with_tier_store
+	pub fn set_tier_store_retry_config(&self, config: RetryConfig) {
+		self.inner.write().unwrap().set_tier_store_retry_config(config);
+	}
+
+	/// Configures the backup store for local disaster recovery.
+	///
+	/// When building with [`build_with_tier_store`], this store receives asynchronous copies
+	/// of all critical data written to the primary store. If the primary store becomes
+	/// unavailable, reads will fall back to this backup store.
+	///
+	/// Backup writes are non-blocking and do not affect primary store operation performance.
+	///
+	/// [`build_with_tier_store`]: Self::build_with_tier_store
+	pub fn set_tier_store_backup(&self, backup_store: Arc<FfiDynStore>) {
+		let wrapper = DynStoreWrapper((*backup_store).clone());
+		let store: Arc<DynStore> = Arc::new(wrapper);
+		self.inner.write().unwrap().set_tier_store_backup(store);
+	}
+
+	/// Configures the ephemeral store for non-critical, frequently-accessed data.
+	///
+	/// When building with [`build_with_tier_store`], this store is used for data like
+	/// the network graph and scorer data to reduce latency for reads. Data stored here
+	/// can be rebuilt if lost.
+	///
+	/// If not set, non-critical data will be stored in the primary store.
+	///
+	/// [`build_with_tier_store`]: Self::build_with_tier_store
+	pub fn set_tier_store_ephemeral(&self, ephemeral_store: Arc<FfiDynStore>) {
+		let wrapper = DynStoreWrapper((*ephemeral_store).clone());
+		let store: Arc<DynStore> = Arc::new(wrapper);
+		self.inner.write().unwrap().set_tier_store_ephemeral(store);
+	}
+
 	/// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
 	/// previously configured.
 	pub fn build(&self, node_entropy: Arc<NodeEntropy>) -> Result<Arc<Node>, BuildError> {
@@ -1040,6 +1238,19 @@ impl ArcedNodeBuilder {
 			.map(Arc::new)
 	}
 
+	/// Builds a [`Node`] instance with the given primary store and according to the tiered
+	/// storage options previously configured.
+	pub fn build_with_tier_store(
+		&self, node_entropy: Arc<NodeEntropy>, primary_store: Arc<FfiDynStore>,
+	) -> Result<Arc<Node>, BuildError> {
+		let wrapper = DynStoreWrapper((*primary_store).clone());
+		let store: Arc<DynStore> = Arc::new(wrapper);
+		self.inner
+			.read()
+			.unwrap()
+			.build_with_tier_store_internal(*node_entropy, store)
+			.map(Arc::new)
+	}
+
 	/// Builds a [`Node`] instance according to the options previously configured.
 	// Note that the generics here don't actually work for Uniffi, but we don't currently expose
 	// this so it's not needed.
diff --git a/src/ffi/types.rs b/src/ffi/types.rs
index 2a349a967..f4afc19d4 100644
--- a/src/ffi/types.rs
+++ b/src/ffi/types.rs
@@ -11,11 +11,13 @@
 // Make sure to add any re-exported items that need to be used in uniffi below.
 use std::convert::TryInto;
+use std::future::Future;
 use std::ops::Deref;
 use std::str::FromStr;
 use std::sync::Arc;
 use std::time::Duration;
 
+use async_trait::async_trait;
 pub use bip39::Mnemonic;
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::Hash;
@@ -32,6 +34,7 @@ use lightning::offers::refund::Refund as LdkRefund;
 use lightning::onion_message::dns_resolution::HumanReadableName as LdkHumanReadableName;
 pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees};
 pub use lightning::routing::router::RouteParametersConfig;
+use lightning::util::persist::{KVStore, KVStoreSync};
 use lightning::util::ser::Writeable;
 use lightning_invoice::{Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescriptionRef};
 pub use lightning_invoice::{Description, SignedRawBolt11Invoice};
@@ -59,6 +62,247 @@ pub use crate::payment::store::{
 pub use crate::payment::UnifiedPaymentResult;
 use crate::{hex_utils, SocketAddress, UniffiCustomTypeConverter, UserChannelId};
 
+#[derive(Debug)]
+pub enum IOError {
+	NotFound,
+	PermissionDenied,
+	ConnectionRefused,
+	ConnectionReset,
+	ConnectionAborted,
+	NotConnected,
+	AddrInUse,
+	AddrNotAvailable,
+	BrokenPipe,
+	AlreadyExists,
+	WouldBlock,
+	InvalidInput,
+	InvalidData,
+	TimedOut,
+	WriteZero,
+	Interrupted,
+	UnexpectedEof,
+	Other,
+}
+
+impl From<bitcoin::io::Error> for IOError {
+	fn from(error: bitcoin::io::Error) -> Self {
+		match error.kind() {
+			bitcoin::io::ErrorKind::NotFound => IOError::NotFound,
+			bitcoin::io::ErrorKind::PermissionDenied => IOError::PermissionDenied,
+			bitcoin::io::ErrorKind::ConnectionRefused => IOError::ConnectionRefused,
+			bitcoin::io::ErrorKind::ConnectionReset => IOError::ConnectionReset,
+			bitcoin::io::ErrorKind::ConnectionAborted => IOError::ConnectionAborted,
+			bitcoin::io::ErrorKind::NotConnected => IOError::NotConnected,
+			bitcoin::io::ErrorKind::AddrInUse => IOError::AddrInUse,
+			bitcoin::io::ErrorKind::AddrNotAvailable => IOError::AddrNotAvailable,
+			bitcoin::io::ErrorKind::BrokenPipe => IOError::BrokenPipe,
+			bitcoin::io::ErrorKind::AlreadyExists => IOError::AlreadyExists,
+			bitcoin::io::ErrorKind::WouldBlock => IOError::WouldBlock,
+			bitcoin::io::ErrorKind::InvalidInput => IOError::InvalidInput,
+			bitcoin::io::ErrorKind::InvalidData => IOError::InvalidData,
+			bitcoin::io::ErrorKind::TimedOut => IOError::TimedOut,
+			bitcoin::io::ErrorKind::WriteZero => IOError::WriteZero,
+			bitcoin::io::ErrorKind::Interrupted => IOError::Interrupted,
+			bitcoin::io::ErrorKind::UnexpectedEof => IOError::UnexpectedEof,
+			bitcoin::io::ErrorKind::Other => IOError::Other,
+		}
+	}
+}
+
+impl From<IOError> for bitcoin::io::Error {
+	fn from(error: IOError) -> Self {
+		match error {
+			IOError::NotFound => bitcoin::io::ErrorKind::NotFound.into(),
+			IOError::PermissionDenied => bitcoin::io::ErrorKind::PermissionDenied.into(),
+			IOError::ConnectionRefused => bitcoin::io::ErrorKind::ConnectionRefused.into(),
+			IOError::ConnectionReset => bitcoin::io::ErrorKind::ConnectionReset.into(),
+			IOError::ConnectionAborted => bitcoin::io::ErrorKind::ConnectionAborted.into(),
+			IOError::NotConnected => bitcoin::io::ErrorKind::NotConnected.into(),
+			IOError::AddrInUse => bitcoin::io::ErrorKind::AddrInUse.into(),
+			IOError::AddrNotAvailable => bitcoin::io::ErrorKind::AddrNotAvailable.into(),
+			IOError::BrokenPipe => bitcoin::io::ErrorKind::BrokenPipe.into(),
+			IOError::AlreadyExists => bitcoin::io::ErrorKind::AlreadyExists.into(),
+			IOError::WouldBlock => bitcoin::io::ErrorKind::WouldBlock.into(),
+			IOError::InvalidInput => bitcoin::io::ErrorKind::InvalidInput.into(),
+			IOError::InvalidData => bitcoin::io::ErrorKind::InvalidData.into(),
+			IOError::TimedOut => bitcoin::io::ErrorKind::TimedOut.into(),
+			IOError::WriteZero => bitcoin::io::ErrorKind::WriteZero.into(),
+			IOError::Interrupted => bitcoin::io::ErrorKind::Interrupted.into(),
+			IOError::UnexpectedEof => bitcoin::io::ErrorKind::UnexpectedEof.into(),
+			IOError::Other => bitcoin::io::ErrorKind::Other.into(),
+		}
+	}
+}
+
+impl std::fmt::Display for IOError {
+	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+		match self {
+			IOError::NotFound => write!(f, "NotFound"),
+			IOError::PermissionDenied => write!(f, "PermissionDenied"),
+			IOError::ConnectionRefused => write!(f, "ConnectionRefused"),
+			IOError::ConnectionReset => write!(f, "ConnectionReset"),
+			IOError::ConnectionAborted => write!(f, "ConnectionAborted"),
+			IOError::NotConnected => write!(f, "NotConnected"),
+			IOError::AddrInUse => write!(f, "AddrInUse"),
+			IOError::AddrNotAvailable => write!(f, "AddrNotAvailable"),
+			IOError::BrokenPipe => write!(f, "BrokenPipe"),
+			IOError::AlreadyExists => write!(f, "AlreadyExists"),
+			IOError::WouldBlock => write!(f, "WouldBlock"),
+			IOError::InvalidInput => write!(f, "InvalidInput"),
+			IOError::InvalidData => write!(f, "InvalidData"),
+			IOError::TimedOut => write!(f, "TimedOut"),
+			IOError::WriteZero => write!(f, "WriteZero"),
+			IOError::Interrupted => write!(f, "Interrupted"),
+			IOError::UnexpectedEof => write!(f, "UnexpectedEof"),
+			IOError::Other => write!(f, "Other"),
+		}
+	}
+}
+
+#[async_trait]
+pub trait ForeignDynStoreTrait: Send + Sync {
+	async fn read_async(
+		&self, primary_namespace: String, secondary_namespace: String, key: String,
+	) -> Result<Vec<u8>, IOError>;
+	async fn write_async(
+		&self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec<u8>,
+	) -> Result<(), IOError>;
+	async fn remove_async(
+		&self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool,
+	) -> Result<(), IOError>;
+	async fn list_async(
+		&self, primary_namespace: String, secondary_namespace: String,
+	) -> Result<Vec<String>, IOError>;
+
+	fn read(
+		&self, primary_namespace: String, secondary_namespace: String, key: String,
+	) -> Result<Vec<u8>, IOError>;
+	fn write(
+		&self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec<u8>,
+	) -> Result<(), IOError>;
+	fn remove(
+		&self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool,
+	) -> Result<(), IOError>;
+	fn list(
+		&self, primary_namespace: String, secondary_namespace: String,
+	) -> Result<Vec<String>, IOError>;
+}
+
+#[derive(Clone)]
+pub struct FfiDynStore {
+	pub(crate) inner: Arc<dyn ForeignDynStoreTrait>,
+}
+
+impl FfiDynStore {
+	pub fn from_store(store: Arc<dyn ForeignDynStoreTrait>) -> Self {
+		Self { inner: store }
+	}
+}
+
+impl KVStore for FfiDynStore {
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> impl Future<Output = Result<Vec<u8>, lightning::io::Error>> + 'static + Send {
+		let this = Arc::clone(&self.inner);
+		let primary_namespace = primary_namespace.to_string();
+		let secondary_namespace = secondary_namespace.to_string();
+		let key = key.to_string();
+		async move {
+			this.read_async(primary_namespace, secondary_namespace, key).await.map_err(|e| e.into())
+		}
+	}
+
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> impl Future<Output = Result<(), lightning::io::Error>> + 'static + Send {
+		let this = Arc::clone(&self.inner);
+		let primary_namespace = primary_namespace.to_string();
+		let secondary_namespace = secondary_namespace.to_string();
+		let key = key.to_string();
+		async move {
+			this.write_async(primary_namespace, secondary_namespace, key, buf)
+				.await
+				.map_err(|e| e.into())
+		}
+	}
+
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+	) -> impl Future<Output = Result<(), lightning::io::Error>> + 'static + Send {
+		let this = Arc::clone(&self.inner);
+		let primary_namespace = primary_namespace.to_string();
+		let secondary_namespace = secondary_namespace.to_string();
+		let key = key.to_string();
+		async move {
+			this.remove_async(primary_namespace, secondary_namespace, key, lazy)
+				.await
+				.map_err(|e| e.into())
+		}
+	}
+
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> impl Future<Output = Result<Vec<String>, lightning::io::Error>> + 'static + Send {
+		let this = Arc::clone(&self.inner);
+		let primary_namespace = primary_namespace.to_string();
+		let secondary_namespace = secondary_namespace.to_string();
+		async move {
+			this.list_async(primary_namespace, secondary_namespace).await.map_err(|e| e.into())
+		}
+	}
+}
+
+impl KVStoreSync for FfiDynStore {
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> Result<Vec<u8>, lightning::io::Error> {
+		ForeignDynStoreTrait::read(
+			self.inner.as_ref(),
+			primary_namespace.to_string(),
+			secondary_namespace.to_string(),
+			key.to_string(),
+		)
+		.map_err(|e| e.into())
+	}
+
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+	) -> Result<(), lightning::io::Error> {
+		ForeignDynStoreTrait::write(
+			self.inner.as_ref(),
+			primary_namespace.to_string(),
+			secondary_namespace.to_string(),
+			key.to_string(),
+			buf,
+		)
+		.map_err(|e| e.into())
+	}
+
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+	) -> Result<(), lightning::io::Error> {
+		ForeignDynStoreTrait::remove(
+			self.inner.as_ref(),
+			primary_namespace.to_string(),
+			secondary_namespace.to_string(),
+			key.to_string(),
+			lazy,
+		)
+		.map_err(|e| e.into())
+	}
+
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> Result<Vec<String>, lightning::io::Error> {
+		ForeignDynStoreTrait::list(
+			self.inner.as_ref(),
+			primary_namespace.to_string(),
+			secondary_namespace.to_string(),
+		)
+		.map_err(|e| e.into())
+	}
+}
+
 impl UniffiCustomTypeConverter for PublicKey {
 	type Builtin = String;
 
diff --git a/src/io/mod.rs b/src/io/mod.rs
index e080d39f7..bf6366c45 100644
--- a/src/io/mod.rs
+++ b/src/io/mod.rs
@@ -10,6 +10,7 @@ pub mod sqlite_store;
 
 #[cfg(test)]
 pub(crate) mod test_utils;
+pub(crate) mod tier_store;
 pub(crate) mod utils;
 
 pub mod vss_store;
diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs
new file mode 100644
index 000000000..1f201e9f4
--- /dev/null
+++ b/src/io/tier_store.rs
@@ -0,0 +1,1267 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+ +use crate::io::utils::{check_namespace_key_validity, is_possibly_transient}; +use crate::logger::{LdkLogger, Logger}; +use crate::runtime::Runtime; +use crate::types::DynStore; + +use lightning::util::persist::{ + KVStore, KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + SCORER_PERSISTENCE_KEY, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, +}; +use lightning::{io, log_trace}; +use lightning::{log_debug, log_error, log_info, log_warn}; + +use tokio::sync::mpsc::{self, error::TrySendError}; + +use std::collections::HashMap; +use std::future::Future; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +// todo(enigbe): Uncertain about appropriate queue size and if this would need +// configuring. +const BACKUP_QUEUE_CAPACITY: usize = 100; + +const DEFAULT_INITIAL_RETRY_DELAY_MS: u16 = 10; +const DEFAULT_MAXIMUM_RETRY_DELAY_MS: u16 = 500; +const DEFAULT_BACKOFF_MULTIPLIER: f32 = 1.5; + +/// Configuration for exponential backoff retry behavior. +#[derive(Debug, Copy, Clone)] +pub struct RetryConfig { + /// The initial delay before the first retry attempt, in milliseconds. + pub initial_retry_delay_ms: u16, + /// The maximum delay between retry attempts, in milliseconds. + pub maximum_delay_ms: u16, + /// The multiplier applied to the delay after each retry attempt. + /// + /// For example, a value of `2.0` doubles the delay after each failed retry. + pub backoff_multiplier: f32, +} + +impl Default for RetryConfig { + fn default() -> Self { + Self { + initial_retry_delay_ms: DEFAULT_INITIAL_RETRY_DELAY_MS, + maximum_delay_ms: DEFAULT_MAXIMUM_RETRY_DELAY_MS, + backoff_multiplier: DEFAULT_BACKOFF_MULTIPLIER, + } + } +} + +/// A 3-tiered [`KVStoreSync`] implementation that manages data across +/// three distinct storage locations, i.e. primary (preferably remote) +/// store for all critical data, optional ephemeral (local) store for +/// non-critical and easily rebuildable data, and backup (preferably +/// local) to lazily backup the primary store for disaster recovery +/// scenarios. +pub(crate) struct TierStore { + inner: Arc, + next_version: AtomicU64, + runtime: Arc, + logger: Arc, +} + +impl TierStore { + pub fn new( + primary_store: Arc, runtime: Arc, logger: Arc, + retry_config: RetryConfig, + ) -> Self { + let inner = Arc::new(TierStoreInner::new(primary_store, Arc::clone(&logger), retry_config)); + + Self { inner, next_version: AtomicU64::new(1), runtime, logger } + } + + /// Configures the local backup store for disaster recovery. + /// + /// This store serves as a local copy of the critical data for disaster + /// recovery scenarios. When configured, this method also spawns a background + /// task that asynchronously processes backup writes and removals to avoid + /// blocking primary store operations. + /// + /// The backup operates on a best-effort basis: + /// - Writes are queued asynchronously (non-blocking) + /// - No retry logic (We assume local store is unlikely to have transient failures). + /// - Failures are logged but don't propagate to all the way to caller. 
+ pub fn set_backup_store(&mut self, backup: Arc) { + let (tx, rx) = mpsc::channel::(BACKUP_QUEUE_CAPACITY); + + let backup_clone = Arc::clone(&backup); + let logger = Arc::clone(&self.logger); + + self.runtime.spawn_background_task(Self::process_backup_operation( + rx, + backup_clone, + logger, + )); + + debug_assert_eq!(Arc::strong_count(&self.inner), 1); + + let inner = Arc::get_mut(&mut self.inner).expect( + "TierStore should not be shared during configuration. No other references should exist", + ); + + inner.backup_store = Some(backup); + inner.backup_sender = Some(tx); + } + + async fn process_backup_operation( + mut receiver: mpsc::Receiver, backup_store: Arc, logger: Arc, + ) { + while let Some(op) = receiver.recv().await { + match Self::apply_backup_operation(&op, &backup_store) { + Ok(_) => { + log_trace!( + logger, + "Backup succeeded for key {}/{}/{}", + op.primary_namespace(), + op.secondary_namespace(), + op.key() + ); + }, + Err(e) => { + log_error!( + logger, + "Backup failed permanently for key {}/{}/{}: {}", + op.primary_namespace(), + op.secondary_namespace(), + op.key(), + e + ); + }, + } + } + } + + fn apply_backup_operation(op: &BackupOp, store: &Arc) -> io::Result<()> { + match op { + BackupOp::Write { primary_namespace, secondary_namespace, key, data } => { + KVStoreSync::write( + store.as_ref(), + primary_namespace, + secondary_namespace, + key, + data.clone(), + ) + }, + BackupOp::Remove { primary_namespace, secondary_namespace, key, lazy } => { + KVStoreSync::remove( + store.as_ref(), + primary_namespace, + secondary_namespace, + key, + *lazy, + ) + }, + } + } + + /// Configures the local store for non-critical data storage. + pub fn set_ephemeral_store(&mut self, ephemeral: Arc) { + debug_assert_eq!(Arc::strong_count(&self.inner), 1); + + let inner = Arc::get_mut(&mut self.inner).expect( + "TierStore should not be shared during configuration. No other references should exist", + ); + + inner.ephemeral_store = Some(ephemeral); + } + + fn build_locking_key( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> String { + if primary_namespace.is_empty() { + key.to_owned() + } else { + format!("{}#{}#{}", primary_namespace, secondary_namespace, key) + } + } + + fn get_new_version_and_lock_ref( + &self, locking_key: String, + ) -> (Arc>, u64) { + let version = self.next_version.fetch_add(1, Ordering::Relaxed); + if version == u64::MAX { + panic!("TierStore version counter overflowed"); + } + + // Get a reference to the inner lock. We do this early so that the arc can double as an in-flight counter for + // cleaning up unused locks. 
+
+ fn get_new_version_and_lock_ref(
+ &self, locking_key: String,
+ ) -> (Arc<tokio::sync::Mutex<u64>>, u64) {
+ let version = self.next_version.fetch_add(1, Ordering::Relaxed);
+ if version == u64::MAX {
+ panic!("TierStore version counter overflowed");
+ }
+
+ // Get a reference to the inner lock. We do this early so that the arc can double as an
+ // in-flight counter for cleaning up unused locks.
+ let inner_lock_ref = self.inner.get_inner_lock_ref(locking_key);
+
+ (inner_lock_ref, version)
+ }
+}
+
+impl KVStore for TierStore {
+ fn read(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+ ) -> impl Future<Output = Result<Vec<u8>, io::Error>> + 'static + Send {
+ let inner = Arc::clone(&self.inner);
+
+ let primary_namespace = primary_namespace.to_string();
+ let secondary_namespace = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ async move { inner.read_internal(primary_namespace, secondary_namespace, key).await }
+ }
+
+ fn write(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+ ) -> impl Future<Output = Result<(), io::Error>> + 'static + Send {
+ let inner = Arc::clone(&self.inner);
+
+ let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key);
+ let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone());
+
+ let primary_namespace = primary_namespace.to_string();
+ let secondary_namespace = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ async move {
+ inner
+ .write_internal(
+ inner_lock_ref,
+ locking_key,
+ version,
+ primary_namespace,
+ secondary_namespace,
+ key,
+ buf,
+ )
+ .await
+ }
+ }
+
+ fn remove(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+ ) -> impl Future<Output = Result<(), io::Error>> + 'static + Send {
+ let inner = Arc::clone(&self.inner);
+
+ let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key);
+ let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone());
+
+ let primary_namespace = primary_namespace.to_string();
+ let secondary_namespace = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ async move {
+ inner
+ .remove_internal(
+ inner_lock_ref,
+ locking_key,
+ version,
+ primary_namespace,
+ secondary_namespace,
+ key,
+ lazy,
+ )
+ .await
+ }
+ }
+
+ fn list(
+ &self, primary_namespace: &str, secondary_namespace: &str,
+ ) -> impl Future<Output = Result<Vec<String>, io::Error>> + 'static + Send {
+ let inner = Arc::clone(&self.inner);
+
+ let primary_namespace = primary_namespace.to_string();
+ let secondary_namespace = secondary_namespace.to_string();
+
+ async move { inner.list_internal(primary_namespace, secondary_namespace).await }
+ }
+}
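+
+// Note: the (lock, version) pair is sampled synchronously in `write`/`remove`
+// above, before the returned future is first polled, so two writes issued
+// back-to-back on the same key receive increasing versions even if their
+// futures are awaited out of order.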
+
+impl KVStoreSync for TierStore {
+ fn read(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+ ) -> io::Result<Vec<u8>> {
+ self.runtime.block_on(self.inner.read_internal(
+ primary_namespace.to_string(),
+ secondary_namespace.to_string(),
+ key.to_string(),
+ ))
+ }
+
+ fn write(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+ ) -> io::Result<()> {
+ let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key);
+ let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone());
+
+ let primary_namespace = primary_namespace.to_string();
+ let secondary_namespace = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ self.runtime.block_on(self.inner.write_internal(
+ inner_lock_ref,
+ locking_key,
+ version,
+ primary_namespace,
+ secondary_namespace,
+ key,
+ buf,
+ ))
+ }
+
+ fn remove(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+ ) -> io::Result<()> {
+ let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key);
+ let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone());
+
+ let primary_namespace = primary_namespace.to_string();
+ let secondary_namespace = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ self.runtime.block_on(self.inner.remove_internal(
+ inner_lock_ref,
+ locking_key,
+ version,
+ primary_namespace,
+ secondary_namespace,
+ key,
+ lazy,
+ ))
+ }
+
+ fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> {
+ self.runtime.block_on(
+ self.inner
+ .list_internal(primary_namespace.to_string(), secondary_namespace.to_string()),
+ )
+ }
+}
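+
+// Both trait surfaces route through the same internals; illustratively,
+// `KVStoreSync::read(&store, "ns", "", "key")` blocks on the runtime, while
+// `KVStore::read(&store, "ns", "", "key").await` drives the same future.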
+
+pub struct TierStoreInner {
+ /// For remote data.
+ primary_store: Arc<DynStore>,
+ /// For local non-critical/ephemeral data.
+ ephemeral_store: Option<Arc<DynStore>>,
+ /// For redundancy (disaster recovery).
+ backup_store: Option<Arc<DynStore>>,
+ backup_sender: Option<mpsc::Sender<BackupOp>>,
+ logger: Arc<Logger>,
+ retry_config: RetryConfig,
+ /// Per-key locks for the available data tiers, i.e. (primary, backup, ephemeral),
+ /// ensuring we don't perform concurrent writes to the same namespace/key.
+ locks: Mutex<HashMap<String, Arc<tokio::sync::Mutex<u64>>>>,
+}
+
+impl TierStoreInner {
+ /// Creates a tier store with the primary (remote) data store.
+ pub fn new(
+ primary_store: Arc<DynStore>, logger: Arc<Logger>, retry_config: RetryConfig,
+ ) -> Self {
+ Self {
+ primary_store,
+ ephemeral_store: None,
+ backup_store: None,
+ backup_sender: None,
+ logger,
+ retry_config,
+ locks: Mutex::new(HashMap::new()),
+ }
+ }
+
+ /// Queues data for asynchronous backup/write to the configured backup store.
+ ///
+ /// We perform a non-blocking send to avoid impacting primary storage operations.
+ /// This is a no-op if the backup store is not configured.
+ ///
+ /// # Returns
+ /// - `Ok(())`: Backup was successfully queued or no backup is configured
+ /// - `Err(WouldBlock)`: Backup queue is full - data was not queued
+ /// - `Err(BrokenPipe)`: Backup queue is no longer available
+ fn enqueue_backup_write(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+ ) -> io::Result<()> {
+ if let Some(backup_sender) = &self.backup_sender {
+ let backup_res = backup_sender.try_send(BackupOp::Write {
+ primary_namespace: primary_namespace.to_string(),
+ secondary_namespace: secondary_namespace.to_string(),
+ key: key.to_string(),
+ data: buf,
+ });
+ if let Err(e) = backup_res {
+ match e {
+ // Assuming the channel is only full for a short time, should we explore
+ // retrying here to add some resiliency?
+ TrySendError::Full(op) => {
+ log_warn!(
+ self.logger,
+ "Backup queue is full. Cannot write data for key: {}/{}/{}",
+ op.primary_namespace(),
+ op.secondary_namespace(),
+ op.key()
+ );
+ let e = io::Error::new(
+ io::ErrorKind::WouldBlock,
+ "Backup queue is currently full.",
+ );
+ return Err(e);
+ },
+ TrySendError::Closed(op) => {
+ log_error!(
+ self.logger,
+ "Backup queue is closed. Cannot write data for key: {}/{}/{}",
+ op.primary_namespace(),
+ op.secondary_namespace(),
+ op.key()
+ );
+ let e =
+ io::Error::new(io::ErrorKind::BrokenPipe, "Backup queue is closed.");
+ return Err(e);
+ },
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Queues the removal of data from the configured backup store.
+ ///
+ /// We perform a non-blocking send to avoid impacting primary storage operations.
+ /// This is a no-op if the backup store is not configured.
+ ///
+ /// # Returns
+ /// - `Ok(())`: Backup removal was successfully queued or no backup is configured
+ /// - `Err(WouldBlock)`: Backup queue is full - data was not queued
+ /// - `Err(BrokenPipe)`: Backup system is no longer available
+ fn enqueue_backup_remove(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+ ) -> io::Result<()> {
+ if let Some(backup_sender) = &self.backup_sender {
+ let removal_res = backup_sender.try_send(BackupOp::Remove {
+ primary_namespace: primary_namespace.to_string(),
+ secondary_namespace: secondary_namespace.to_string(),
+ key: key.to_string(),
+ lazy,
+ });
+ if let Err(e) = removal_res {
+ match e {
+ TrySendError::Full(op) => {
+ log_warn!(
+ self.logger,
+ "Backup queue is full. Cannot remove data for key: {}/{}/{}",
+ op.primary_namespace(),
+ op.secondary_namespace(),
+ op.key()
+ );
+ let e = io::Error::new(
+ io::ErrorKind::WouldBlock,
+ "Backup queue is currently full.",
+ );
+ return Err(e);
+ },
+ TrySendError::Closed(op) => {
+ log_error!(
+ self.logger,
+ "Backup queue is closed. Cannot remove data for key: {}/{}/{}",
+ op.primary_namespace(),
+ op.secondary_namespace(),
+ op.key()
+ );
+ let e =
+ io::Error::new(io::ErrorKind::BrokenPipe, "Backup queue is closed.");
+ return Err(e);
+ },
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Reads data from the backup store (if configured).
+ fn read_from_backup(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+ ) -> io::Result<Vec<u8>> {
+ if let Some(backup) = self.backup_store.as_ref() {
+ KVStoreSync::read(backup.as_ref(), primary_namespace, secondary_namespace, key)
+ } else {
+ Err(io::Error::new(io::ErrorKind::NotFound, "Backup store not previously configured."))
+ }
+ }
+
+ /// Lists keys from the given primary and secondary namespace pair from the backup
+ /// store (if configured).
+ fn list_from_backup(
+ &self, primary_namespace: &str, secondary_namespace: &str,
+ ) -> io::Result<Vec<String>> {
+ if let Some(backup) = &self.backup_store {
+ KVStoreSync::list(backup.as_ref(), primary_namespace, secondary_namespace)
+ } else {
+ Err(io::Error::new(io::ErrorKind::NotFound, "Backup store not previously configured."))
+ }
+ }
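+
+ // With `RetryConfig::default()`, the retry loops below sleep for 10ms,
+ // then 15ms, 22.5ms, and so on, and give up once the delay would reach
+ // the 500ms cap (illustrative numbers only).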
+
+ /// Reads from the primary data store with basic retry logic, or falls back to backup.
+ ///
+ /// For transient errors, retries up to a maximum delay time with exponential
+ /// backoff. For any error (transient after exhaustion or non-transient), falls
+ /// back to the backup store (if configured).
+ async fn read_primary_or_backup(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+ ) -> io::Result<Vec<u8>> {
+ let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64);
+ let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64);
+ let mut tries = 0_u16;
+
+ loop {
+ match KVStore::read(
+ self.primary_store.as_ref(),
+ primary_namespace,
+ secondary_namespace,
+ key,
+ )
+ .await
+ {
+ Ok(data) => {
+ log_info!(
+ self.logger,
+ "Read succeeded after {} retries for key: {}/{}/{}",
+ tries,
+ primary_namespace,
+ secondary_namespace,
+ key
+ );
+ return Ok(data);
+ },
+
+ Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => {
+ log_warn!(
+ self.logger,
+ "Possible transient error reading key {}/{}/{} (attempt {}): {}. Retrying...",
+ primary_namespace,
+ secondary_namespace,
+ key,
+ tries + 1,
+ e
+ );
+ tries += 1;
+ tokio::time::sleep(delay).await;
+ delay = std::cmp::min(
+ delay.mul_f32(self.retry_config.backoff_multiplier),
+ maximum_delay,
+ );
+ },
+
+ Err(e) => {
+ log_error!(self.logger, "Failed to read from primary store for key {}/{}/{}: {}. Falling back to backup.",
+ primary_namespace, secondary_namespace, key, e);
+ return self.read_from_backup(primary_namespace, secondary_namespace, key);
+ },
+ }
+ }
+ }
+
+ /// Lists keys from the primary data store with retry logic, or falls back to backup.
+ ///
+ /// For transient errors, retries up to a maximum delay time with exponential
+ /// backoff. For any error (transient after exhaustion or non-transient), falls
+ /// back to the backup store (if configured) for disaster recovery.
+ async fn list_primary_or_backup(
+ &self, primary_namespace: &str, secondary_namespace: &str,
+ ) -> io::Result<Vec<String>> {
+ let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64);
+ let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64);
+ let mut tries = 0_u16;
+
+ loop {
+ match KVStore::list(self.primary_store.as_ref(), primary_namespace, secondary_namespace)
+ .await
+ {
+ Ok(keys) => {
+ log_info!(
+ self.logger,
+ "List succeeded after {} retries for namespace: {}/{}",
+ tries,
+ primary_namespace,
+ secondary_namespace
+ );
+ return Ok(keys);
+ },
+ Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => {
+ log_warn!(
+ self.logger,
+ "Possible transient error listing namespace {}/{} (attempt {}): {}. Retrying...",
+ primary_namespace,
+ secondary_namespace,
+ tries + 1,
+ e
+ );
+ tries += 1;
+ tokio::time::sleep(delay).await;
+ delay = std::cmp::min(
+ delay.mul_f32(self.retry_config.backoff_multiplier),
+ maximum_delay,
+ );
+ },
+ Err(e) => {
+ log_error!(self.logger, "Failed to list from primary store for namespace {}/{}: {}. Falling back to backup.",
+ primary_namespace, secondary_namespace, e);
+ return self.list_from_backup(primary_namespace, secondary_namespace);
+ },
+ }
+ }
+ }
+
+ /// Writes data to the primary store with retry logic.
+ ///
+ /// For transient errors, retries up to a maximum delay time with exponential
+ /// backoff.
+ async fn retry_write_with_backoff(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+ ) -> io::Result<()> {
+ let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64);
+ let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64);
+ let mut tries = 0_u16;
+
+ loop {
+ match KVStore::write(
+ self.primary_store.as_ref(),
+ primary_namespace,
+ secondary_namespace,
+ key,
+ buf.clone(),
+ )
+ .await
+ {
+ Ok(res) => {
+ log_info!(
+ self.logger,
+ "Write succeeded after {} retries for key: {}/{}/{}",
+ tries,
+ primary_namespace,
+ secondary_namespace,
+ key
+ );
+ return Ok(res);
+ },
+ Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => {
+ log_warn!(
+ self.logger,
+ "Possible transient error writing key {}/{}/{} (attempt {}): {}. Retrying...",
+ primary_namespace,
+ secondary_namespace,
+ key,
+ tries + 1,
+ e
+ );
+ tries += 1;
+ tokio::time::sleep(delay).await;
+ delay = std::cmp::min(
+ delay.mul_f32(self.retry_config.backoff_multiplier),
+ maximum_delay,
+ );
+ },
+ Err(e) => {
+ log_error!(
+ self.logger,
+ "Failed to write to primary store for key {}/{}/{}: {}",
+ primary_namespace,
+ secondary_namespace,
+ key,
+ e
+ );
+ return Err(e);
+ },
+ }
+ }
+ }
+
+ /// Removes data from the primary store with retry logic.
+ ///
+ /// For transient errors, retries up to a maximum delay time with exponential
+ /// backoff.
+ async fn retry_remove_with_backoff(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+ ) -> io::Result<()> {
+ let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64);
+ let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64);
+ let mut tries = 0_u16;
+
+ loop {
+ match KVStore::remove(
+ self.primary_store.as_ref(),
+ primary_namespace,
+ secondary_namespace,
+ key,
+ lazy,
+ )
+ .await
+ {
+ Ok(res) => {
+ log_info!(
+ self.logger,
+ "Successfully removed data from primary store after {} retries for key: {}/{}/{}",
+ tries,
+ primary_namespace,
+ secondary_namespace,
+ key
+ );
+ return Ok(res);
+ },
+ Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => {
+ log_warn!(
+ self.logger,
+ "Possible transient error removing key {}/{}/{} from primary store (attempt {}): {}. Retrying...",
+ primary_namespace,
+ secondary_namespace,
+ key,
+ tries + 1,
+ e
+ );
+ tries += 1;
+ tokio::time::sleep(delay).await;
+ delay = std::cmp::min(
+ delay.mul_f32(self.retry_config.backoff_multiplier),
+ maximum_delay,
+ );
+ },
+ Err(e) => {
+ log_error!(
+ self.logger,
+ "Failed to remove data from primary store for key {}/{}/{}: {}",
+ primary_namespace,
+ secondary_namespace,
+ key,
+ e
+ );
+ return Err(e);
+ },
+ }
+ }
+ }
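+
+ // Ordering sketch: the primary write/remove always lands first; only a
+ // successful primary operation is enqueued for backup. The backup can
+ // therefore lag behind the primary, but never run ahead of it.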
+
+ async fn primary_write_then_schedule_backup(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+ ) -> io::Result<()> {
+ let primary_write_res = match KVStore::write(
+ self.primary_store.as_ref(),
+ primary_namespace,
+ secondary_namespace,
+ key,
+ buf.clone(),
+ )
+ .await
+ {
+ Ok(res) => Ok(res),
+ Err(e) if is_possibly_transient(&e) => {
+ self.retry_write_with_backoff(
+ primary_namespace,
+ secondary_namespace,
+ key,
+ buf.clone(),
+ )
+ .await
+ },
+ Err(e) => Err(e),
+ };
+
+ match primary_write_res {
+ Ok(res) => {
+ // We enqueue for backup only what we successfully write to primary. In doing
+ // this we avoid data inconsistencies across stores.
+ if let Err(e) =
+ self.enqueue_backup_write(primary_namespace, secondary_namespace, key, buf)
+ {
+ // We don't propagate backup errors here, opting to log only.
+ log_warn!(
+ self.logger,
+ "Failed to queue backup write for key: {}/{}/{}. Error: {}",
+ primary_namespace,
+ secondary_namespace,
+ key,
+ e
+ )
+ }
+
+ Ok(res)
+ },
+ Err(e) => {
+ log_debug!(
+ self.logger,
+ "Skipping backup write due to primary write failure for key: {}/{}/{}.",
+ primary_namespace,
+ secondary_namespace,
+ key
+ );
+ Err(e)
+ },
+ }
+ }
+
+ async fn primary_remove_then_schedule_backup(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+ ) -> io::Result<()> {
+ let primary_remove_res = match KVStore::remove(
+ self.primary_store.as_ref(),
+ primary_namespace,
+ secondary_namespace,
+ key,
+ lazy,
+ )
+ .await
+ {
+ Ok(res) => Ok(res),
+ Err(e) if is_possibly_transient(&e) => {
+ self.retry_remove_with_backoff(primary_namespace, secondary_namespace, key, lazy)
+ .await
+ },
+ Err(e) => Err(e),
+ };
+
+ match primary_remove_res {
+ Ok(res) => {
+ if let Err(e) =
+ self.enqueue_backup_remove(primary_namespace, secondary_namespace, key, lazy)
+ {
+ // We don't propagate backup errors here, opting to log only.
+ log_warn!(
+ self.logger,
+ "Failed to queue backup removal for key: {}/{}/{}. Error: {}",
+ primary_namespace,
+ secondary_namespace,
+ key,
+ e
+ )
+ }
+
+ Ok(res)
+ },
+ Err(e) => {
+ log_debug!(
+ self.logger,
+ "Skipping backup removal due to primary removal failure for key: {}/{}/{}.",
+ primary_namespace,
+ secondary_namespace,
+ key
+ );
+ Err(e)
+ },
+ }
+ }
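+
+ // Routing summary (mirrors the match arms below): the network graph and
+ // scorer keys go to the ephemeral store when one is configured; all other
+ // keys go to the primary store with a lazy, best-effort backup.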
+
+ async fn read_internal(
+ &self, primary_namespace: String, secondary_namespace: String, key: String,
+ ) -> io::Result<Vec<u8>> {
+ check_namespace_key_validity(
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ Some(key.as_str()),
+ "read",
+ )?;
+
+ match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) {
+ (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY)
+ | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => {
+ if let Some(eph_store) = self.ephemeral_store.as_ref() {
+ // We only try once here (without retry logic) because a local failure might be
+ // indicative of a more serious issue (e.g. full memory, memory corruption, a
+ // permissions change) that does not self-resolve, such that retrying would
+ // negate the latency benefits.
+
+ // The following questions remain:
+ // 1. Are there situations where local transient errors may warrant a retry?
+ // 2. Can we reliably identify/detect these transient errors?
+ // 3. Should we fall back to the primary or backup stores in the event of any error?
+ KVStoreSync::read(
+ eph_store.as_ref(),
+ &primary_namespace,
+ &secondary_namespace,
+ &key,
+ )
+ } else {
+ log_debug!(self.logger, "Ephemeral store not configured. Reading non-critical data from primary or backup stores.");
+ self.read_primary_or_backup(&primary_namespace, &secondary_namespace, &key)
+ .await
+ }
+ },
+ _ => self.read_primary_or_backup(&primary_namespace, &secondary_namespace, &key).await,
+ }
+ }
+
+ async fn write_internal(
+ &self, inner_lock_ref: Arc<tokio::sync::Mutex<u64>>, locking_key: String, version: u64,
+ primary_namespace: String, secondary_namespace: String, key: String, buf: Vec<u8>,
+ ) -> io::Result<()> {
+ check_namespace_key_validity(
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ Some(key.as_str()),
+ "write",
+ )?;
+
+ match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) {
+ (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY)
+ | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => {
+ if let Some(eph_store) = &self.ephemeral_store {
+ self.execute_locked_write(
+ inner_lock_ref,
+ locking_key,
+ version,
+ async move || {
+ KVStoreSync::write(
+ eph_store.as_ref(),
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ key.as_str(),
+ buf,
+ )
+ },
+ )
+ .await
+ } else {
+ log_debug!(self.logger, "Ephemeral store not configured. Writing non-critical data to primary and backup stores.");
+
+ self.execute_locked_write(
+ inner_lock_ref,
+ locking_key,
+ version,
+ async move || {
+ self.primary_write_then_schedule_backup(
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ key.as_str(),
+ buf,
+ )
+ .await
+ },
+ )
+ .await
+ }
+ },
+ _ => {
+ self.execute_locked_write(inner_lock_ref, locking_key, version, async move || {
+ self.primary_write_then_schedule_backup(
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ key.as_str(),
+ buf,
+ )
+ .await
+ })
+ .await
+ },
+ }
+ }
+
+ async fn remove_internal(
+ &self, inner_lock_ref: Arc<tokio::sync::Mutex<u64>>, locking_key: String, version: u64,
+ primary_namespace: String, secondary_namespace: String, key: String, lazy: bool,
+ ) -> io::Result<()> {
+ check_namespace_key_validity(
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ Some(key.as_str()),
+ "remove",
+ )?;
+
+ match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) {
+ (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY)
+ | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => {
+ if let Some(eph_store) = &self.ephemeral_store {
+ self.execute_locked_write(
+ inner_lock_ref,
+ locking_key,
+ version,
+ async move || {
+ KVStoreSync::remove(
+ eph_store.as_ref(),
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ key.as_str(),
+ lazy,
+ )
+ },
+ )
+ .await
+ } else {
+ log_debug!(self.logger, "Ephemeral store not configured. Removing non-critical data from primary and backup stores.");
+
+ self.execute_locked_write(
+ inner_lock_ref,
+ locking_key,
+ version,
+ async move || {
+ self.primary_remove_then_schedule_backup(
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ key.as_str(),
+ lazy,
+ )
+ .await
+ },
+ )
+ .await
+ }
+ },
+ _ => {
+ self.execute_locked_write(inner_lock_ref, locking_key, version, async move || {
+ self.primary_remove_then_schedule_backup(
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ key.as_str(),
+ lazy,
+ )
+ .await
+ })
+ .await
+ },
+ }
+ }
+
+ async fn list_internal(
+ &self, primary_namespace: String, secondary_namespace: String,
+ ) -> io::Result<Vec<String>> {
+ check_namespace_key_validity(
+ primary_namespace.as_str(),
+ secondary_namespace.as_str(),
+ None,
+ "list",
+ )?;
+
+ match (primary_namespace.as_str(), secondary_namespace.as_str()) {
+ (
+ NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
+ NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
+ )
+ | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _) => {
+ if let Some(eph_store) = self.ephemeral_store.as_ref() {
+ KVStoreSync::list(eph_store.as_ref(), &primary_namespace, &secondary_namespace)
+ } else {
+ log_debug!(
+ self.logger,
+ "Ephemeral store not configured. Listing from primary and backup stores."
+ );
+ self.list_primary_or_backup(&primary_namespace, &secondary_namespace).await
+ }
+ },
+ _ => self.list_primary_or_backup(&primary_namespace, &secondary_namespace).await,
+ }
+ }
+
+ fn get_inner_lock_ref(&self, locking_key: String) -> Arc<tokio::sync::Mutex<u64>> {
+ let mut outer_lock = self.locks.lock().unwrap();
+ Arc::clone(&outer_lock.entry(locking_key).or_default())
+ }
+
+ async fn execute_locked_write<
+ F: Future<Output = Result<(), lightning::io::Error>>,
+ FN: FnOnce() -> F,
+ >(
+ &self, inner_lock_ref: Arc<tokio::sync::Mutex<u64>>, locking_key: String, version: u64,
+ callback: FN,
+ ) -> Result<(), lightning::io::Error> {
+ let res = {
+ let mut last_written_version = inner_lock_ref.lock().await;
+
+ // Check if we already have a newer version written. This ensures eventual consistency.
+ let is_stale_version = version <= *last_written_version;
+
+ if is_stale_version {
+ Ok(())
+ } else {
+ callback().await.map(|_| {
+ *last_written_version = version;
+ })
+ }
+ };
+
+ self.clean_locks(&inner_lock_ref, locking_key);
+ res
+ }
+
+ fn clean_locks(&self, inner_lock_ref: &Arc<tokio::sync::Mutex<u64>>, locking_key: String) {
+ // If there are no arcs in use elsewhere, there are no in-flight writes, and we can remove
+ // the map entry to prevent leaking memory. The two arcs that are expected are the one in
+ // the map and the one held here in inner_lock_ref. The outer lock is obtained first, to
+ // avoid a new arc being cloned after we've already counted.
+ let mut outer_lock = self.locks.lock().unwrap();
+
+ let strong_count = Arc::strong_count(&inner_lock_ref);
+ debug_assert!(strong_count >= 2, "Unexpected TierStore strong count");
+
+ if strong_count == 2 {
+ outer_lock.remove(&locking_key);
+ }
+ }
+}
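+
+// Version-gating example (illustrative): if writes v1 and v2 race on the same
+// key and v2 completes first, v1's callback is skipped in execute_locked_write
+// because 1 <= last_written_version (now 2), so the newest data wins.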
+
+enum BackupOp {
+ Write { primary_namespace: String, secondary_namespace: String, key: String, data: Vec<u8> },
+ Remove { primary_namespace: String, secondary_namespace: String, key: String, lazy: bool },
+}
+
+impl BackupOp {
+ fn primary_namespace(&self) -> &str {
+ match self {
+ BackupOp::Write { primary_namespace, .. }
+ | BackupOp::Remove { primary_namespace, .. } => primary_namespace,
+ }
+ }
+
+ fn secondary_namespace(&self) -> &str {
+ match self {
+ BackupOp::Write { secondary_namespace, .. }
+ | BackupOp::Remove { secondary_namespace, .. } => secondary_namespace,
+ }
+ }
+
+ fn key(&self) -> &str {
+ match self {
+ BackupOp::Write { key, .. } | BackupOp::Remove { key, .. } => key,
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::io::test_utils::random_storage_path;
+ use crate::io::tier_store::{RetryConfig, TierStore};
+ use crate::logger::Logger;
+ use crate::runtime::Runtime;
+ #[cfg(not(feature = "uniffi"))]
+ use crate::types::DynStore;
+ use crate::types::DynStoreWrapper;
+ #[cfg(feature = "uniffi")]
+ use crate::DynStore;
+
+ use lightning::util::logger::Level;
+ use lightning::util::persist::{
+ KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
+ NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY,
+ };
+ use lightning_persister::fs_store::FilesystemStore;
+
+ use std::path::PathBuf;
+ use std::sync::Arc;
+ // use std::time::Duration;
+
+ struct StorageFixture {
+ tier: TierStore,
+ primary: Arc<DynStore>,
+ ephemeral: Option<Arc<DynStore>>,
+ backup: Option<Arc<DynStore>>,
+ base_dir: PathBuf,
+ }
+
+ impl Drop for StorageFixture {
+ fn drop(&mut self) {
+ drop(self.backup.take());
+ drop(self.ephemeral.take());
+
+ if let Err(e) = std::fs::remove_dir_all(&self.base_dir) {
+ eprintln!("Failed to clean up test directory {:?}: {}", self.base_dir, e);
+ }
+ }
+ }
+
+ fn setup_tier_store(ephemeral: bool, backup: bool) -> StorageFixture {
+ let base_dir = random_storage_path();
+ let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned();
+
+ let primary: Arc<DynStore> =
+ Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary_store"))));
+ let logger = Arc::new(
+ Logger::new_fs_writer(log_path, Level::Debug)
+ .expect("Failed to create filesystem logger"),
+ );
+ let runtime =
+ Arc::new(Runtime::new(Arc::clone(&logger)).expect("Failed to create new runtime."));
+ let retry_config = RetryConfig::default();
+ let mut tier =
+ TierStore::new(Arc::clone(&primary), Arc::clone(&runtime), logger, retry_config);
+
+ let ephemeral = if ephemeral {
+ let eph_store: Arc<DynStore> =
+ Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("eph_store"))));
+ tier.set_ephemeral_store(Arc::clone(&eph_store));
+ Some(eph_store)
+ } else {
+ None
+ };
+
+ let backup = if backup {
+ let backup: Arc<DynStore> =
+ Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("backup_store"))));
+ tier.set_backup_store(Arc::clone(&backup));
+ Some(backup)
+ } else {
+ None
+ };
+
+ StorageFixture { tier, primary, ephemeral, backup, base_dir }
+ }
+
+ #[test]
+ fn writes_to_ephemeral_if_configured() {
+ let tier = setup_tier_store(true, false);
+ assert!(tier.ephemeral.is_some());
+
+ let primary_namespace = NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE;
+ let secondary_namespace = NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE;
+ let data = [42u8; 32].to_vec();
+
+ KVStoreSync::write(
+ &tier.tier,
+ primary_namespace,
+ secondary_namespace,
+ NETWORK_GRAPH_PERSISTENCE_KEY,
+ data.clone(),
+ )
+ .unwrap();
+
+ KVStoreSync::write(
+ &tier.tier,
+ primary_namespace,
+ secondary_namespace,
+ SCORER_PERSISTENCE_KEY,
+ data.clone(),
+ )
+ .unwrap();
+
+ let eph_store = tier.ephemeral.clone().unwrap();
+ let ng_read = KVStoreSync::read(
+ &*eph_store,
+ primary_namespace,
+ secondary_namespace,
+ NETWORK_GRAPH_PERSISTENCE_KEY,
+ )
+ .unwrap();
+
+ let sc_read = KVStoreSync::read(
+ &*eph_store,
+ primary_namespace,
+ secondary_namespace,
+ SCORER_PERSISTENCE_KEY,
+ )
+ .unwrap();
+
+ assert_eq!(ng_read, data);
+ assert!(KVStoreSync::read(
+ &*tier.primary,
+ primary_namespace,
+ secondary_namespace,
+ NETWORK_GRAPH_PERSISTENCE_KEY
+ )
+ .is_err());
+
+ assert_eq!(sc_read, data);
+ assert!(KVStoreSync::read(
+ &*tier.primary,
+ primary_namespace,
+ secondary_namespace,
+ SCORER_PERSISTENCE_KEY
+ )
+ .is_err());
+ }
+}
diff --git a/src/io/utils.rs b/src/io/utils.rs
index d2f70377b..1b0456c45 100644
--- a/src/io/utils.rs
+++ b/src/io/utils.rs
@@ -20,6 +20,7 @@ use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet;
 use bdk_chain::ConfirmationBlockTime;
 use bdk_wallet::ChangeSet as BdkWalletChangeSet;
 use bitcoin::Network;
+use lightning::io::ErrorKind;
 use lightning::ln::msgs::DecodeError;
 use lightning::routing::gossip::NetworkGraph;
 use lightning::routing::scoring::{
@@ -704,6 +705,19 @@ where
 Ok(res)
 }
 
+/// Checks if an error kind is possibly transient.
+pub(crate) fn is_possibly_transient(error: &lightning::io::Error) -> bool {
+ match error.kind() {
+ ErrorKind::ConnectionRefused
+ | ErrorKind::ConnectionAborted
+ | ErrorKind::ConnectionReset
+ | ErrorKind::TimedOut
+ | ErrorKind::Interrupted
+ | ErrorKind::NotConnected => true,
+ _ => false,
+ }
+}
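+// (Illustrative: a TimedOut or ConnectionReset from the primary store takes
+// TierStore's retry/backoff paths, while e.g. NotFound or PermissionDenied
+// fail fast.)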
+
 #[cfg(test)]
 mod tests {
 use super::read_or_generate_seed_file;
diff --git a/src/lib.rs b/src/lib.rs
index 2b60307b0..75ab773de 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -136,6 +136,7 @@ use fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator};
 use ffi::*;
 use gossip::GossipSource;
 use graph::NetworkGraph;
+pub use io::tier_store::RetryConfig;
 use io::utils::write_node_metrics;
 use lightning::chain::BestBlock;
 use lightning::events::bump_transaction::{Input, Wallet as LdkWallet};
@@ -160,11 +161,12 @@ use peer_store::{PeerInfo, PeerStore};
 use rand::Rng;
 use runtime::Runtime;
 use types::{
- Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph,
- HRNResolver, KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper,
- Wallet,
+ Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, Graph, HRNResolver,
+ KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet,
+};
+pub use types::{
+ ChannelDetails, CustomTlvRecord, DynStore, PeerDetails, SyncAndAsyncKVStore, UserChannelId,
 };
-pub use types::{ChannelDetails, CustomTlvRecord, PeerDetails, SyncAndAsyncKVStore, UserChannelId};
 pub use {
 bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio,
 vss_client,
diff --git a/src/types.rs b/src/types.rs
index b5b1ffed7..7c80609b3 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -53,7 +53,7 @@ where
 {
 }
 
-pub(crate) trait DynStoreTrait: Send + Sync {
+pub trait DynStoreTrait: Send + Sync {
 fn read_async(
 &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
 ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, bitcoin::io::Error>> + Send + 'static>>;
@@ -133,7 +133,8 @@ impl<'a> KVStoreSync for dyn DynStoreTrait + 'a {
 }
 }
 
-pub(crate) type DynStore = dyn DynStoreTrait;
+/// Type alias for any store that implements [`DynStoreTrait`].
+pub type DynStore = dyn DynStoreTrait;
 
 pub(crate) struct DynStoreWrapper<T>(pub(crate) T);

From 2a0c5d04cc4786597920e04fdd44632a4b2029bb Mon Sep 17 00:00:00 2001
From: Enigbe
Date: Mon, 20 Oct 2025 22:00:58 +0100
Subject: [PATCH 64/75] Add comprehensive testing for TierStore

This commit adds unit, integration, and FFI tests for the TierStore
implementation:
- Unit tests for TierStore core functionality
- Integration tests for nodes built with tiered storage
- Python FFI tests for foreign key-value store
---
 benches/payments.rs | 1 +
 bindings/python/src/ldk_node/kv_store.py | 115 ++++++
 bindings/python/src/ldk_node/test_ldk_node.py | 329 ++++++++++-------
 src/io/test_utils.rs | 170 ++++++++-
 src/io/tier_store.rs | 346 +++++++++++++-----
 src/lib.rs | 5 +-
 src/types.rs | 3 +-
 tests/common/mod.rs | 123 ++++++-
 tests/integration_tests_rust.rs | 86 +++++
 9 files changed, 947 insertions(+), 231 deletions(-)
 create mode 100644 bindings/python/src/ldk_node/kv_store.py

diff --git a/benches/payments.rs b/benches/payments.rs
index 52769d794..8ded1399e 100644
--- a/benches/payments.rs
+++ b/benches/payments.rs
@@ -127,6 +127,7 @@ fn payment_benchmark(c: &mut Criterion) {
 true,
 false,
 common::TestStoreType::Sqlite,
+ common::TestStoreType::Sqlite,
 );
 
 let runtime =
diff --git a/bindings/python/src/ldk_node/kv_store.py b/bindings/python/src/ldk_node/kv_store.py
new file mode 100644
index 000000000..d871d7a6d
--- /dev/null
+++ b/bindings/python/src/ldk_node/kv_store.py
@@ -0,0 +1,115 @@
+import threading
+
+from abc import ABC, abstractmethod
+from typing import List
+
+from ldk_node import IoError
+
+class AbstractKvStore(ABC):
+ @abstractmethod
+ async def read_async(self, primary_namespace: "str", secondary_namespace: "str", key: "str") -> "typing.List[int]":
+ pass
+
+ @abstractmethod
+ async def write_async(self, primary_namespace: "str", secondary_namespace: "str", key: "str", buf: "typing.List[int]") -> None:
+ pass
+
+ @abstractmethod
+ async def remove_async(self, primary_namespace: "str", secondary_namespace: "str", key: "str", lazy: "bool") -> None:
+ pass
+
+ @abstractmethod
+ async def list_async(self, primary_namespace: "str", secondary_namespace: "str") -> "typing.List[str]":
+ pass
+
+ @abstractmethod
+ def read(self, primary_namespace: "str", secondary_namespace: "str", key: "str") -> "typing.List[int]":
+ pass
+
+ @abstractmethod
+ def write(self, primary_namespace: "str", secondary_namespace: "str", key: "str", buf: "typing.List[int]") -> None:
+ pass
+
+ @abstractmethod
+ def remove(self, primary_namespace: "str", secondary_namespace: "str", key: "str", lazy: "bool") -> None:
+ pass
+
+ @abstractmethod
+ def list(self, primary_namespace: "str", secondary_namespace: "str") -> "typing.List[str]":
+ pass
+
+class TestKvStore(AbstractKvStore):
+ def __init__(self, name: str):
+ self.name = name
+ # Storage structure: {(primary_ns, secondary_ns): {key: [bytes]}}
+ self.storage = {}
+ self._lock = threading.Lock()
+
+ def dump(self):
+ print(f"\n[{self.name}] Store contents:")
+ for (primary_ns, secondary_ns), keys_dict in self.storage.items():
+ print(f" Namespace: ({primary_ns!r}, {secondary_ns!r})")
+ for key, data in keys_dict.items():
+ print(f" Key: {key!r} -> {len(data)} bytes")
+ # Optionally show first few bytes
+ preview = data[:20] if len(data) > 20 else data
+ print(f" Data preview: {preview}...")
+
+ def read(self, primary_namespace: str, secondary_namespace: str, key: str) -> List[int]:
+ with self._lock:
+ print(f"[{self.name}] READ: {primary_namespace}/{secondary_namespace}/{key}")
+ 
namespace_key = (primary_namespace, secondary_namespace) + + if namespace_key not in self.storage: + print(f" -> namespace not found, keys: {list(self.storage.keys())}") + raise IoError.NotFound(f"Namespace not found: {primary_namespace}/{secondary_namespace}") + + if key not in self.storage[namespace_key]: + print(f" -> key not found, keys: {list(self.storage[namespace_key].keys())}") + raise IoError.NotFound(f"Key not found: {key}") + + data = self.storage[namespace_key][key] + print(f" -> returning {len(data)} bytes") + return data + + def write(self, primary_namespace: str, secondary_namespace: str, key: str, buf: List[int]) -> None: + with self._lock: + namespace_key = (primary_namespace, secondary_namespace) + if namespace_key not in self.storage: + self.storage[namespace_key] = {} + + self.storage[namespace_key][key] = buf.copy() + + def remove(self, primary_namespace: str, secondary_namespace: str, key: str, lazy: bool) -> None: + with self._lock: + namespace_key = (primary_namespace, secondary_namespace) + if namespace_key not in self.storage: + raise IoError.NotFound(f"Namespace not found: {primary_namespace}/{secondary_namespace}") + + if key not in self.storage[namespace_key]: + raise IoError.NotFound(f"Key not found: {key}") + + del self.storage[namespace_key][key] + + if not self.storage[namespace_key]: + del self.storage[namespace_key] + + def list(self, primary_namespace: str, secondary_namespace: str) -> List[str]: + with self._lock: + namespace_key = (primary_namespace, secondary_namespace) + if namespace_key in self.storage: + return list(self.storage[namespace_key].keys()) + return [] + + async def read_async(self, primary_namespace: str, secondary_namespace: str, key: str) -> List[int]: + return self.read(primary_namespace, secondary_namespace, key) + + async def write_async(self, primary_namespace: str, secondary_namespace: str, key: str, buf: List[int]) -> None: + self.write(primary_namespace, secondary_namespace, key, buf) + + async def remove_async(self, primary_namespace: str, secondary_namespace: str, key: str, lazy: bool) -> None: + self.remove(primary_namespace, secondary_namespace, key, lazy) + + async def list_async(self, primary_namespace: str, secondary_namespace: str) -> List[str]: + return self.list(primary_namespace, secondary_namespace) + \ No newline at end of file diff --git a/bindings/python/src/ldk_node/test_ldk_node.py b/bindings/python/src/ldk_node/test_ldk_node.py index 0b73e6a47..a343cb8ce 100644 --- a/bindings/python/src/ldk_node/test_ldk_node.py +++ b/bindings/python/src/ldk_node/test_ldk_node.py @@ -5,13 +5,67 @@ import os import re import requests +import asyncio +import threading +import ldk_node from ldk_node import * +from kv_store import TestKvStore DEFAULT_ESPLORA_SERVER_URL = "http://127.0.0.1:3002" DEFAULT_TEST_NETWORK = Network.REGTEST DEFAULT_BITCOIN_CLI_BIN = "bitcoin-cli" +class NodeSetup: + def __init__(self, node, node_id, tmp_dir, listening_addresses, stores=None): + self.node = node + self.node_id = node_id + self.tmp_dir = tmp_dir + self.listening_addresses = listening_addresses + self.stores = stores # (primary, backup, ephemeral) or None + + def cleanup(self): + self.node.stop() + time.sleep(1) + self.tmp_dir.cleanup() + +def setup_two_nodes(esplora_endpoint, port_1=2323, port_2=2324, use_tier_store=False): + # Setup Node 1 + tmp_dir_1 = tempfile.TemporaryDirectory("_ldk_node_1") + print("TMP DIR 1:", tmp_dir_1.name) + + listening_addresses_1 = [f"127.0.0.1:{port_1}"] + if use_tier_store: + node_1, stores_1 = 
setup_node_with_tier_store(tmp_dir_1.name, esplora_endpoint, listening_addresses_1) + else: + node_1 = setup_node(tmp_dir_1.name, esplora_endpoint, listening_addresses_1) + stores_1 = None + + node_1.start() + node_id_1 = node_1.node_id() + print("Node ID 1:", node_id_1) + + setup_1 = NodeSetup(node_1, node_id_1, tmp_dir_1, listening_addresses_1, stores_1) + + # Setup Node 2 + tmp_dir_2 = tempfile.TemporaryDirectory("_ldk_node_2") + print("TMP DIR 2:", tmp_dir_2.name) + + listening_addresses_2 = [f"127.0.0.1:{port_2}"] + if use_tier_store: + node_2, stores_2 = setup_node_with_tier_store(tmp_dir_2.name, esplora_endpoint, listening_addresses_2) + else: + node_2 = setup_node(tmp_dir_2.name, esplora_endpoint, listening_addresses_2) + stores_2 = None + + node_2.start() + node_id_2 = node_2.node_id() + print("Node ID 2:", node_id_2) + + setup_2 = NodeSetup(node_2, node_id_2, tmp_dir_2, listening_addresses_2, stores_2) + + return setup_1, setup_2 + def bitcoin_cli(cmd): args = [] @@ -95,7 +149,6 @@ def send_to_address(address, amount_sats): print("SEND TX:", res) return res - def setup_node(tmp_dir, esplora_endpoint, listening_addresses): mnemonic = generate_entropy_mnemonic(None) node_entropy = NodeEntropy.from_bip39_mnemonic(mnemonic, None) @@ -107,6 +160,124 @@ def setup_node(tmp_dir, esplora_endpoint, listening_addresses): builder.set_listening_addresses(listening_addresses) return builder.build(node_entropy) +def setup_node_with_tier_store(tmp_dir, esplora_endpoint, listening_addresses): + mnemonic = generate_entropy_mnemonic(None) + node_entropy = NodeEntropy.from_bip39_mnemonic(mnemonic, None) + config = default_config() + + primary = TestKvStore("primary") + backup = TestKvStore("backup") + ephemeral = TestKvStore("ephemeral") + retry_config = RetryConfig( + initial_retry_delay_ms=10, + maximum_delay_ms=100, + backoff_multiplier=2.0 + ) + + # Set event loop for async Python callbacks from Rust + # (https://mozilla.github.io/uniffi-rs/0.27/futures.html#python-uniffi_set_event_loop) + loop = asyncio.new_event_loop() + + def run_loop(): + asyncio.set_event_loop(loop) + loop.run_forever() + + loop_thread = threading.Thread(target=run_loop, daemon=True) + loop_thread.start() + ldk_node.uniffi_set_event_loop(loop) + + builder = Builder.from_config(config) + builder.set_storage_dir_path(tmp_dir) + builder.set_chain_source_esplora(esplora_endpoint, None) + builder.set_network(DEFAULT_TEST_NETWORK) + builder.set_listening_addresses(listening_addresses) + builder.set_tier_store_retry_config(retry_config) + builder.set_tier_store_backup(FfiDynStore.from_store(backup)) + builder.set_tier_store_ephemeral(FfiDynStore.from_store(ephemeral)) + + return builder.build_with_tier_store(node_entropy, FfiDynStore.from_store(primary)), (primary, backup, ephemeral) + +def do_channel_full_cycle(setup_1, setup_2, esplora_endpoint): + # Fund both nodes + address_1 = setup_1.node.onchain_payment().new_address() + txid_1 = send_to_address(address_1, 100000) + address_2 = setup_2.node.onchain_payment().new_address() + txid_2 = send_to_address(address_2, 100000) + + wait_for_tx(esplora_endpoint, txid_1) + wait_for_tx(esplora_endpoint, txid_2) + mine_and_wait(esplora_endpoint, 6) + + setup_1.node.sync_wallets() + setup_2.node.sync_wallets() + + # Verify balances + spendable_balance_1 = setup_1.node.list_balances().spendable_onchain_balance_sats + spendable_balance_2 = setup_2.node.list_balances().spendable_onchain_balance_sats + assert spendable_balance_1 == 100000 + assert spendable_balance_2 == 100000 + + # Open 
channel + setup_1.node.open_channel(setup_2.node_id, setup_2.listening_addresses[0], 50000, None, None) + + channel_pending_event_1 = setup_1.node.wait_next_event() + assert isinstance(channel_pending_event_1, Event.CHANNEL_PENDING) + setup_1.node.event_handled() + + channel_pending_event_2 = setup_2.node.wait_next_event() + assert isinstance(channel_pending_event_2, Event.CHANNEL_PENDING) + setup_2.node.event_handled() + + funding_txid = channel_pending_event_1.funding_txo.txid + wait_for_tx(esplora_endpoint, funding_txid) + mine_and_wait(esplora_endpoint, 6) + + setup_1.node.sync_wallets() + setup_2.node.sync_wallets() + + channel_ready_event_1 = setup_1.node.wait_next_event() + assert isinstance(channel_ready_event_1, Event.CHANNEL_READY) + setup_1.node.event_handled() + + channel_ready_event_2 = setup_2.node.wait_next_event() + assert isinstance(channel_ready_event_2, Event.CHANNEL_READY) + setup_2.node.event_handled() + + # Make payment + description = Bolt11InvoiceDescription.DIRECT("asdf") + invoice = setup_2.node.bolt11_payment().receive(2500000, description, 9217) + setup_1.node.bolt11_payment().send(invoice, None) + + payment_successful_event_1 = setup_1.node.wait_next_event() + assert isinstance(payment_successful_event_1, Event.PAYMENT_SUCCESSFUL) + setup_1.node.event_handled() + + payment_received_event_2 = setup_2.node.wait_next_event() + assert isinstance(payment_received_event_2, Event.PAYMENT_RECEIVED) + setup_2.node.event_handled() + + # Close channel + setup_2.node.close_channel(channel_ready_event_2.user_channel_id, setup_1.node_id) + + channel_closed_event_1 = setup_1.node.wait_next_event() + assert isinstance(channel_closed_event_1, Event.CHANNEL_CLOSED) + setup_1.node.event_handled() + + channel_closed_event_2 = setup_2.node.wait_next_event() + assert isinstance(channel_closed_event_2, Event.CHANNEL_CLOSED) + setup_2.node.event_handled() + + mine_and_wait(esplora_endpoint, 1) + setup_1.node.sync_wallets() + setup_2.node.sync_wallets() + + # Verify final balances + spendable_balance_after_close_1 = setup_1.node.list_balances().spendable_onchain_balance_sats + assert spendable_balance_after_close_1 > 95000 + assert spendable_balance_after_close_1 < 100000 + spendable_balance_after_close_2 = setup_2.node.list_balances().spendable_onchain_balance_sats + assert spendable_balance_after_close_2 == 102500 + def get_esplora_endpoint(): if os.environ.get('ESPLORA_ENDPOINT'): return str(os.environ['ESPLORA_ENDPOINT']) @@ -122,132 +293,36 @@ def setUp(self): def test_channel_full_cycle(self): esplora_endpoint = get_esplora_endpoint() - - ## Setup Node 1 - tmp_dir_1 = tempfile.TemporaryDirectory("_ldk_node_1") - print("TMP DIR 1:", tmp_dir_1.name) - - listening_addresses_1 = ["127.0.0.1:2323"] - node_1 = setup_node(tmp_dir_1.name, esplora_endpoint, listening_addresses_1) - node_1.start() - node_id_1 = node_1.node_id() - print("Node ID 1:", node_id_1) - - # Setup Node 2 - tmp_dir_2 = tempfile.TemporaryDirectory("_ldk_node_2") - print("TMP DIR 2:", tmp_dir_2.name) - - listening_addresses_2 = ["127.0.0.1:2324"] - node_2 = setup_node(tmp_dir_2.name, esplora_endpoint, listening_addresses_2) - node_2.start() - node_id_2 = node_2.node_id() - print("Node ID 2:", node_id_2) - - address_1 = node_1.onchain_payment().new_address() - txid_1 = send_to_address(address_1, 100000) - address_2 = node_2.onchain_payment().new_address() - txid_2 = send_to_address(address_2, 100000) - - wait_for_tx(esplora_endpoint, txid_1) - wait_for_tx(esplora_endpoint, txid_2) - - mine_and_wait(esplora_endpoint, 6) 
- - node_1.sync_wallets() - node_2.sync_wallets() - - spendable_balance_1 = node_1.list_balances().spendable_onchain_balance_sats - spendable_balance_2 = node_2.list_balances().spendable_onchain_balance_sats - total_balance_1 = node_1.list_balances().total_onchain_balance_sats - total_balance_2 = node_2.list_balances().total_onchain_balance_sats - - print("SPENDABLE 1:", spendable_balance_1) - self.assertEqual(spendable_balance_1, 100000) - - print("SPENDABLE 2:", spendable_balance_2) - self.assertEqual(spendable_balance_2, 100000) - - print("TOTAL 1:", total_balance_1) - self.assertEqual(total_balance_1, 100000) - - print("TOTAL 2:", total_balance_2) - self.assertEqual(total_balance_2, 100000) - - node_1.open_channel(node_id_2, listening_addresses_2[0], 50000, None, None) - - channel_pending_event_1 = node_1.wait_next_event() - assert isinstance(channel_pending_event_1, Event.CHANNEL_PENDING) - print("EVENT:", channel_pending_event_1) - node_1.event_handled() - - channel_pending_event_2 = node_2.wait_next_event() - assert isinstance(channel_pending_event_2, Event.CHANNEL_PENDING) - print("EVENT:", channel_pending_event_2) - node_2.event_handled() - - funding_txid = channel_pending_event_1.funding_txo.txid - wait_for_tx(esplora_endpoint, funding_txid) - mine_and_wait(esplora_endpoint, 6) - - node_1.sync_wallets() - node_2.sync_wallets() - - channel_ready_event_1 = node_1.wait_next_event() - assert isinstance(channel_ready_event_1, Event.CHANNEL_READY) - print("EVENT:", channel_ready_event_1) - print("funding_txo:", funding_txid) - node_1.event_handled() - - channel_ready_event_2 = node_2.wait_next_event() - assert isinstance(channel_ready_event_2, Event.CHANNEL_READY) - print("EVENT:", channel_ready_event_2) - node_2.event_handled() - - description = Bolt11InvoiceDescription.DIRECT("asdf") - invoice = node_2.bolt11_payment().receive(2500000, description, 9217) - node_1.bolt11_payment().send(invoice, None) - - payment_successful_event_1 = node_1.wait_next_event() - assert isinstance(payment_successful_event_1, Event.PAYMENT_SUCCESSFUL) - print("EVENT:", payment_successful_event_1) - node_1.event_handled() - - payment_received_event_2 = node_2.wait_next_event() - assert isinstance(payment_received_event_2, Event.PAYMENT_RECEIVED) - print("EVENT:", payment_received_event_2) - node_2.event_handled() - - node_2.close_channel(channel_ready_event_2.user_channel_id, node_id_1) - - channel_closed_event_1 = node_1.wait_next_event() - assert isinstance(channel_closed_event_1, Event.CHANNEL_CLOSED) - print("EVENT:", channel_closed_event_1) - node_1.event_handled() - - channel_closed_event_2 = node_2.wait_next_event() - assert isinstance(channel_closed_event_2, Event.CHANNEL_CLOSED) - print("EVENT:", channel_closed_event_2) - node_2.event_handled() - - mine_and_wait(esplora_endpoint, 1) - - node_1.sync_wallets() - node_2.sync_wallets() - - spendable_balance_after_close_1 = node_1.list_balances().spendable_onchain_balance_sats - assert spendable_balance_after_close_1 > 95000 - assert spendable_balance_after_close_1 < 100000 - spendable_balance_after_close_2 = node_2.list_balances().spendable_onchain_balance_sats - self.assertEqual(spendable_balance_after_close_2, 102500) - - # Stop nodes - node_1.stop() - node_2.stop() - - # Cleanup - time.sleep(1) # Wait a sec so our logs can finish writing - tmp_dir_1.cleanup() - tmp_dir_2.cleanup() + setup_1, setup_2 = setup_two_nodes(esplora_endpoint) + + do_channel_full_cycle(setup_1, setup_2, esplora_endpoint) + + setup_1.cleanup() + setup_2.cleanup() + + def 
test_tier_store(self): + esplora_endpoint = get_esplora_endpoint() + setup_1, setup_2 = setup_two_nodes(esplora_endpoint, port_1=2325, port_2=2326, use_tier_store=True) + + do_channel_full_cycle(setup_1, setup_2, esplora_endpoint) + + primary, backup, ephemeral = setup_1.stores + + # Wait for async backup + time.sleep(2) + + self.assertGreater(len(primary.storage), 0, "Primary should have data") + self.assertGreater(len(backup.storage), 0, "Backup should have data") + self.assertEqual(list(primary.storage.keys()), list(backup.storage.keys()), + "Backup should mirror primary") + + self.assertGreater(len(ephemeral.storage), 0, "Ephemeral should have data") + ephemeral_keys = [key for namespace in ephemeral.storage.values() for key in namespace.keys()] + has_scorer_or_graph = any(key in ['scorer', 'network_graph'] for key in ephemeral_keys) + self.assertTrue(has_scorer_or_graph, "Ephemeral should contain scorer or network_graph data") + + setup_1.cleanup() + setup_2.cleanup() if __name__ == '__main__': unittest.main() diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index 9add2d6c1..f2b226a5f 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -9,7 +9,8 @@ use std::collections::{hash_map, HashMap}; use std::future::Future; use std::panic::RefUnwindSafe; use std::path::PathBuf; -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; +use std::time::Duration; use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::{ @@ -25,6 +26,8 @@ use lightning::{check_closed_broadcast, io}; use rand::distr::Alphanumeric; use rand::{rng, Rng}; +use crate::runtime::Runtime; + type TestMonitorUpdatePersister<'a, K> = MonitorUpdatingPersister< &'a K, &'a test_utils::TestLogger, @@ -352,3 +355,168 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { // Make sure everything is persisted as expected after close. 
check_persisted_data!(persister_0_max_pending_updates * 2 * EXPECTED_UPDATES_PER_PAYMENT + 1);
 }
+
+struct DelayedStoreInner {
+ storage: Mutex<HashMap<String, Vec<u8>>>,
+ delay: Duration,
+}
+
+impl DelayedStoreInner {
+ fn new(delay: Duration) -> Self {
+ Self { storage: Mutex::new(HashMap::new()), delay }
+ }
+
+ fn make_key(pn: &str, sn: &str, key: &str) -> String {
+ format!("{}/{}/{}", pn, sn, key)
+ }
+
+ async fn read_internal(
+ &self, primary_namespace: String, secondary_namespace: String, key: String,
+ ) -> Result<Vec<u8>, io::Error> {
+ tokio::time::sleep(self.delay).await;
+
+ let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key);
+ let storage = self.storage.lock().unwrap();
+ storage
+ .get(&full_key)
+ .cloned()
+ .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
+ }
+
+ async fn write_internal(
+ &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec<u8>,
+ ) -> Result<(), io::Error> {
+ tokio::time::sleep(self.delay).await;
+
+ let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key);
+ let mut storage = self.storage.lock().unwrap();
+ storage.insert(full_key, buf);
+ Ok(())
+ }
+
+ async fn remove_internal(
+ &self, primary_namespace: String, secondary_namespace: String, key: String,
+ ) -> Result<(), io::Error> {
+ tokio::time::sleep(self.delay).await;
+
+ let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key);
+ let mut storage = self.storage.lock().unwrap();
+ storage.remove(&full_key);
+ Ok(())
+ }
+
+ async fn list_internal(
+ &self, primary_namespace: String, secondary_namespace: String,
+ ) -> Result<Vec<String>, io::Error> {
+ tokio::time::sleep(self.delay).await;
+
+ let prefix = format!("{}/{}/", primary_namespace, secondary_namespace);
+ let storage = self.storage.lock().unwrap();
+ Ok(storage
+ .keys()
+ .filter(|k| k.starts_with(&prefix))
+ .map(|k| k.strip_prefix(&prefix).unwrap().to_string())
+ .collect())
+ }
+}
+
+pub struct DelayedStore {
+ inner: Arc<DelayedStoreInner>,
+ runtime: Arc<Runtime>,
+}
+
+impl DelayedStore {
+ pub fn new(delay_ms: u64, runtime: Arc<Runtime>) -> Self {
+ Self { inner: Arc::new(DelayedStoreInner::new(Duration::from_millis(delay_ms))), runtime }
+ }
+}
+
+impl KVStore for DelayedStore {
+ fn read(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+ ) -> impl Future<Output = Result<Vec<u8>, io::Error>> + 'static + Send {
+ let inner = Arc::clone(&self.inner);
+ let pn = primary_namespace.to_string();
+ let sn = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ async move { inner.read_internal(pn, sn, key).await }
+ }
+
+ fn write(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+ ) -> impl Future<Output = Result<(), io::Error>> + 'static + Send {
+ let inner = Arc::clone(&self.inner);
+ let pn = primary_namespace.to_string();
+ let sn = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ async move { inner.write_internal(pn, sn, key, buf).await }
+ }
+
+ fn remove(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool,
+ ) -> impl Future<Output = Result<(), io::Error>> + 'static + Send {
+ let inner = Arc::clone(&self.inner);
+ let pn = primary_namespace.to_string();
+ let sn = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ async move { inner.remove_internal(pn, sn, key).await }
+ }
+
+ fn list(
+ &self, primary_namespace: &str, secondary_namespace: &str,
+ ) -> impl Future<Output = Result<Vec<String>, io::Error>> + 'static + Send {
+ let inner = Arc::clone(&self.inner);
+ let pn = primary_namespace.to_string();
+ let sn = secondary_namespace.to_string();
+
+ async move { inner.list_internal(pn, sn).await }
+ }
+}
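+
+// Illustrative use: `DelayedStore::new(50, runtime)` makes every operation
+// take ~50ms, letting tests observe slow primaries (retry paths) or the lag
+// of the asynchronous backup queue.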
+
+impl KVStoreSync for DelayedStore {
+ fn read(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+ ) -> Result<Vec<u8>, io::Error> {
+ let inner = Arc::clone(&self.inner);
+ let pn = primary_namespace.to_string();
+ let sn = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ self.runtime.block_on(async move { inner.read_internal(pn, sn, key).await })
+ }
+
+ fn write(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+ ) -> Result<(), io::Error> {
+ let inner = Arc::clone(&self.inner);
+ let pn = primary_namespace.to_string();
+ let sn = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ self.runtime.block_on(async move { inner.write_internal(pn, sn, key, buf).await })
+ }
+
+ fn remove(
+ &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool,
+ ) -> Result<(), io::Error> {
+ let inner = Arc::clone(&self.inner);
+ let pn = primary_namespace.to_string();
+ let sn = secondary_namespace.to_string();
+ let key = key.to_string();
+
+ self.runtime.block_on(async move { inner.remove_internal(pn, sn, key).await })
+ }
+
+ fn list(
+ &self, primary_namespace: &str, secondary_namespace: &str,
+ ) -> Result<Vec<String>, io::Error> {
+ let inner = Arc::clone(&self.inner);
+ let pn = primary_namespace.to_string();
+ let sn = secondary_namespace.to_string();
+
+ self.runtime.block_on(async move { inner.list_internal(pn, sn).await })
+ }
+}
diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs
index 1f201e9f4..e91c9892c 100644
--- a/src/io/tier_store.rs
+++ b/src/io/tier_store.rs
@@ -28,7 +28,10 @@ use std::time::Duration;
 
 // todo(enigbe): Uncertain about appropriate queue size and if this would need
 // configuring.
+#[cfg(not(test))]
 const BACKUP_QUEUE_CAPACITY: usize = 100;
+#[cfg(test)]
+const BACKUP_QUEUE_CAPACITY: usize = 5;
 
 const DEFAULT_INITIAL_RETRY_DELAY_MS: u16 = 10;
 const DEFAULT_MAXIMUM_RETRY_DELAY_MS: u16 = 500;
@@ -1125,143 +1128,294 @@ impl BackupOp {
 
 #[cfg(test)]
 mod tests {
- use crate::io::test_utils::random_storage_path;
+ use std::panic::RefUnwindSafe;
+ use std::path::PathBuf;
+ use std::sync::Arc;
+ use std::thread;
+
+ use lightning::util::logger::Level;
+ use lightning::util::persist::{
+ CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+ CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+ };
+ use lightning_persister::fs_store::FilesystemStore;
+
+ use crate::io::test_utils::{
+ do_read_write_remove_list_persist, random_storage_path, DelayedStore,
+ };
 use crate::io::tier_store::{RetryConfig, TierStore};
 use crate::logger::Logger;
 use crate::runtime::Runtime;
 #[cfg(not(feature = "uniffi"))]
 use crate::types::DynStore;
 use crate::types::DynStoreWrapper;
- #[cfg(feature = "uniffi")]
- use crate::DynStore;
 
- use lightning::util::logger::Level;
- use lightning::util::persist::{
- KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
- NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY,
- };
- use lightning_persister::fs_store::FilesystemStore;
+ use super::*;
 
- use std::path::PathBuf;
- use std::sync::Arc;
- // use std::time::Duration;
-
- struct StorageFixture {
- tier: TierStore,
- primary: Arc<DynStore>,
- ephemeral: Option<Arc<DynStore>>,
- backup: Option<Arc<DynStore>>,
- base_dir: PathBuf,
- }
+ impl RefUnwindSafe for TierStore {}
 
- impl Drop for StorageFixture {
+ struct CleanupDir(PathBuf);
+ impl Drop for CleanupDir {
 fn drop(&mut self) {
- drop(self.backup.take());
- drop(self.ephemeral.take());
-
- if let Err(e) = std::fs::remove_dir_all(&self.base_dir) {
- eprintln!("Failed to clean up test directory {:?}: {}", self.base_dir, e);
- }
+ let _ = std::fs::remove_dir_all(&self.0);
 }
 }
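+
+ // RAII guard: binding CleanupDir to `_cleanup` removes the temp dir at the
+ // end of each test, including on panic/unwind (illustrative of the intent).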
drop(self.ephemeral.take()); - - if let Err(e) = std::fs::remove_dir_all(&self.base_dir) { - eprintln!("Failed to clean up test directory {:?}: {}", self.base_dir, e); - } + let _ = std::fs::remove_dir_all(&self.0); } } - fn setup_tier_store(ephemeral: bool, backup: bool) -> StorageFixture { + fn setup_tier_store( + primary_store: Arc, logger: Arc, runtime: Arc, + ) -> TierStore { + let retry_config = RetryConfig::default(); + TierStore::new(primary_store, runtime, logger, retry_config) + } + + #[test] + fn write_read_list_remove() { let base_dir = random_storage_path(); let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap()); - let primary: Arc = - Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary_store")))); - let logger = Arc::new( - Logger::new_fs_writer(log_path, Level::Debug) - .expect("Failed to create filesystem logger"), - ); - let runtime = - Arc::new(Runtime::new(Arc::clone(&logger)).expect("Failed to create new runtime.")); - let retry_config = RetryConfig::default(); - let mut tier = - TierStore::new(Arc::clone(&primary), Arc::clone(&runtime), logger, retry_config); + let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap()); + let _cleanup = CleanupDir(base_dir.clone()); - let ephemeral = if ephemeral { - let eph_store: Arc = - Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("eph_store")))); - tier.set_ephemeral_store(Arc::clone(&eph_store)); - Some(eph_store) - } else { - None - }; + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let tier = setup_tier_store(primary_store, logger, runtime); - let backup = if backup { - let backup: Arc = - Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("backup_store")))); - tier.set_backup_store(Arc::clone(&backup)); - Some(backup) - } else { - None - }; - - StorageFixture { tier, primary, ephemeral, backup, base_dir } + do_read_write_remove_list_persist(&tier); } #[test] - fn writes_to_ephemeral_if_configured() { - let tier = setup_tier_store(true, false); - assert!(tier.ephemeral.is_some()); + fn ephemeral_routing() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap()); + + let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let mut tier = setup_tier_store(Arc::clone(&primary_store), logger, runtime); - let primary_namespace = NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE; - let secondary_namespace = NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE; - let data = [42u8; 32].to_vec(); + let ephemeral_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("ephemeral")))); + tier.set_ephemeral_store(Arc::clone(&ephemeral_store)); + let data = vec![42u8; 32]; + + // Non-critical KVStoreSync::write( - &tier.tier, - primary_namespace, - secondary_namespace, + &tier, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, data.clone(), ) .unwrap(); + // Critical KVStoreSync::write( - &tier.tier, - primary_namespace, - secondary_namespace, - SCORER_PERSISTENCE_KEY, + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + 
CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, data.clone(), ) .unwrap(); - let eph_store = tier.ephemeral.clone().unwrap(); - let ng_read = KVStoreSync::read( - &*eph_store, - primary_namespace, - secondary_namespace, + let primary_read_ng = KVStoreSync::read( + &*primary_store, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, + ); + let ephemeral_read_ng = KVStoreSync::read( + &*ephemeral_store, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + ); + + let primary_read_cm = KVStoreSync::read( + &*primary_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + let ephemeral_read_cm = KVStoreSync::read( + &*ephemeral_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + + assert!(primary_read_ng.is_err()); + assert_eq!(ephemeral_read_ng.unwrap(), data); + + assert!(ephemeral_read_cm.is_err()); + assert_eq!(primary_read_cm.unwrap(), data); + } + + #[test] + fn lazy_backup() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap()); + let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let mut tier = setup_tier_store(Arc::clone(&primary_store), logger, runtime); + + let backup_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("backup")))); + tier.set_backup_store(Arc::clone(&backup_store)); + + let data = vec![42u8; 32]; + + KVStoreSync::write( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + data.clone(), ) .unwrap(); - let sc_read = KVStoreSync::read( - &*eph_store, - primary_namespace, - secondary_namespace, - SCORER_PERSISTENCE_KEY, + // Immediate read from backup should fail + let backup_read_cm = KVStoreSync::read( + &*backup_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert!(backup_read_cm.is_err()); + + // Primary not blocked by backup hence immediate read should succeed + let primary_read_cm = KVStoreSync::read( + &*primary_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert_eq!(primary_read_cm.unwrap(), data); + + // Delayed read from backup should succeed + thread::sleep(Duration::from_millis(50)); + let backup_read_cm = KVStoreSync::read( + &*backup_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert_eq!(backup_read_cm.unwrap(), data); + } + + #[test] + fn backup_overflow_doesnt_fail_writes() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path.clone(), Level::Trace).unwrap()); + let runtime = 
Arc::new(Runtime::new(Arc::clone(&logger)).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let mut tier = + setup_tier_store(Arc::clone(&primary_store), Arc::clone(&logger), Arc::clone(&runtime)); + + let backup_store: Arc = + Arc::new(DynStoreWrapper(DelayedStore::new(100, runtime))); + tier.set_backup_store(Arc::clone(&backup_store)); + + let data = vec![42u8; 32]; + + let key = CHANNEL_MANAGER_PERSISTENCE_KEY; + for i in 0..=10 { + let result = KVStoreSync::write( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + &format!("{}_{}", key, i), + data.clone(), + ); + + assert!(result.is_ok(), "Write {} should succeed", i); + } + + // Check logs for backup queue overflow message + let log_contents = std::fs::read_to_string(&log_path).unwrap(); + assert!( + log_contents.contains("Backup queue is full"), + "Logs should contain backup queue overflow message" + ); + } + + #[test] + fn lazy_removal() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path.clone(), Level::Trace).unwrap()); + let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let mut tier = + setup_tier_store(Arc::clone(&primary_store), Arc::clone(&logger), Arc::clone(&runtime)); + + let backup_store: Arc = + Arc::new(DynStoreWrapper(DelayedStore::new(100, runtime))); + tier.set_backup_store(Arc::clone(&backup_store)); + + let data = vec![42u8; 32]; + + let key = CHANNEL_MANAGER_PERSISTENCE_KEY; + let write_result = KVStoreSync::write( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + data.clone(), + ); + assert!(write_result.is_ok(), "Write should succeed"); + + thread::sleep(Duration::from_millis(10)); + + assert_eq!( + KVStoreSync::read( + &*backup_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + ) + .unwrap(), + data + ); + + KVStoreSync::remove( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + true, ) .unwrap(); - assert_eq!(ng_read, data); - assert!(KVStoreSync::read( - &*tier.primary, - primary_namespace, - secondary_namespace, - NETWORK_GRAPH_PERSISTENCE_KEY - ) - .is_err()); + thread::sleep(Duration::from_millis(10)); - assert_eq!(sc_read, data); - assert!(KVStoreSync::read( - &*tier.primary, - primary_namespace, - secondary_namespace, - SCORER_PERSISTENCE_KEY - ) - .is_err()); + let res = KVStoreSync::read( + &*backup_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + ); + + assert!(res.is_err()); } } diff --git a/src/lib.rs b/src/lib.rs index 75ab773de..55b96a5d7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -134,6 +134,8 @@ use event::{EventHandler, EventQueue}; use fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; #[cfg(feature = "uniffi")] use ffi::*; +#[cfg(feature = "uniffi")] +pub use ffi::{FfiDynStore, ForeignDynStoreTrait, IOError}; use gossip::GossipSource; use graph::NetworkGraph; pub use io::tier_store::RetryConfig; @@ -165,7 +167,8 @@ use types::{ 
 	KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet,
 };
 pub use types::{
-	ChannelDetails, CustomTlvRecord, DynStore, PeerDetails, SyncAndAsyncKVStore, UserChannelId,
+	ChannelDetails, CustomTlvRecord, DynStore, DynStoreWrapper, PeerDetails, SyncAndAsyncKVStore,
+	UserChannelId,
 };
 pub use {
 	bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio,
diff --git a/src/types.rs b/src/types.rs
index 7c80609b3..a7ea89e48 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -136,7 +136,8 @@ impl<'a> KVStoreSync for dyn DynStoreTrait + 'a {
 
 /// Type alias for any store that implements DynStoreTrait.
 pub type DynStore = dyn DynStoreTrait;
 
-pub(crate) struct DynStoreWrapper<T: SyncAndAsyncKVStore>(pub(crate) T);
+/// A wrapper that allows using any [`SyncAndAsyncKVStore`] implementor as a trait object.
+pub struct DynStoreWrapper<T: SyncAndAsyncKVStore>(pub T);
 
 impl<T: SyncAndAsyncKVStore> DynStoreTrait for DynStoreWrapper<T> {
 	fn read_async(
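With `DynStoreWrapper` exported, any `SyncAndAsyncKVStore` implementor can be coerced into the `DynStore` trait object that the tier-store APIs expect. A minimal sketch under that assumption (the `FilesystemStore` path is illustrative; the tests below wrap `SqliteStore` and `FilesystemStore` the same way):

    use std::sync::Arc;

    use ldk_node::{DynStore, DynStoreWrapper};
    use lightning_persister::fs_store::FilesystemStore;

    // Coerce the concrete wrapper into the `dyn DynStoreTrait` alias so it
    // can be handed wherever an `Arc<DynStore>` is expected.
    let store: Arc<DynStore> =
        Arc::new(DynStoreWrapper(FilesystemStore::new("/tmp/ldk_store".into())));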
setup_builder { pub(crate) use setup_builder; +#[cfg(feature = "uniffi")] +struct TestForeignDynStoreAdapter(Arc); + +#[cfg(feature = "uniffi")] +#[async_trait] +impl ForeignDynStoreTrait for TestForeignDynStoreAdapter { + async fn read_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> Result, IOError> { + self.0.read_async(&primary_namespace, &secondary_namespace, &key).await.map_err(Into::into) + } + + async fn write_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> Result<(), IOError> { + self.0 + .write_async(&primary_namespace, &secondary_namespace, &key, buf) + .await + .map_err(Into::into) + } + + async fn remove_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> Result<(), IOError> { + self.0 + .remove_async(&primary_namespace, &secondary_namespace, &key, lazy) + .await + .map_err(Into::into) + } + + async fn list_async( + &self, primary_namespace: String, secondary_namespace: String, + ) -> Result, IOError> { + self.0.list_async(&primary_namespace, &secondary_namespace).await.map_err(Into::into) + } + + fn read( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> Result, IOError> { + self.0.read(&primary_namespace, &secondary_namespace, &key).map_err(Into::into) + } + + fn write( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> Result<(), IOError> { + self.0.write(&primary_namespace, &secondary_namespace, &key, buf).map_err(Into::into) + } + + fn remove( + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> Result<(), IOError> { + self.0.remove(&primary_namespace, &secondary_namespace, &key, lazy).map_err(Into::into) + } + + fn list( + &self, primary_namespace: String, secondary_namespace: String, + ) -> Result, IOError> { + self.0.list(&primary_namespace, &secondary_namespace).map_err(Into::into) + } +} + +pub(crate) fn create_tier_stores(base_path: PathBuf) -> (TestDynStore, TestDynStore, TestDynStore) { + let primary = Arc::new(DynStoreWrapper( + SqliteStore::new( + base_path.join("primary"), + Some("primary_db".to_string()), + Some("primary_kv".to_string()), + ) + .unwrap(), + )); + let backup = Arc::new(DynStoreWrapper(FilesystemStore::new(base_path.join("backup")))); + let ephemeral = Arc::new(DynStoreWrapper(TestStore::new(false))); + + #[cfg(feature = "uniffi")] + { + ( + Arc::new(FfiDynStore::from_store(Arc::new(TestForeignDynStoreAdapter(primary)))), + Arc::new(FfiDynStore::from_store(Arc::new(TestForeignDynStoreAdapter(backup)))), + Arc::new(FfiDynStore::from_store(Arc::new(TestForeignDynStoreAdapter(ephemeral)))), + ) + } + #[cfg(not(feature = "uniffi"))] + { + (primary, backup, ephemeral) + } +} + pub(crate) fn setup_two_nodes( chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, anchors_trusted_no_reserve: bool, @@ -363,21 +465,22 @@ pub(crate) fn setup_two_nodes( anchor_channels, anchors_trusted_no_reserve, TestStoreType::TestSyncStore, + TestStoreType::TestSyncStore, ) } pub(crate) fn setup_two_nodes_with_store( chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, - anchors_trusted_no_reserve: bool, store_type: TestStoreType, + anchors_trusted_no_reserve: bool, store_type_a: TestStoreType, store_type_b: TestStoreType, ) -> (TestNode, TestNode) { println!("== Node A =="); let mut config_a = random_config(anchor_channels); - config_a.store_type = store_type; + 
config_a.store_type = store_type_a; let node_a = setup_node(chain_source, config_a); println!("\n== Node B =="); let mut config_b = random_config(anchor_channels); - config_b.store_type = store_type; + config_b.store_type = store_type_b; if allow_0conf { config_b.node_config.trusted_peers_0conf.push(node_a.node_id()); } @@ -460,6 +563,16 @@ pub(crate) fn setup_node(chain_source: &TestChainSource, config: TestConfig) -> builder.build_with_store(config.node_entropy.into(), kv_store).unwrap() }, TestStoreType::Sqlite => builder.build(config.node_entropy.into()).unwrap(), + TestStoreType::TierStore { primary, backup, ephemeral } => { + if let Some(backup) = backup { + builder.set_tier_store_backup(backup); + } + if let Some(ephemeral) = ephemeral { + builder.set_tier_store_ephemeral(ephemeral); + } + builder.set_tier_store_retry_config(RetryConfig::default()); + builder.build_with_tier_store(config.node_entropy.into(), primary).unwrap() + }, }; if config.recovery_mode { diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 9ea05aa1e..ddda59ea9 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -36,10 +36,17 @@ use ldk_node::{Builder, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; use lightning::routing::gossip::{NodeAlias, NodeId}; use lightning::routing::router::RouteParametersConfig; +use lightning::util::persist::{ + KVStoreSync, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, +}; use lightning_invoice::{Bolt11InvoiceDescription, Description}; use lightning_types::payment::{PaymentHash, PaymentPreimage}; use log::LevelFilter; +use crate::common::{create_tier_stores, random_storage_path, setup_two_nodes_with_store}; + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); @@ -49,6 +56,85 @@ async fn channel_full_cycle() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_tier_store() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); + let (primary_a, backup_a, ephemeral_a) = create_tier_stores(random_storage_path()); + let (primary_b, backup_b, ephemeral_b) = create_tier_stores(random_storage_path()); + + let (node_a, node_b) = setup_two_nodes_with_store( + &chain_source, + false, + true, + false, + TestStoreType::TierStore { + primary: Arc::clone(&primary_a), + backup: Some(Arc::clone(&backup_a)), + ephemeral: Some(Arc::clone(&ephemeral_a)), + }, + TestStoreType::TierStore { + primary: Arc::clone(&primary_b), + backup: Some(Arc::clone(&backup_b)), + ephemeral: Some(Arc::clone(&ephemeral_b)), + }, + ); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) + .await; + + // Verify Primary store contains channel manager data + let primary_channel_manager = KVStoreSync::read( + primary_a.as_ref(), + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert!(primary_channel_manager.is_ok(), "Primary should have channel manager data"); + + // Verify Primary store contains payment info + let primary_payments = KVStoreSync::list(primary_a.as_ref(), "payments", ""); + 
assert!(primary_payments.is_ok(), "Primary should have payment data"); + assert!(!primary_payments.unwrap().is_empty(), "Primary should have payment entries"); + + // Verify Backup store synced critical data + let backup_channel_manager = KVStoreSync::read( + backup_a.as_ref(), + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert!(backup_channel_manager.is_ok(), "Backup should have synced channel manager"); + + // Verify backup is not empty + let backup_all_keys = KVStoreSync::list(backup_a.as_ref(), "", "").unwrap(); + assert!(!backup_all_keys.is_empty(), "Backup store should not be empty"); + + // Verify Ephemeral does NOT have channel manager + let ephemeral_channel_manager = KVStoreSync::read( + ephemeral_a.as_ref(), + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert!(ephemeral_channel_manager.is_err(), "Ephemeral should NOT have channel manager"); + + // Verify Ephemeral does NOT have payment info + let ephemeral_payments = KVStoreSync::list(ephemeral_a.as_ref(), "payments", ""); + assert!( + ephemeral_payments.is_err() || ephemeral_payments.unwrap().is_empty(), + "Ephemeral should NOT have payment data" + ); + + //Verify Ephemeral does have network graph + let ephemeral_network_graph = KVStoreSync::read( + ephemeral_a.as_ref(), + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + ); + assert!(ephemeral_network_graph.is_ok(), "Ephemeral should have network graph"); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_full_cycle_force_close() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); From 7cbe154353eac8b4be9b5f20ff8367d43af98221 Mon Sep 17 00:00:00 2001 From: Enigbe Date: Mon, 2 Feb 2026 18:17:21 +0100 Subject: [PATCH 65/75] fixup! 
Introduce and configure node with tiered KVStore remove features relating to retry configuration and reading/listing from backup store as a fallback strategy --- bindings/ldk_node.udl | 7 - bindings/python/src/ldk_node/test_ldk_node.py | 8 +- src/builder.rs | 44 +- src/io/tier_store.rs | 400 ++++-------------- src/io/utils.rs | 14 - src/lib.rs | 1 - tests/common/mod.rs | 3 +- 7 files changed, 77 insertions(+), 400 deletions(-) diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index b479924b6..7ef7dc06b 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -79,12 +79,6 @@ enum WordCount { "Words24", }; -dictionary RetryConfig { - u16 initial_retry_delay_ms; - u16 maximum_delay_ms; - f32 backoff_multiplier; -}; - enum LogLevel { "Gossip", "Trace", @@ -180,7 +174,6 @@ interface Builder { void set_announcement_addresses(sequence announcement_addresses); [Throws=BuildError] void set_node_alias(string node_alias); - void set_tier_store_retry_config(RetryConfig retry_config); void set_tier_store_backup(FfiDynStore backup_store); void set_tier_store_ephemeral(FfiDynStore ephemeral_store); [Throws=BuildError] diff --git a/bindings/python/src/ldk_node/test_ldk_node.py b/bindings/python/src/ldk_node/test_ldk_node.py index a343cb8ce..3979ee784 100644 --- a/bindings/python/src/ldk_node/test_ldk_node.py +++ b/bindings/python/src/ldk_node/test_ldk_node.py @@ -168,12 +168,7 @@ def setup_node_with_tier_store(tmp_dir, esplora_endpoint, listening_addresses): primary = TestKvStore("primary") backup = TestKvStore("backup") ephemeral = TestKvStore("ephemeral") - retry_config = RetryConfig( - initial_retry_delay_ms=10, - maximum_delay_ms=100, - backoff_multiplier=2.0 - ) - + # Set event loop for async Python callbacks from Rust # (https://mozilla.github.io/uniffi-rs/0.27/futures.html#python-uniffi_set_event_loop) loop = asyncio.new_event_loop() @@ -191,7 +186,6 @@ def run_loop(): builder.set_chain_source_esplora(esplora_endpoint, None) builder.set_network(DEFAULT_TEST_NETWORK) builder.set_listening_addresses(listening_addresses) - builder.set_tier_store_retry_config(retry_config) builder.set_tier_store_backup(FfiDynStore.from_store(backup)) builder.set_tier_store_ephemeral(FfiDynStore.from_store(ephemeral)) diff --git a/src/builder.rs b/src/builder.rs index d4f4252b3..9ec60ab26 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -56,7 +56,7 @@ use crate::fee_estimator::OnchainFeeEstimator; use crate::ffi::FfiDynStore; use crate::gossip::GossipSource; use crate::io::sqlite_store::SqliteStore; -use crate::io::tier_store::{RetryConfig, TierStore}; +use crate::io::tier_store::TierStore; use crate::io::utils::{ read_event_queue, read_external_pathfinding_scores_from_cache, read_network_graph, read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_pending_payments, @@ -157,7 +157,6 @@ impl std::fmt::Debug for LogWriterConfig { struct TierStoreConfig { ephemeral: Option>, backup: Option>, - retry: Option, } impl std::fmt::Debug for TierStoreConfig { @@ -165,7 +164,6 @@ impl std::fmt::Debug for TierStoreConfig { f.debug_struct("TierStoreConfig") .field("ephemeral", &self.ephemeral.as_ref().map(|_| "Arc")) .field("backup", &self.backup.as_ref().map(|_| "Arc")) - .field("retry", &self.retry) .finish() } } @@ -580,21 +578,6 @@ impl NodeBuilder { self } - /// Configures retry behavior for transient errors when accessing the primary store. 
- /// - /// When building with [`build_with_tier_store`], controls the exponential backoff parameters - /// used when retrying failed operations on the primary store due to transient errors - /// (network issues, timeouts, etc.). - /// - /// If not set, default retry parameters are used. See [`RetryConfig`] for details. - /// - /// [`build_with_tier_store`]: Self::build_with_tier_store - pub fn set_tier_store_retry_config(&mut self, config: RetryConfig) -> &mut Self { - let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default()); - tier_store_config.retry = Some(config); - self - } - /// Configures the backup store for local disaster recovery. /// /// When building with [`build_with_tier_store`], this store receives asynchronous copies @@ -757,16 +740,14 @@ impl NodeBuilder { /// /// ## Configuration /// - /// Use the setter methods to configure optional stores and retry behavior: + /// Use the setter methods to configure optional stores: /// - [`set_tier_store_ephemeral`] - Set local store for network graph and scorer /// - [`set_tier_store_backup`] - Set local backup store for disaster recovery - /// - [`set_tier_store_retry_config`] - Configure retry delays and backoff for transient errors /// /// ## Example /// /// ```ignore /// # use ldk_node::{Builder, Config}; - /// # use ldk_node::io::tier_store::RetryConfig; /// # use std::sync::Arc; /// let config = Config::default(); /// let mut builder = NodeBuilder::from_config(config); @@ -774,12 +755,10 @@ impl NodeBuilder { /// let primary = Arc::new(VssStore::new(...)); /// let ephemeral = Arc::new(FilesystemStore::new(...)); /// let backup = Arc::new(SqliteStore::new(...)); - /// let retry_config = RetryConfig::default(); /// /// builder /// .set_tier_store_ephemeral(ephemeral) - /// .set_tier_store_backup(backup) - /// .set_tier_store_retry_config(retry_config); + /// .set_tier_store_backup(backup); /// /// let node = builder.build_with_tier_store(primary)?; /// # Ok::<(), ldk_node::BuildError>(()) @@ -787,7 +766,6 @@ impl NodeBuilder { /// /// [`set_tier_store_ephemeral`]: Self::set_tier_store_ephemeral /// [`set_tier_store_backup`]: Self::set_tier_store_backup - /// [`set_tier_store_retry_config`]: Self::set_tier_store_retry_config #[cfg(not(feature = "uniffi"))] pub fn build_with_tier_store( &self, node_entropy: NodeEntropy, primary_store: Arc, @@ -809,10 +787,9 @@ impl NodeBuilder { }; let ts_config = self.tier_store_config.as_ref(); - let retry_config = ts_config.and_then(|c| c.retry).unwrap_or_default(); let mut tier_store = - TierStore::new(primary_store, Arc::clone(&runtime), Arc::clone(&logger), retry_config); + TierStore::new(primary_store, Arc::clone(&runtime), Arc::clone(&logger)); if let Some(config) = ts_config { config.ephemeral.as_ref().map(|s| tier_store.set_ephemeral_store(Arc::clone(s))); @@ -1097,19 +1074,6 @@ impl ArcedNodeBuilder { self.inner.write().unwrap().set_wallet_recovery_mode(); } - /// Configures retry behavior for transient errors when accessing the primary store. - /// - /// When building with [`build_with_tier_store`], controls the exponential backoff parameters - /// used when retrying failed operations on the primary store due to transient errors - /// (network issues, timeouts, etc.). - /// - /// If not set, default retry parameters are used. See [`RetryConfig`] for details. 
- /// - /// [`build_with_tier_store`]: Self::build_with_tier_store - pub fn set_tier_store_retry_config(&self, config: RetryConfig) { - self.inner.write().unwrap().set_tier_store_retry_config(config); - } - /// Configures the backup store for local disaster recovery. /// /// When building with [`build_with_tier_store`], this store receives asynchronous copies diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs index e91c9892c..7816b5a1d 100644 --- a/src/io/tier_store.rs +++ b/src/io/tier_store.rs @@ -5,7 +5,7 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::io::utils::{check_namespace_key_validity, is_possibly_transient}; +use crate::io::utils::check_namespace_key_validity; use crate::logger::{LdkLogger, Logger}; use crate::runtime::Runtime; use crate::types::DynStore; @@ -24,7 +24,6 @@ use std::collections::HashMap; use std::future::Future; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; -use std::time::Duration; // todo(enigbe): Uncertain about appropriate queue size and if this would need // configuring. @@ -33,33 +32,6 @@ const BACKUP_QUEUE_CAPACITY: usize = 100; #[cfg(test)] const BACKUP_QUEUE_CAPACITY: usize = 5; -const DEFAULT_INITIAL_RETRY_DELAY_MS: u16 = 10; -const DEFAULT_MAXIMUM_RETRY_DELAY_MS: u16 = 500; -const DEFAULT_BACKOFF_MULTIPLIER: f32 = 1.5; - -/// Configuration for exponential backoff retry behavior. -#[derive(Debug, Copy, Clone)] -pub struct RetryConfig { - /// The initial delay before the first retry attempt, in milliseconds. - pub initial_retry_delay_ms: u16, - /// The maximum delay between retry attempts, in milliseconds. - pub maximum_delay_ms: u16, - /// The multiplier applied to the delay after each retry attempt. - /// - /// For example, a value of `2.0` doubles the delay after each failed retry. - pub backoff_multiplier: f32, -} - -impl Default for RetryConfig { - fn default() -> Self { - Self { - initial_retry_delay_ms: DEFAULT_INITIAL_RETRY_DELAY_MS, - maximum_delay_ms: DEFAULT_MAXIMUM_RETRY_DELAY_MS, - backoff_multiplier: DEFAULT_BACKOFF_MULTIPLIER, - } - } -} - /// A 3-tiered [`KVStoreSync`] implementation that manages data across /// three distinct storage locations, i.e. primary (preferably remote) /// store for all critical data, optional ephemeral (local) store for @@ -74,11 +46,8 @@ pub(crate) struct TierStore { } impl TierStore { - pub fn new( - primary_store: Arc, runtime: Arc, logger: Arc, - retry_config: RetryConfig, - ) -> Self { - let inner = Arc::new(TierStoreInner::new(primary_store, Arc::clone(&logger), retry_config)); + pub fn new(primary_store: Arc, runtime: Arc, logger: Arc) -> Self { + let inner = Arc::new(TierStoreInner::new(primary_store, Arc::clone(&logger))); Self { inner, next_version: AtomicU64::new(1), runtime, logger } } @@ -353,7 +322,6 @@ pub struct TierStoreInner { backup_store: Option>, backup_sender: Option>, logger: Arc, - retry_config: RetryConfig, /// Per-key locks for the available data tiers, i.e. (primary, backup, ephemeral), /// that ensures we don't have concurrent writes to the same namespace/key. locks: Mutex>>>, @@ -361,16 +329,13 @@ pub struct TierStoreInner { impl TierStoreInner { /// Creates a tier store with the primary (remote) data store. 
- pub fn new( - primary_store: Arc, logger: Arc, retry_config: RetryConfig, - ) -> Self { + pub fn new(primary_store: Arc, logger: Arc) -> Self { Self { primary_store, ephemeral_store: None, backup_store: None, backup_sender: None, logger, - retry_config, locks: Mutex::new(HashMap::new()), } } @@ -483,272 +448,75 @@ impl TierStoreInner { Ok(()) } - /// Reads data from the backup store (if configured). - fn read_from_backup( + /// Reads from the primary data store. + async fn read_primary( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result> { - if let Some(backup) = self.backup_store.as_ref() { - KVStoreSync::read(backup.as_ref(), primary_namespace, secondary_namespace, key) - } else { - Err(io::Error::new(io::ErrorKind::NotFound, "Backup store not previously configured.")) + match KVStore::read( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + ) + .await + { + Ok(data) => { + log_info!( + self.logger, + "Read succeeded for key: {}/{}/{}", + primary_namespace, + secondary_namespace, + key + ); + Ok(data) + }, + Err(e) => { + log_error!( + self.logger, + "Failed to read from primary store for key {}/{}/{}: {}.", + primary_namespace, + secondary_namespace, + key, + e + ); + Err(e) + }, } } - /// Lists keys from the given primary and secondary namespace pair from the backup - /// store (if configured). - fn list_from_backup( + /// Lists keys from the primary data store. + async fn list_primary( &self, primary_namespace: &str, secondary_namespace: &str, ) -> io::Result> { - if let Some(backup) = &self.backup_store { - KVStoreSync::list(backup.as_ref(), primary_namespace, secondary_namespace) - } else { - Err(io::Error::new(io::ErrorKind::NotFound, "Backup store not previously configured.")) - } - } - - /// Reads from the primary data store with basic retry logic, or falls back to backup. - /// - /// For transient errors, retries up to a maximum delay time with exponential - /// backoff. For any error (transient after exhaustion or non-transient), falls - /// to the backup store (if configured). - async fn read_primary_or_backup( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> io::Result> { - let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64); - let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64); - let mut tries = 0_u16; - - loop { - match KVStore::read( - self.primary_store.as_ref(), - primary_namespace, - secondary_namespace, - key, - ) + match KVStore::list(self.primary_store.as_ref(), primary_namespace, secondary_namespace) .await - { - Ok(data) => { - log_info!( - self.logger, - "Read succeeded after {} retries for key: {}/{}/{}", - tries, - primary_namespace, - secondary_namespace, - key - ); - return Ok(data); - }, - - Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => { - log_warn!( - self.logger, - "Possible transient error reading key {}/{}/{} (attempt {}): {}. Retrying...", - primary_namespace, - secondary_namespace, - key, - tries + 1, - e - ); - tries += 1; - tokio::time::sleep(delay).await; - delay = std::cmp::min( - delay.mul_f32(self.retry_config.backoff_multiplier), - maximum_delay, - ); - }, - - Err(e) => { - log_error!(self.logger, "Failed to read from primary store for key {}/{}/{}: {}. 
Falling back to backup.", - primary_namespace, secondary_namespace, key, e); - return self.read_from_backup(primary_namespace, secondary_namespace, key); - }, - } - } - } - - /// Lists keys from the primary data store with retry logic, or falls back to backup. - /// - /// For transient errors, retries up to a maximum delay time with exponential - /// backoff. For any error (transient after exhaustion or non-transient), falls - /// back to the backup store (if configured) for disaster recovery. - async fn list_primary_or_backup( - &self, primary_namespace: &str, secondary_namespace: &str, - ) -> io::Result> { - let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64); - let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64); - let mut tries = 0_u16; - - loop { - match KVStore::list(self.primary_store.as_ref(), primary_namespace, secondary_namespace) - .await - { - Ok(keys) => { - log_info!( - self.logger, - "List succeeded after {} retries for namespace: {}/{}", - tries, - primary_namespace, - secondary_namespace - ); - return Ok(keys); - }, - Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => { - log_warn!( - self.logger, - "Possible transient error listing namespace {}/{} (attempt {}): {}. Retrying...", - primary_namespace, - secondary_namespace, - tries + 1, - e - ); - tries += 1; - tokio::time::sleep(delay).await; - delay = std::cmp::min( - delay.mul_f32(self.retry_config.backoff_multiplier), - maximum_delay, - ); - }, - Err(e) => { - log_error!(self.logger, "Failed to list from primary store for namespace {}/{}: {}. Falling back to backup.", - primary_namespace, secondary_namespace, e); - return self.list_from_backup(primary_namespace, secondary_namespace); - }, - } - } - } - - /// Writes data to the primary store with retry logic. - /// - /// For transient errors, retries up to a maximum delay time with exponential - /// backoff. - async fn retry_write_with_backoff( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> io::Result<()> { - let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64); - let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64); - let mut tries = 0_u16; - - loop { - match KVStore::write( - self.primary_store.as_ref(), - primary_namespace, - secondary_namespace, - key, - buf.clone(), - ) - .await - { - Ok(res) => { - log_info!( - self.logger, - "Write succeeded after {} retries for key: {}/{}/{}", - tries, - primary_namespace, - secondary_namespace, - key - ); - return Ok(res); - }, - Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => { - log_warn!( - self.logger, - "Possible transient error writing key {}/{}/{} (attempt {}): {}. Retrying...", - primary_namespace, - secondary_namespace, - key, - tries + 1, - e - ); - tries += 1; - tokio::time::sleep(delay).await; - delay = std::cmp::min( - delay.mul_f32(self.retry_config.backoff_multiplier), - maximum_delay, - ); - }, - Err(e) => { - log_error!( - self.logger, - "Failed to write to primary store for key {}/{}/{}: {}", - primary_namespace, - secondary_namespace, - key, - e - ); - return Err(e); - }, - } - } - } - - /// Removes data from the primary store with retry logic. - /// - /// For transient errors, retries up to a maximum delay time with exponential - /// backoff. 
- async fn retry_remove_with_backoff( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> io::Result<()> { - let mut delay = Duration::from_millis(self.retry_config.initial_retry_delay_ms as u64); - let maximum_delay = Duration::from_millis(self.retry_config.maximum_delay_ms as u64); - let mut tries = 0_u16; - - loop { - match KVStore::remove( - self.primary_store.as_ref(), - primary_namespace, - secondary_namespace, - key, - lazy, - ) - .await - { - Ok(res) => { - log_info!( - self.logger, - "Successfully removed data from primary store after {} retries for key: {}/{}/{}", - tries, - primary_namespace, - secondary_namespace, - key - ); - return Ok(res); - }, - Err(e) if is_possibly_transient(&e) && (delay < maximum_delay) => { - log_warn!( - self.logger, - "Possible transient error removing key {}/{}/{} from primary store (attempt {}): {}. Retrying...", - primary_namespace, - secondary_namespace, - key, - tries + 1, - e - ); - tries += 1; - tokio::time::sleep(delay).await; - delay = std::cmp::min( - delay.mul_f32(self.retry_config.backoff_multiplier), - maximum_delay, - ); - }, - Err(e) => { - log_error!( - self.logger, - "Failed to remove data from primary store for key {}/{}/{}: {}", - primary_namespace, - secondary_namespace, - key, - e - ); - return Err(e); - }, - } + { + Ok(keys) => { + log_info!( + self.logger, + "List succeeded for namespace: {}/{}", + primary_namespace, + secondary_namespace + ); + return Ok(keys); + }, + Err(e) => { + log_error!( + self.logger, + "Failed to list from primary store for namespace {}/{}: {}.", + primary_namespace, + secondary_namespace, + e + ); + Err(e) + }, } } async fn primary_write_then_schedule_backup( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { - let primary_write_res = match KVStore::write( + match KVStore::write( self.primary_store.as_ref(), primary_namespace, secondary_namespace, @@ -757,23 +525,7 @@ impl TierStoreInner { ) .await { - Ok(res) => Ok(res), - Err(e) if is_possibly_transient(&e) => { - self.retry_write_with_backoff( - primary_namespace, - secondary_namespace, - key, - buf.clone(), - ) - .await - }, - Err(e) => Err(e), - }; - - match primary_write_res { - Ok(res) => { - // We enqueue for backup only what we successfully write to primary. In doing - // this we avoid data inconsistencies across stores. + Ok(()) => { if let Err(e) = self.enqueue_backup_write(primary_namespace, secondary_namespace, key, buf) { @@ -788,7 +540,7 @@ impl TierStoreInner { ) } - Ok(res) + Ok(()) }, Err(e) => { log_debug!( @@ -806,7 +558,7 @@ impl TierStoreInner { async fn primary_remove_then_schedule_backup( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> io::Result<()> { - let primary_remove_res = match KVStore::remove( + match KVStore::remove( self.primary_store.as_ref(), primary_namespace, secondary_namespace, @@ -815,16 +567,7 @@ impl TierStoreInner { ) .await { - Ok(res) => Ok(res), - Err(e) if is_possibly_transient(&e) => { - self.retry_remove_with_backoff(primary_namespace, secondary_namespace, key, lazy) - .await - }, - Err(e) => Err(e), - }; - - match primary_remove_res { - Ok(res) => { + Ok(()) => { if let Err(e) = self.enqueue_backup_remove(primary_namespace, secondary_namespace, key, lazy) { @@ -839,7 +582,7 @@ impl TierStoreInner { ) } - Ok(res) + Ok(()) }, Err(e) => { log_debug!( @@ -884,11 +627,10 @@ impl TierStoreInner { ) } else { log_debug!(self.logger, "Ephemeral store not configured. 
Reading non-critical data from primary or backup stores."); - self.read_primary_or_backup(&primary_namespace, &secondary_namespace, &key) - .await + self.read_primary(&primary_namespace, &secondary_namespace, &key).await } }, - _ => self.read_primary_or_backup(&primary_namespace, &secondary_namespace, &key).await, + _ => self.read_primary(&primary_namespace, &secondary_namespace, &key).await, } } @@ -1045,10 +787,10 @@ impl TierStoreInner { self.logger, "Ephemeral store not configured. Listing from primary and backup stores." ); - self.list_primary_or_backup(&primary_namespace, &secondary_namespace).await + self.list_primary(&primary_namespace, &secondary_namespace).await } }, - _ => self.list_primary_or_backup(&primary_namespace, &secondary_namespace).await, + _ => self.list_primary(&primary_namespace, &secondary_namespace).await, } } @@ -1132,6 +874,7 @@ mod tests { use std::path::PathBuf; use std::sync::Arc; use std::thread; + use std::time::Duration; use lightning::util::logger::Level; use lightning::util::persist::{ @@ -1143,7 +886,7 @@ mod tests { use crate::io::test_utils::{ do_read_write_remove_list_persist, random_storage_path, DelayedStore, }; - use crate::io::tier_store::{RetryConfig, TierStore}; + use crate::io::tier_store::TierStore; use crate::logger::Logger; use crate::runtime::Runtime; #[cfg(not(feature = "uniffi"))] @@ -1164,8 +907,7 @@ mod tests { fn setup_tier_store( primary_store: Arc, logger: Arc, runtime: Arc, ) -> TierStore { - let retry_config = RetryConfig::default(); - TierStore::new(primary_store, runtime, logger, retry_config) + TierStore::new(primary_store, runtime, logger) } #[test] diff --git a/src/io/utils.rs b/src/io/utils.rs index 1b0456c45..d2f70377b 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -20,7 +20,6 @@ use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; use bdk_chain::ConfirmationBlockTime; use bdk_wallet::ChangeSet as BdkWalletChangeSet; use bitcoin::Network; -use lightning::io::ErrorKind; use lightning::ln::msgs::DecodeError; use lightning::routing::gossip::NetworkGraph; use lightning::routing::scoring::{ @@ -705,19 +704,6 @@ where Ok(res) } -/// Checks if an error kind is possibly transient. 
-pub(crate) fn is_possibly_transient(error: &lightning::io::Error) -> bool { - match error.kind() { - ErrorKind::ConnectionRefused - | ErrorKind::ConnectionAborted - | ErrorKind::ConnectionReset - | ErrorKind::TimedOut - | ErrorKind::Interrupted - | ErrorKind::NotConnected => true, - _ => false, - } -} - #[cfg(test)] mod tests { use super::read_or_generate_seed_file; diff --git a/src/lib.rs b/src/lib.rs index 55b96a5d7..081c8b389 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -138,7 +138,6 @@ use ffi::*; pub use ffi::{FfiDynStore, ForeignDynStoreTrait, IOError}; use gossip::GossipSource; use graph::NetworkGraph; -pub use io::tier_store::RetryConfig; use io::utils::write_node_metrics; use lightning::chain::BestBlock; use lightning::events::bump_transaction::{Input, Wallet as LdkWallet}; diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 39acdbba7..301c136d2 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -34,7 +34,7 @@ use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ Builder, CustomTlvRecord, DynStore, DynStoreWrapper, Event, LightningBalance, Node, NodeError, - PendingSweepBalance, RetryConfig, + PendingSweepBalance, }; #[cfg(feature = "uniffi")] use ldk_node::{FfiDynStore, ForeignDynStoreTrait, IOError}; @@ -570,7 +570,6 @@ pub(crate) fn setup_node(chain_source: &TestChainSource, config: TestConfig) -> if let Some(ephemeral) = ephemeral { builder.set_tier_store_ephemeral(ephemeral); } - builder.set_tier_store_retry_config(RetryConfig::default()); builder.build_with_tier_store(config.node_entropy.into(), primary).unwrap() }, }; From 30632b79d812a27a70ec6d7a73055da8c63c8a0b Mon Sep 17 00:00:00 2001 From: Enigbe Date: Mon, 2 Feb 2026 21:44:39 +0100 Subject: [PATCH 66/75] fixup! Introduce and configure node with tiered KVStore remove the execution of locked writes from tier store & remove inappropriate kvstoresync usage --- src/io/tier_store.rs | 270 +++++++------------------------------------ 1 file changed, 44 insertions(+), 226 deletions(-) diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs index 7816b5a1d..8ad524c98 100644 --- a/src/io/tier_store.rs +++ b/src/io/tier_store.rs @@ -20,10 +20,8 @@ use lightning::{log_debug, log_error, log_info, log_warn}; use tokio::sync::mpsc::{self, error::TrySendError}; -use std::collections::HashMap; use std::future::Future; -use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::{Arc, Mutex}; +use std::sync::Arc; // todo(enigbe): Uncertain about appropriate queue size and if this would need // configuring. @@ -40,7 +38,6 @@ const BACKUP_QUEUE_CAPACITY: usize = 5; /// scenarios. pub(crate) struct TierStore { inner: Arc, - next_version: AtomicU64, runtime: Arc, logger: Arc, } @@ -49,7 +46,7 @@ impl TierStore { pub fn new(primary_store: Arc, runtime: Arc, logger: Arc) -> Self { let inner = Arc::new(TierStoreInner::new(primary_store, Arc::clone(&logger))); - Self { inner, next_version: AtomicU64::new(1), runtime, logger } + Self { inner, runtime, logger } } /// Configures the local backup store for disaster recovery. 
@@ -89,7 +86,7 @@ impl TierStore { mut receiver: mpsc::Receiver, backup_store: Arc, logger: Arc, ) { while let Some(op) = receiver.recv().await { - match Self::apply_backup_operation(&op, &backup_store) { + match Self::apply_backup_operation(&op, &backup_store).await { Ok(_) => { log_trace!( logger, @@ -113,25 +110,21 @@ impl TierStore { } } - fn apply_backup_operation(op: &BackupOp, store: &Arc) -> io::Result<()> { + async fn apply_backup_operation(op: &BackupOp, store: &Arc) -> io::Result<()> { match op { BackupOp::Write { primary_namespace, secondary_namespace, key, data } => { - KVStoreSync::write( + KVStore::write( store.as_ref(), primary_namespace, secondary_namespace, key, data.clone(), ) + .await }, BackupOp::Remove { primary_namespace, secondary_namespace, key, lazy } => { - KVStoreSync::remove( - store.as_ref(), - primary_namespace, - secondary_namespace, - key, - *lazy, - ) + KVStore::remove(store.as_ref(), primary_namespace, secondary_namespace, key, *lazy) + .await }, } } @@ -146,31 +139,6 @@ impl TierStore { inner.ephemeral_store = Some(ephemeral); } - - fn build_locking_key( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> String { - if primary_namespace.is_empty() { - key.to_owned() - } else { - format!("{}#{}#{}", primary_namespace, secondary_namespace, key) - } - } - - fn get_new_version_and_lock_ref( - &self, locking_key: String, - ) -> (Arc>, u64) { - let version = self.next_version.fetch_add(1, Ordering::Relaxed); - if version == u64::MAX { - panic!("TierStore version counter overflowed"); - } - - // Get a reference to the inner lock. We do this early so that the arc can double as an in-flight counter for - // cleaning up unused locks. - let inner_lock_ref = self.inner.get_inner_lock_ref(locking_key); - - (inner_lock_ref, version) - } } impl KVStore for TierStore { @@ -191,26 +159,11 @@ impl KVStore for TierStore { ) -> impl Future> + 'static + Send { let inner = Arc::clone(&self.inner); - let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); - let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); - async move { - inner - .write_internal( - inner_lock_ref, - locking_key, - version, - primary_namespace, - secondary_namespace, - key, - buf, - ) - .await - } + async move { inner.write_internal(primary_namespace, secondary_namespace, key, buf).await } } fn remove( @@ -218,26 +171,11 @@ impl KVStore for TierStore { ) -> impl Future> + 'static + Send { let inner = Arc::clone(&self.inner); - let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); - let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); - async move { - inner - .remove_internal( - inner_lock_ref, - locking_key, - version, - primary_namespace, - secondary_namespace, - key, - lazy, - ) - .await - } + async move { inner.remove_internal(primary_namespace, secondary_namespace, key, lazy).await } } fn list( @@ -266,17 +204,11 @@ impl KVStoreSync for TierStore { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> io::Result<()> { - let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); - let 
(inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); - let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); self.runtime.block_on(self.inner.write_internal( - inner_lock_ref, - locking_key, - version, primary_namespace, secondary_namespace, key, @@ -287,17 +219,11 @@ impl KVStoreSync for TierStore { fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> io::Result<()> { - let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); - let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); self.runtime.block_on(self.inner.remove_internal( - inner_lock_ref, - locking_key, - version, primary_namespace, secondary_namespace, key, @@ -322,9 +248,6 @@ pub struct TierStoreInner { backup_store: Option>, backup_sender: Option>, logger: Arc, - /// Per-key locks for the available data tiers, i.e. (primary, backup, ephemeral), - /// that ensures we don't have concurrent writes to the same namespace/key. - locks: Mutex>>>, } impl TierStoreInner { @@ -336,7 +259,6 @@ impl TierStoreInner { backup_store: None, backup_sender: None, logger, - locks: Mutex::new(HashMap::new()), } } @@ -619,12 +541,13 @@ impl TierStoreInner { // 1. Are there situations where local transient errors may warrant a retry? // 2. Can we reliably identify/detect these transient errors? // 3. Should we fall back to the primary or backup stores in the event of any error? - KVStoreSync::read( + KVStore::read( eph_store.as_ref(), &primary_namespace, &secondary_namespace, &key, ) + .await } else { log_debug!(self.logger, "Ephemeral store not configured. Reading non-critical data from primary or backup stores."); self.read_primary(&primary_namespace, &secondary_namespace, &key).await @@ -635,57 +558,23 @@ impl TierStoreInner { } async fn write_internal( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, ) -> io::Result<()> { - check_namespace_key_validity( - primary_namespace.as_str(), - secondary_namespace.as_str(), - Some(key.as_str()), - "write", - )?; - match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) { (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY) | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => { if let Some(eph_store) = &self.ephemeral_store { - self.execute_locked_write( - inner_lock_ref, - locking_key, - version, - async move || { - KVStoreSync::write( - eph_store.as_ref(), - primary_namespace.as_str(), - secondary_namespace.as_str(), - key.as_str(), - buf, - ) - }, + KVStore::write( + eph_store.as_ref(), + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + buf, ) .await } else { log_debug!(self.logger, "Ephemeral store not configured. 
Writing non-critical data to primary and backup stores."); - self.execute_locked_write( - inner_lock_ref, - locking_key, - version, - async move || { - self.primary_write_then_schedule_backup( - primary_namespace.as_str(), - secondary_namespace.as_str(), - key.as_str(), - buf, - ) - .await - }, - ) - .await - } - }, - _ => { - self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { self.primary_write_then_schedule_backup( primary_namespace.as_str(), secondary_namespace.as_str(), @@ -693,64 +582,38 @@ impl TierStoreInner { buf, ) .await - }) + } + }, + _ => { + self.primary_write_then_schedule_backup( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + buf, + ) .await }, } } async fn remove_internal( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, ) -> io::Result<()> { - check_namespace_key_validity( - primary_namespace.as_str(), - secondary_namespace.as_str(), - Some(key.as_str()), - "remove", - )?; - match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) { (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY) | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => { if let Some(eph_store) = &self.ephemeral_store { - self.execute_locked_write( - inner_lock_ref, - locking_key, - version, - async move || { - KVStoreSync::remove( - eph_store.as_ref(), - primary_namespace.as_str(), - secondary_namespace.as_str(), - key.as_str(), - lazy, - ) - }, + KVStore::remove( + eph_store.as_ref(), + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + lazy, ) .await } else { log_debug!(self.logger, "Ephemeral store not configured. 
Removing non-critical data from primary and backup stores."); - self.execute_locked_write( - inner_lock_ref, - locking_key, - version, - async move || { - self.primary_remove_then_schedule_backup( - primary_namespace.as_str(), - secondary_namespace.as_str(), - key.as_str(), - lazy, - ) - .await - }, - ) - .await - } - }, - _ => { - self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { self.primary_remove_then_schedule_backup( primary_namespace.as_str(), secondary_namespace.as_str(), @@ -758,7 +621,15 @@ impl TierStoreInner { lazy, ) .await - }) + } + }, + _ => { + self.primary_remove_then_schedule_backup( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + lazy, + ) .await }, } @@ -767,13 +638,6 @@ impl TierStoreInner { async fn list_internal( &self, primary_namespace: String, secondary_namespace: String, ) -> io::Result> { - check_namespace_key_validity( - primary_namespace.as_str(), - secondary_namespace.as_str(), - None, - "list", - )?; - match (primary_namespace.as_str(), secondary_namespace.as_str()) { ( NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, @@ -793,52 +657,6 @@ impl TierStoreInner { _ => self.list_primary(&primary_namespace, &secondary_namespace).await, } } - - fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { - let mut outer_lock = self.locks.lock().unwrap(); - Arc::clone(&outer_lock.entry(locking_key).or_default()) - } - - async fn execute_locked_write< - F: Future>, - FN: FnOnce() -> F, - >( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, - callback: FN, - ) -> Result<(), lightning::io::Error> { - let res = { - let mut last_written_version = inner_lock_ref.lock().await; - - // Check if we already have a newer version written. This ensures eventual consistency. - let is_stale_version = version <= *last_written_version; - - if is_stale_version { - Ok(()) - } else { - callback().await.map(|_| { - *last_written_version = version; - }) - } - }; - - self.clean_locks(&inner_lock_ref, locking_key); - res - } - - fn clean_locks(&self, inner_lock_ref: &Arc>, locking_key: String) { - // If there no arcs in use elsewhere, this means that there are no in-flight writes. We can remove the map entry - // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in - // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already - // counted. 
- let mut outer_lock = self.locks.lock().unwrap(); - - let strong_count = Arc::strong_count(&inner_lock_ref); - debug_assert!(strong_count >= 2, "Unexpected TierStore strong count"); - - if strong_count == 2 { - outer_lock.remove(&locking_key); - } - } } enum BackupOp { From e67f51a6dfece7194aa23e6da0ab56875e8f3442 Mon Sep 17 00:00:00 2001 From: Enigbe Date: Wed, 4 Feb 2026 00:01:15 +0100 Subject: [PATCH 67/75] Execute locked write for foreign dynstore Additionally: - Replace TestStore with FilesystemStore for ephemeral store (TestStore's async writes block until manually completed, causing test hangs) - Use build_with_store_internal to share Node's runtime with TierStore instead of creating separate runtimes --- src/builder.rs | 16 +- src/ffi/types.rs | 350 ++++++++++++++++++++++++++++++++++++++++---- tests/common/mod.rs | 2 +- 3 files changed, 340 insertions(+), 28 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index 9ec60ab26..eceb2fb6b 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -796,7 +796,21 @@ impl NodeBuilder { config.backup.as_ref().map(|s| tier_store.set_backup_store(Arc::clone(s))); } - self.build_with_store(node_entropy, tier_store) + let seed_bytes = node_entropy.to_seed_bytes(); + let config = Arc::new(self.config.clone()); + + build_with_store_internal( + config, + self.chain_data_source_config.as_ref(), + self.gossip_source_config.as_ref(), + self.liquidity_source_config.as_ref(), + self.pathfinding_scores_sync_config.as_ref(), + self.async_payments_role, + seed_bytes, + runtime, + logger, + Arc::new(DynStoreWrapper(tier_store)), + ) } /// Builds a [`Node`] instance according to the options previously configured. diff --git a/src/ffi/types.rs b/src/ffi/types.rs index f4afc19d4..3a197146c 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -10,11 +10,13 @@ // // Make sure to add any re-exported items that need to be used in uniffi below. 
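// Illustrative sketch (annotation, not part of the patch; names here are
// hypothetical): the additions below stamp every store write with a version
// from a shared counter and guard it behind a per-key lock, so a stale
// in-flight write can never clobber a newer one. Reduced to its core, and
// assuming tokio's async Mutex, the guard looks like this:
async fn guarded_write<F>(
	lock: &tokio::sync::Mutex<u64>, version: u64, apply: F,
) -> std::io::Result<()>
where
	F: std::future::Future<Output = std::io::Result<()>>,
{
	let mut last_written = lock.lock().await;
	if version <= *last_written {
		// A newer version has already been written; skip the stale write to
		// keep the store eventually consistent with the latest state.
		return Ok(());
	}
	// Apply the write while holding the per-key lock, then record its version.
	apply.await.map(|()| *last_written = version)
}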
+use std::collections::HashMap; use std::convert::TryInto; use std::future::Future; use std::ops::Deref; use std::str::FromStr; -use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; use std::time::Duration; use async_trait::async_trait; @@ -54,6 +56,7 @@ pub use crate::config::{ pub use crate::entropy::{generate_entropy_mnemonic, EntropyError, NodeEntropy, WordCount}; use crate::error::Error; pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; +use crate::io::utils::check_namespace_key_validity; pub use crate::liquidity::{LSPS1OrderStatus, LSPS2ServiceConfig}; pub use crate::logger::{LogLevel, LogRecord, LogWriter}; pub use crate::payment::store::{ @@ -190,12 +193,48 @@ pub trait ForeignDynStoreTrait: Send + Sync { #[derive(Clone)] pub struct FfiDynStore { - pub(crate) inner: Arc, + inner: Arc, + next_write_version: Arc, } impl FfiDynStore { pub fn from_store(store: Arc) -> Self { - Self { inner: store } + let inner = Arc::new(FfiDynStoreInner::new(store)); + Self { inner, next_write_version: Arc::new(AtomicU64::new(1)) } + } + + fn build_locking_key( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> String { + if primary_namespace.is_empty() { + key.to_owned() + } else { + format!("{}#{}#{}", primary_namespace, secondary_namespace, key) + } + } + + fn get_new_version_and_async_lock_ref( + &self, locking_key: String, + ) -> (Arc>, u64) { + let version = self.next_write_version.fetch_add(1, Ordering::Relaxed); + if version == u64::MAX { + panic!("FfiDynStore version counter overflowed"); + } + + let inner_lock_ref = self.inner.get_async_inner_lock_ref(locking_key); + + (inner_lock_ref, version) + } + + fn get_new_version_and_sync_lock_ref(&self, locking_key: String) -> (Arc>, u64) { + let version = self.next_write_version.fetch_add(1, Ordering::Relaxed); + if version == u64::MAX { + panic!("FfiDynStore version counter overflowed"); + } + + let inner_lock_ref = self.inner.get_sync_inner_lock_ref(locking_key); + + (inner_lock_ref, version) } } @@ -208,7 +247,9 @@ impl KVStore for FfiDynStore { let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); async move { - this.read_async(primary_namespace, secondary_namespace, key).await.map_err(|e| e.into()) + this.read_internal_async(primary_namespace, secondary_namespace, key) + .await + .map_err(|e| e.into()) } } @@ -216,13 +257,26 @@ impl KVStore for FfiDynStore { &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); + let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); let key = key.to_string(); + + let locking_key = self.build_locking_key(&primary_namespace, &secondary_namespace, &key); + let (inner_lock_ref, version) = + self.get_new_version_and_async_lock_ref(locking_key.clone()); async move { - this.write_async(primary_namespace, secondary_namespace, key, buf) - .await - .map_err(|e| e.into()) + this.write_internal_async( + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + buf, + ) + .await + .map_err(|e| e.into()) } } @@ -230,13 +284,27 @@ impl KVStore for FfiDynStore { &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); + let primary_namespace = primary_namespace.to_string(); let secondary_namespace 
= secondary_namespace.to_string(); let key = key.to_string(); + + let locking_key = self.build_locking_key(&primary_namespace, &secondary_namespace, &key); + let (inner_lock_ref, version) = + self.get_new_version_and_async_lock_ref(locking_key.clone()); + async move { - this.remove_async(primary_namespace, secondary_namespace, key, lazy) - .await - .map_err(|e| e.into()) + this.remove_internal_async( + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + lazy, + ) + .await + .map_err(|e| e.into()) } } @@ -244,10 +312,14 @@ impl KVStore for FfiDynStore { &self, primary_namespace: &str, secondary_namespace: &str, ) -> impl Future, lightning::io::Error>> + 'static + Send { let this = Arc::clone(&self.inner); + let primary_namespace = primary_namespace.to_string(); let secondary_namespace = secondary_namespace.to_string(); + async move { - this.list_async(primary_namespace, secondary_namespace).await.map_err(|e| e.into()) + this.list_internal_async(primary_namespace, secondary_namespace) + .await + .map_err(|e| e.into()) } } } @@ -256,50 +328,276 @@ impl KVStoreSync for FfiDynStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> Result, lightning::io::Error> { - ForeignDynStoreTrait::read( - self.inner.as_ref(), + self.inner.read_internal( primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string(), ) - .map_err(|e| e.into()) } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> Result<(), lightning::io::Error> { - ForeignDynStoreTrait::write( - self.inner.as_ref(), + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_sync_lock_ref(locking_key.clone()); + + self.inner.write_internal( + inner_lock_ref, + locking_key, + version, primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string(), buf, ) - .map_err(|e| e.into()) } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> Result<(), lightning::io::Error> { - ForeignDynStoreTrait::remove( - self.inner.as_ref(), + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_sync_lock_ref(locking_key.clone()); + + self.inner.remove_internal( + inner_lock_ref, + locking_key, + version, primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string(), lazy, ) - .map_err(|e| e.into()) } fn list( &self, primary_namespace: &str, secondary_namespace: &str, ) -> Result, lightning::io::Error> { - ForeignDynStoreTrait::list( - self.inner.as_ref(), - primary_namespace.to_string(), - secondary_namespace.to_string(), - ) - .map_err(|e| e.into()) + self.inner.list_internal(primary_namespace.to_string(), secondary_namespace.to_string()) + } +} + +struct FfiDynStoreInner { + ffi_store: Arc, + async_write_version_locks: Mutex>>>, + sync_write_version_locks: Mutex>>>, +} + +impl FfiDynStoreInner { + fn new(ffi_store: Arc) -> Self { + Self { + ffi_store, + async_write_version_locks: Mutex::new(HashMap::new()), + sync_write_version_locks: Mutex::new(HashMap::new()), + } + } + + fn get_async_inner_lock_ref(&self, locking_key: String) -> Arc> { + let mut outer_lock = self.async_write_version_locks.lock().unwrap(); + Arc::clone(&outer_lock.entry(locking_key).or_default()) + } + + fn get_sync_inner_lock_ref(&self, locking_key: String) -> Arc> { + let mut outer_lock = 
self.sync_write_version_locks.lock().unwrap(); + Arc::clone(&outer_lock.entry(locking_key).or_default()) + } + + async fn read_internal_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> bitcoin::io::Result> { + check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?; + self.ffi_store + .read_async(primary_namespace, secondary_namespace, key) + .await + .map_err(|e| e.into()) + } + + fn read_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> bitcoin::io::Result> { + check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?; + self.ffi_store.read(primary_namespace, secondary_namespace, key).map_err(|e| e.into()) + } + + async fn write_internal_async( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> bitcoin::io::Result<()> { + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "write", + )?; + + let store = Arc::clone(&self.ffi_store); + + self.execute_locked_write_async(inner_lock_ref, locking_key, version, async move || { + store + .write_async(primary_namespace, secondary_namespace, key, buf) + .await + .map_err(|e| >::into(e))?; + + Ok(()) + }) + .await + } + + fn write_internal( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> bitcoin::io::Result<()> { + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "write", + )?; + + self.execute_locked_write(inner_lock_ref, locking_key, version, || { + self.ffi_store + .write(primary_namespace, secondary_namespace, key, buf) + .map_err(|e| >::into(e))?; + + Ok(()) + }) + } + + async fn remove_internal_async( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> bitcoin::io::Result<()> { + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "remove", + )?; + + let store = Arc::clone(&self.ffi_store); + + self.execute_locked_write_async(inner_lock_ref, locking_key, version, async move || { + store + .remove_async(primary_namespace, secondary_namespace, key, lazy) + .await + .map_err(|e| >::into(e))?; + + Ok(()) + }) + .await + } + + fn remove_internal( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> bitcoin::io::Result<()> { + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "remove", + )?; + + self.execute_locked_write(inner_lock_ref, locking_key, version, || { + self.ffi_store + .remove(primary_namespace, secondary_namespace, key, lazy) + .map_err(|e| >::into(e))?; + + Ok(()) + }) + } + + async fn list_internal_async( + &self, primary_namespace: String, secondary_namespace: String, + ) -> bitcoin::io::Result> { + check_namespace_key_validity(&primary_namespace, &secondary_namespace, None, "list")?; + self.ffi_store + .list_async(primary_namespace, secondary_namespace) + .await + .map_err(|e| e.into()) + } + + fn list_internal( + &self, primary_namespace: String, secondary_namespace: String, + ) -> bitcoin::io::Result> { + check_namespace_key_validity(&primary_namespace, &secondary_namespace, None, "list")?; + 
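// Annotation (not part of the patch): at this stage of the series, sync and
// async writes draw versions from the same `next_write_version` counter but
// serialize against two different lock maps, so a `KVStoreSync::write` racing
// a `KVStore::write` on the same key is not ordered across the two maps.
// Patch 70 below consolidates them into a single `write_version_locks` map.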
self.ffi_store.list(primary_namespace, secondary_namespace).map_err(|e| e.into()) + } + + async fn execute_locked_write_async< + F: Future>, + FN: FnOnce() -> F, + >( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + callback: FN, + ) -> Result<(), bitcoin::io::Error> { + let res = { + let mut last_written_version = inner_lock_ref.lock().await; + + // Check if we already have a newer version written/removed. This is used in async contexts to realize eventual + // consistency. + let is_stale_version = version <= *last_written_version; + + // If the version is not stale, we execute the callback. Otherwise we can and must skip writing. + if is_stale_version { + Ok(()) + } else { + callback().await.map(|_| { + *last_written_version = version; + }) + } + }; + + self.clean_locks_async(&inner_lock_ref, locking_key); + + res + } + + fn clean_locks_async( + &self, inner_lock_ref: &Arc>, locking_key: String, + ) { + // If there no arcs in use elsewhere, this means that there are no in-flight writes. We can remove the map entry + // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in + // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already + // counted. + let mut outer_lock = self.async_write_version_locks.lock().unwrap(); + + let strong_count = Arc::strong_count(&inner_lock_ref); + debug_assert!(strong_count >= 2, "Unexpected FfiDynStore strong count"); + + if strong_count == 2 { + outer_lock.remove(&locking_key); + } + } + + fn execute_locked_write bitcoin::io::Result<()>>( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, callback: F, + ) -> bitcoin::io::Result<()> { + let res = { + let mut last_written_version = inner_lock_ref.lock().unwrap(); + + let is_stale_version = version <= *last_written_version; + + if is_stale_version { + Ok(()) + } else { + callback().map(|_| { + *last_written_version = version; + }) + } + }; + + self.clean_locks(&inner_lock_ref, locking_key); + + res + } + + fn clean_locks(&self, inner_lock_ref: &Arc>, locking_key: String) { + let mut outer_lock = self.sync_write_version_locks.lock().unwrap(); + let strong_count = Arc::strong_count(inner_lock_ref); + debug_assert!(strong_count >= 2, "Unexpected FfiDynStore sync strong count"); + if strong_count == 2 { + outer_lock.remove(&locking_key); + } } } diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 301c136d2..03d077adc 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -439,7 +439,7 @@ pub(crate) fn create_tier_stores(base_path: PathBuf) -> (TestDynStore, TestDynSt .unwrap(), )); let backup = Arc::new(DynStoreWrapper(FilesystemStore::new(base_path.join("backup")))); - let ephemeral = Arc::new(DynStoreWrapper(TestStore::new(false))); + let ephemeral = Arc::new(DynStoreWrapper(FilesystemStore::new(base_path.join("ephemeral")))); #[cfg(feature = "uniffi")] { From e7e6f73d313d9b6f871e267ebb1fc07b44b4ae7a Mon Sep 17 00:00:00 2001 From: Enigbe Date: Wed, 4 Feb 2026 11:31:05 +0100 Subject: [PATCH 68/75] fixup! 
Add comprehensive testing for TierStore

Delegate to KVStoreSync for sync calls
---
 tests/common/mod.rs | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index 03d077adc..f31aaa719 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -407,25 +407,28 @@ impl ForeignDynStoreTrait for TestForeignDynStoreAdapter {
 	fn read(
 		&self, primary_namespace: String, secondary_namespace: String, key: String,
 	) -> Result<Vec<u8>, IOError> {
-		self.0.read(&primary_namespace, &secondary_namespace, &key).map_err(Into::into)
+		KVStoreSync::read(&*self.0, &primary_namespace, &secondary_namespace, &key)
+			.map_err(Into::into)
 	}
 
 	fn write(
 		&self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec<u8>,
 	) -> Result<(), IOError> {
-		self.0.write(&primary_namespace, &secondary_namespace, &key, buf).map_err(Into::into)
+		KVStoreSync::write(&*self.0, &primary_namespace, &secondary_namespace, &key, buf)
+			.map_err(Into::into)
 	}
 
 	fn remove(
 		&self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool,
 	) -> Result<(), IOError> {
-		self.0.remove(&primary_namespace, &secondary_namespace, &key, lazy).map_err(Into::into)
+		KVStoreSync::remove(&*self.0, &primary_namespace, &secondary_namespace, &key, lazy)
+			.map_err(Into::into)
 	}
 
 	fn list(
 		&self, primary_namespace: String, secondary_namespace: String,
 	) -> Result<Vec<String>, IOError> {
-		self.0.list(&primary_namespace, &secondary_namespace).map_err(Into::into)
+		KVStoreSync::list(&*self.0, &primary_namespace, &secondary_namespace).map_err(Into::into)
 	}
 }

From ef88365af33242bb4e1af041a9cb343fe0475433 Mon Sep 17 00:00:00 2001
From: Enigbe
Date: Mon, 16 Feb 2026 17:32:30 +0100
Subject: [PATCH 69/75] Add documentation for ForeignDynStoreTrait

---
 src/ffi/types.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/ffi/types.rs b/src/ffi/types.rs
index 3a197146c..fb6532bf9 100644
--- a/src/ffi/types.rs
+++ b/src/ffi/types.rs
@@ -162,6 +162,9 @@ impl std::fmt::Display for IOError {
 	}
 }
 
+/// FFI-safe version of [`DynStoreTrait`].
+///
+/// [`DynStoreTrait`]: crate::types::DynStoreTrait
 #[async_trait]
 pub trait ForeignDynStoreTrait: Send + Sync {
 	async fn read_async(

From 0545bf964e69a9bcbcb9af192edf463ed20f62f5 Mon Sep 17 00:00:00 2001
From: Enigbe
Date: Mon, 16 Feb 2026 18:08:38 +0100
Subject: [PATCH 70/75] fixup!
Execute locked write for foreign dynstore Consolidate sync and async write version locks into one --- src/ffi/types.rs | 134 +++++++++++++++++------------------------------ 1 file changed, 48 insertions(+), 86 deletions(-) diff --git a/src/ffi/types.rs b/src/ffi/types.rs index fb6532bf9..79656f166 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -216,7 +216,7 @@ impl FfiDynStore { } } - fn get_new_version_and_async_lock_ref( + fn get_new_version_and_lock_ref( &self, locking_key: String, ) -> (Arc>, u64) { let version = self.next_write_version.fetch_add(1, Ordering::Relaxed); @@ -224,18 +224,7 @@ impl FfiDynStore { panic!("FfiDynStore version counter overflowed"); } - let inner_lock_ref = self.inner.get_async_inner_lock_ref(locking_key); - - (inner_lock_ref, version) - } - - fn get_new_version_and_sync_lock_ref(&self, locking_key: String) -> (Arc>, u64) { - let version = self.next_write_version.fetch_add(1, Ordering::Relaxed); - if version == u64::MAX { - panic!("FfiDynStore version counter overflowed"); - } - - let inner_lock_ref = self.inner.get_sync_inner_lock_ref(locking_key); + let inner_lock_ref = self.inner.get_inner_lock_ref(locking_key); (inner_lock_ref, version) } @@ -266,8 +255,7 @@ impl KVStore for FfiDynStore { let key = key.to_string(); let locking_key = self.build_locking_key(&primary_namespace, &secondary_namespace, &key); - let (inner_lock_ref, version) = - self.get_new_version_and_async_lock_ref(locking_key.clone()); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); async move { this.write_internal_async( inner_lock_ref, @@ -293,8 +281,7 @@ impl KVStore for FfiDynStore { let key = key.to_string(); let locking_key = self.build_locking_key(&primary_namespace, &secondary_namespace, &key); - let (inner_lock_ref, version) = - self.get_new_version_and_async_lock_ref(locking_key.clone()); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); async move { this.remove_internal_async( @@ -342,7 +329,7 @@ impl KVStoreSync for FfiDynStore { &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, ) -> Result<(), lightning::io::Error> { let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); - let (inner_lock_ref, version) = self.get_new_version_and_sync_lock_ref(locking_key.clone()); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); self.inner.write_internal( inner_lock_ref, @@ -359,7 +346,7 @@ impl KVStoreSync for FfiDynStore { &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> Result<(), lightning::io::Error> { let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); - let (inner_lock_ref, version) = self.get_new_version_and_sync_lock_ref(locking_key.clone()); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); self.inner.remove_internal( inner_lock_ref, @@ -381,26 +368,16 @@ impl KVStoreSync for FfiDynStore { struct FfiDynStoreInner { ffi_store: Arc, - async_write_version_locks: Mutex>>>, - sync_write_version_locks: Mutex>>>, + write_version_locks: Mutex>>>, } impl FfiDynStoreInner { fn new(ffi_store: Arc) -> Self { - Self { - ffi_store, - async_write_version_locks: Mutex::new(HashMap::new()), - sync_write_version_locks: Mutex::new(HashMap::new()), - } + Self { ffi_store, write_version_locks: Mutex::new(HashMap::new()) } } - fn get_async_inner_lock_ref(&self, locking_key: String) -> Arc> { - let mut 
outer_lock = self.async_write_version_locks.lock().unwrap(); - Arc::clone(&outer_lock.entry(locking_key).or_default()) - } - - fn get_sync_inner_lock_ref(&self, locking_key: String) -> Arc> { - let mut outer_lock = self.sync_write_version_locks.lock().unwrap(); + fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { + let mut outer_lock = self.write_version_locks.lock().unwrap(); Arc::clone(&outer_lock.entry(locking_key).or_default()) } @@ -434,7 +411,7 @@ impl FfiDynStoreInner { let store = Arc::clone(&self.ffi_store); - self.execute_locked_write_async(inner_lock_ref, locking_key, version, async move || { + self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { store .write_async(primary_namespace, secondary_namespace, key, buf) .await @@ -446,7 +423,7 @@ impl FfiDynStoreInner { } fn write_internal( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, ) -> bitcoin::io::Result<()> { check_namespace_key_validity( @@ -456,13 +433,22 @@ impl FfiDynStoreInner { "write", )?; - self.execute_locked_write(inner_lock_ref, locking_key, version, || { - self.ffi_store - .write(primary_namespace, secondary_namespace, key, buf) - .map_err(|e| >::into(e))?; + let res = { + let mut last_written_version = inner_lock_ref.blocking_lock(); + if version <= *last_written_version { + Ok(()) + } else { + self.ffi_store + .write(primary_namespace, secondary_namespace, key, buf) + .map_err(|e| e.into()) + .map(|_| { + *last_written_version = version; + }) + } + }; - Ok(()) - }) + self.clean_locks(&inner_lock_ref, locking_key); + res } async fn remove_internal_async( @@ -478,7 +464,7 @@ impl FfiDynStoreInner { let store = Arc::clone(&self.ffi_store); - self.execute_locked_write_async(inner_lock_ref, locking_key, version, async move || { + self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { store .remove_async(primary_namespace, secondary_namespace, key, lazy) .await @@ -490,7 +476,7 @@ impl FfiDynStoreInner { } fn remove_internal( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, ) -> bitcoin::io::Result<()> { check_namespace_key_validity( @@ -500,13 +486,22 @@ impl FfiDynStoreInner { "remove", )?; - self.execute_locked_write(inner_lock_ref, locking_key, version, || { - self.ffi_store - .remove(primary_namespace, secondary_namespace, key, lazy) - .map_err(|e| >::into(e))?; + let res = { + let mut last_written_version = inner_lock_ref.blocking_lock(); + if version <= *last_written_version { + Ok(()) + } else { + self.ffi_store + .remove(primary_namespace, secondary_namespace, key, lazy) + .map_err(|e| >::into(e)) + .map(|_| { + *last_written_version = version; + }) + } + }; - Ok(()) - }) + self.clean_locks(&inner_lock_ref, locking_key); + res } async fn list_internal_async( @@ -526,7 +521,7 @@ impl FfiDynStoreInner { self.ffi_store.list(primary_namespace, secondary_namespace).map_err(|e| e.into()) } - async fn execute_locked_write_async< + async fn execute_locked_write< F: Future>, FN: FnOnce() -> F, >( @@ -550,19 +545,17 @@ impl FfiDynStoreInner { } }; - self.clean_locks_async(&inner_lock_ref, locking_key); + self.clean_locks(&inner_lock_ref, locking_key); res } - fn clean_locks_async( - &self, inner_lock_ref: &Arc>, locking_key: String, - ) { 
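// Annotation (not part of the patch): two details in the consolidation below
// are easy to miss. First, the sync paths now enter the shared tokio Mutex
// via `blocking_lock()`, which panics if invoked from within an async runtime
// context, so the sync KVStoreSync entry points must not run on runtime
// worker threads. Second, the lock-map cleanup relies on Arc::strong_count:
// with one clone in the map and one held by the caller, a count of exactly 2
// proves no other writer is in flight. A minimal model of that cleanup
// (hypothetical helper, illustrative only):
fn remove_if_unused(
	map: &std::sync::Mutex<
		std::collections::HashMap<String, std::sync::Arc<tokio::sync::Mutex<u64>>>,
	>,
	held: &std::sync::Arc<tokio::sync::Mutex<u64>>, key: &str,
) {
	// Take the outer map lock first so no new clone can be handed out while
	// we count references.
	let mut locked_map = map.lock().unwrap();
	if std::sync::Arc::strong_count(held) == 2 {
		// Only the map's Arc and `held` remain: no in-flight writers.
		locked_map.remove(key);
	}
}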
+ fn clean_locks(&self, inner_lock_ref: &Arc>, locking_key: String) { // If there no arcs in use elsewhere, this means that there are no in-flight writes. We can remove the map entry // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already // counted. - let mut outer_lock = self.async_write_version_locks.lock().unwrap(); + let mut outer_lock = self.write_version_locks.lock().unwrap(); let strong_count = Arc::strong_count(&inner_lock_ref); debug_assert!(strong_count >= 2, "Unexpected FfiDynStore strong count"); @@ -571,37 +564,6 @@ impl FfiDynStoreInner { outer_lock.remove(&locking_key); } } - - fn execute_locked_write bitcoin::io::Result<()>>( - &self, inner_lock_ref: Arc>, locking_key: String, version: u64, callback: F, - ) -> bitcoin::io::Result<()> { - let res = { - let mut last_written_version = inner_lock_ref.lock().unwrap(); - - let is_stale_version = version <= *last_written_version; - - if is_stale_version { - Ok(()) - } else { - callback().map(|_| { - *last_written_version = version; - }) - } - }; - - self.clean_locks(&inner_lock_ref, locking_key); - - res - } - - fn clean_locks(&self, inner_lock_ref: &Arc>, locking_key: String) { - let mut outer_lock = self.sync_write_version_locks.lock().unwrap(); - let strong_count = Arc::strong_count(inner_lock_ref); - debug_assert!(strong_count >= 2, "Unexpected FfiDynStore sync strong count"); - if strong_count == 2 { - outer_lock.remove(&locking_key); - } - } } impl UniffiCustomTypeConverter for PublicKey { From 97736fd770ea4c438c6141119331293ab2afdca4 Mon Sep 17 00:00:00 2001 From: Enigbe Date: Mon, 16 Feb 2026 18:28:36 +0100 Subject: [PATCH 71/75] Impl From FfiDynStore for DynStoreWrapper --- src/builder.rs | 12 ++++++------ src/ffi/types.rs | 8 +++++++- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/builder.rs b/src/builder.rs index eceb2fb6b..2909371d4 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1098,8 +1098,8 @@ impl ArcedNodeBuilder { /// /// [`build_with_tier_store`]: Self::build_with_tier_store pub fn set_tier_store_backup(&self, backup_store: Arc) { - let wrapper = DynStoreWrapper((*backup_store).clone()); - let store: Arc = Arc::new(wrapper); + let store: Arc = + Arc::>::new((*backup_store).clone().into()); self.inner.write().unwrap().set_tier_store_backup(store); } @@ -1113,8 +1113,8 @@ impl ArcedNodeBuilder { /// /// [`build_with_tier_store`]: Self::build_with_tier_store pub fn set_tier_store_ephemeral(&self, ephemeral_store: Arc) { - let wrapper = DynStoreWrapper((*ephemeral_store).clone()); - let store: Arc = Arc::new(wrapper); + let store: Arc = + Arc::>::new((*ephemeral_store).clone().into()); self.inner.write().unwrap().set_tier_store_ephemeral(store); } @@ -1225,8 +1225,8 @@ impl ArcedNodeBuilder { pub fn build_with_tier_store( &self, node_entropy: Arc, primary_store: Arc, ) -> Result, BuildError> { - let wrapper = DynStoreWrapper((*primary_store).clone()); - let store: Arc = Arc::new(wrapper); + let store: Arc = + Arc::>::new((*primary_store).clone().into()); self.inner .read() .unwrap() diff --git a/src/ffi/types.rs b/src/ffi/types.rs index 79656f166..a3254df57 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -63,7 +63,7 @@ pub use crate::payment::store::{ ConfirmationStatus, LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus, }; pub use crate::payment::UnifiedPaymentResult; -use crate::{hex_utils, SocketAddress, 
UniffiCustomTypeConverter, UserChannelId};
+use crate::{hex_utils, DynStoreWrapper, SocketAddress, UniffiCustomTypeConverter, UserChannelId};
 
 #[derive(Debug)]
 pub enum IOError {
@@ -366,6 +366,12 @@ impl KVStoreSync for FfiDynStore {
 	}
 }
 
+impl From<FfiDynStore> for DynStoreWrapper<FfiDynStore> {
+	fn from(ffi_store: FfiDynStore) -> Self {
+		DynStoreWrapper(ffi_store)
+	}
+}
+
 struct FfiDynStoreInner {
 	ffi_store: Arc<dyn ForeignDynStoreTrait>,
 	write_version_locks: Mutex<HashMap<String, Arc<tokio::sync::Mutex<u64>>>>,

From 6eea99639f4e9554b916096f41da64777109385a Mon Sep 17 00:00:00 2001
From: Enigbe
Date: Mon, 16 Feb 2026 18:36:52 +0100
Subject: [PATCH 72/75] Remove unnecessary uniffi gating in tier store

---
 src/io/tier_store.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs
index 8ad524c98..f6870be55 100644
--- a/src/io/tier_store.rs
+++ b/src/io/tier_store.rs
@@ -707,7 +707,6 @@ mod tests {
 	use crate::io::tier_store::TierStore;
 	use crate::logger::Logger;
 	use crate::runtime::Runtime;
-	#[cfg(not(feature = "uniffi"))]
 	use crate::types::DynStore;
 	use crate::types::DynStoreWrapper;

From 3c86f2f1ce28094572b4f9f3deff71449ccce989 Mon Sep 17 00:00:00 2001
From: Enigbe
Date: Mon, 16 Feb 2026 19:55:33 +0100
Subject: [PATCH 73/75] Drop DelayedStore and associated backup test

These were created to test that our backup store does not impact primary
store writes, but the boilerplate outweighed the functionality being
tested.
---
 src/io/test_utils.rs | 170 +------------------------------------------
 src/io/tier_store.rs |  49 +------------
 2 files changed, 4 insertions(+), 215 deletions(-)

diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs
index f2b226a5f..9add2d6c1 100644
--- a/src/io/test_utils.rs
+++ b/src/io/test_utils.rs
@@ -9,8 +9,7 @@ use std::collections::{hash_map, HashMap};
 use std::future::Future;
 use std::panic::RefUnwindSafe;
 use std::path::PathBuf;
-use std::sync::{Arc, Mutex};
-use std::time::Duration;
+use std::sync::Mutex;
 
 use lightning::events::ClosureReason;
 use lightning::ln::functional_test_utils::{
@@ -26,8 +25,6 @@ use lightning::{check_closed_broadcast, io};
 use rand::distr::Alphanumeric;
 use rand::{rng, Rng};
 
-use crate::runtime::Runtime;
-
 type TestMonitorUpdatePersister<'a, K> = MonitorUpdatingPersister<
 	&'a K,
 	&'a test_utils::TestLogger,
@@ -355,168 +352,3 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) {
 	// Make sure everything is persisted as expected after close.
check_persisted_data!(persister_0_max_pending_updates * 2 * EXPECTED_UPDATES_PER_PAYMENT + 1); } - -struct DelayedStoreInner { - storage: Mutex>>, - delay: Duration, -} - -impl DelayedStoreInner { - fn new(delay: Duration) -> Self { - Self { storage: Mutex::new(HashMap::new()), delay } - } - - fn make_key(pn: &str, sn: &str, key: &str) -> String { - format!("{}/{}/{}", pn, sn, key) - } - - async fn read_internal( - &self, primary_namespace: String, secondary_namespace: String, key: String, - ) -> Result, io::Error> { - tokio::time::sleep(self.delay).await; - - let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key); - let storage = self.storage.lock().unwrap(); - storage - .get(&full_key) - .cloned() - .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found")) - } - - async fn write_internal( - &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, - ) -> Result<(), io::Error> { - tokio::time::sleep(self.delay).await; - - let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key); - let mut storage = self.storage.lock().unwrap(); - storage.insert(full_key, buf); - Ok(()) - } - - async fn remove_internal( - &self, primary_namespace: String, secondary_namespace: String, key: String, - ) -> Result<(), io::Error> { - tokio::time::sleep(self.delay).await; - - let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key); - let mut storage = self.storage.lock().unwrap(); - storage.remove(&full_key); - Ok(()) - } - - async fn list_internal( - &self, primary_namespace: String, secondary_namespace: String, - ) -> Result, io::Error> { - tokio::time::sleep(self.delay).await; - - let prefix = format!("{}/{}/", primary_namespace, secondary_namespace); - let storage = self.storage.lock().unwrap(); - Ok(storage - .keys() - .filter(|k| k.starts_with(&prefix)) - .map(|k| k.strip_prefix(&prefix).unwrap().to_string()) - .collect()) - } -} - -pub struct DelayedStore { - inner: Arc, - runtime: Arc, -} - -impl DelayedStore { - pub fn new(delay_ms: u64, runtime: Arc) -> Self { - Self { inner: Arc::new(DelayedStoreInner::new(Duration::from_millis(delay_ms))), runtime } - } -} - -impl KVStore for DelayedStore { - fn read( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> impl Future, io::Error>> + 'static + Send { - let inner = Arc::clone(&self.inner); - let pn = primary_namespace.to_string(); - let sn = secondary_namespace.to_string(); - let key = key.to_string(); - - async move { inner.read_internal(pn, sn, key).await } - } - - fn write( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> impl Future> + 'static + Send { - let inner = Arc::clone(&self.inner); - let pn = primary_namespace.to_string(); - let sn = secondary_namespace.to_string(); - let key = key.to_string(); - - async move { inner.write_internal(pn, sn, key, buf).await } - } - - fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, - ) -> impl Future> + 'static + Send { - let inner = Arc::clone(&self.inner); - let pn = primary_namespace.to_string(); - let sn = secondary_namespace.to_string(); - let key = key.to_string(); - - async move { inner.remove_internal(pn, sn, key).await } - } - - fn list( - &self, primary_namespace: &str, secondary_namespace: &str, - ) -> impl Future, io::Error>> + 'static + Send { - let inner = Arc::clone(&self.inner); - let pn = primary_namespace.to_string(); - let sn = secondary_namespace.to_string(); - - async move 
{ inner.list_internal(pn, sn).await } - } -} - -impl KVStoreSync for DelayedStore { - fn read( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Result, io::Error> { - let inner = Arc::clone(&self.inner); - let pn = primary_namespace.to_string(); - let sn = secondary_namespace.to_string(); - let key = key.to_string(); - - self.runtime.block_on(async move { inner.read_internal(pn, sn, key).await }) - } - - fn write( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> Result<(), io::Error> { - let inner = Arc::clone(&self.inner); - let pn = primary_namespace.to_string(); - let sn = secondary_namespace.to_string(); - let key = key.to_string(); - - self.runtime.block_on(async move { inner.write_internal(pn, sn, key, buf).await }) - } - - fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, - ) -> Result<(), io::Error> { - let inner = Arc::clone(&self.inner); - let pn = primary_namespace.to_string(); - let sn = secondary_namespace.to_string(); - let key = key.to_string(); - - self.runtime.block_on(async move { inner.remove_internal(pn, sn, key).await }) - } - - fn list( - &self, primary_namespace: &str, secondary_namespace: &str, - ) -> Result, io::Error> { - let inner = Arc::clone(&self.inner); - let pn = primary_namespace.to_string(); - let sn = secondary_namespace.to_string(); - - self.runtime.block_on(async move { inner.list_internal(pn, sn).await }) - } -} diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs index f6870be55..7f7afcb76 100644 --- a/src/io/tier_store.rs +++ b/src/io/tier_store.rs @@ -701,9 +701,7 @@ mod tests { }; use lightning_persister::fs_store::FilesystemStore; - use crate::io::test_utils::{ - do_read_write_remove_list_persist, random_storage_path, DelayedStore, - }; + use crate::io::test_utils::{do_read_write_remove_list_persist, random_storage_path}; use crate::io::tier_store::TierStore; use crate::logger::Logger; use crate::runtime::Runtime; @@ -873,47 +871,6 @@ mod tests { assert_eq!(backup_read_cm.unwrap(), data); } - #[test] - fn backup_overflow_doesnt_fail_writes() { - let base_dir = random_storage_path(); - let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); - let logger = Arc::new(Logger::new_fs_writer(log_path.clone(), Level::Trace).unwrap()); - let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap()); - - let _cleanup = CleanupDir(base_dir.clone()); - - let primary_store: Arc = - Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); - let mut tier = - setup_tier_store(Arc::clone(&primary_store), Arc::clone(&logger), Arc::clone(&runtime)); - - let backup_store: Arc = - Arc::new(DynStoreWrapper(DelayedStore::new(100, runtime))); - tier.set_backup_store(Arc::clone(&backup_store)); - - let data = vec![42u8; 32]; - - let key = CHANNEL_MANAGER_PERSISTENCE_KEY; - for i in 0..=10 { - let result = KVStoreSync::write( - &tier, - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - &format!("{}_{}", key, i), - data.clone(), - ); - - assert!(result.is_ok(), "Write {} should succeed", i); - } - - // Check logs for backup queue overflow message - let log_contents = std::fs::read_to_string(&log_path).unwrap(); - assert!( - log_contents.contains("Backup queue is full"), - "Logs should contain backup queue overflow message" - ); - } - #[test] fn lazy_removal() { let base_dir = random_storage_path(); @@ -929,7 +886,7 @@ mod tests { 
setup_tier_store(Arc::clone(&primary_store), Arc::clone(&logger), Arc::clone(&runtime)); let backup_store: Arc = - Arc::new(DynStoreWrapper(DelayedStore::new(100, runtime))); + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("backup")))); tier.set_backup_store(Arc::clone(&backup_store)); let data = vec![42u8; 32]; @@ -944,7 +901,7 @@ mod tests { ); assert!(write_result.is_ok(), "Write should succeed"); - thread::sleep(Duration::from_millis(10)); + thread::sleep(Duration::from_millis(100)); assert_eq!( KVStoreSync::read( From 994886274128e53ce63c81840d98699508a744f5 Mon Sep 17 00:00:00 2001 From: Enigbe Date: Mon, 16 Feb 2026 20:27:57 +0100 Subject: [PATCH 74/75] Remove spammy logs --- src/io/tier_store.rs | 36 ++---------------------------------- 1 file changed, 2 insertions(+), 34 deletions(-) diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs index 7f7afcb76..51fb03e0f 100644 --- a/src/io/tier_store.rs +++ b/src/io/tier_store.rs @@ -87,15 +87,7 @@ impl TierStore { ) { while let Some(op) = receiver.recv().await { match Self::apply_backup_operation(&op, &backup_store).await { - Ok(_) => { - log_trace!( - logger, - "Backup succeeded for key {}/{}/{}", - op.primary_namespace(), - op.secondary_namespace(), - op.key() - ); - }, + Ok(_) => {}, Err(e) => { log_error!( logger, @@ -382,16 +374,7 @@ impl TierStoreInner { ) .await { - Ok(data) => { - log_info!( - self.logger, - "Read succeeded for key: {}/{}/{}", - primary_namespace, - secondary_namespace, - key - ); - Ok(data) - }, + Ok(data) => Ok(data), Err(e) => { log_error!( self.logger, @@ -414,12 +397,6 @@ impl TierStoreInner { .await { Ok(keys) => { - log_info!( - self.logger, - "List succeeded for namespace: {}/{}", - primary_namespace, - secondary_namespace - ); return Ok(keys); }, Err(e) => { @@ -549,7 +526,6 @@ impl TierStoreInner { ) .await } else { - log_debug!(self.logger, "Ephemeral store not configured. Reading non-critical data from primary or backup stores."); self.read_primary(&primary_namespace, &secondary_namespace, &key).await } }, @@ -573,8 +549,6 @@ impl TierStoreInner { ) .await } else { - log_debug!(self.logger, "Ephemeral store not configured. Writing non-critical data to primary and backup stores."); - self.primary_write_then_schedule_backup( primary_namespace.as_str(), secondary_namespace.as_str(), @@ -612,8 +586,6 @@ impl TierStoreInner { ) .await } else { - log_debug!(self.logger, "Ephemeral store not configured. Removing non-critical data from primary and backup stores."); - self.primary_remove_then_schedule_backup( primary_namespace.as_str(), secondary_namespace.as_str(), @@ -647,10 +619,6 @@ impl TierStoreInner { if let Some(eph_store) = self.ephemeral_store.as_ref() { KVStoreSync::list(eph_store.as_ref(), &primary_namespace, &secondary_namespace) } else { - log_debug!( - self.logger, - "Ephemeral store not configured. Listing from primary and backup stores." 
-				);
 				self.list_primary(&primary_namespace, &secondary_namespace).await
 			}
 		},

From 95285b09cf2ab5447c0403f2684cabf0e0c0db26 Mon Sep 17 00:00:00 2001
From: Enigbe
Date: Mon, 16 Feb 2026 21:44:10 +0100
Subject: [PATCH 75/75] DRY ephemeral key matching, fix visibility, and use
 the appropriate KVStore trait

- Restrict `TierStoreInner` visibility from `pub` to private
- Extract the repeated ephemeral key matching into a standalone
  `is_ephemeral_cached_key` helper to DRY up `read_internal`,
  `write_internal`, and `remove_internal`
- Replace `KVStoreSync::list` with async `KVStore::list` in `list_internal`
  to avoid blocking the async runtime
---
 src/io/tier_store.rs | 167 +++++++++++++++++++------------------
 1 file changed, 73 insertions(+), 94 deletions(-)

diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs
index 51fb03e0f..d0f2d9b2d 100644
--- a/src/io/tier_store.rs
+++ b/src/io/tier_store.rs
@@ -10,13 +10,13 @@ use crate::logger::{LdkLogger, Logger};
 use crate::runtime::Runtime;
 use crate::types::DynStore;
 
+use lightning::io;
 use lightning::util::persist::{
 	KVStore, KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY,
 	NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
 	SCORER_PERSISTENCE_KEY, SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
 };
-use lightning::{io, log_trace};
-use lightning::{log_debug, log_error, log_info, log_warn};
+use lightning::{log_debug, log_error, log_warn};
 
 use tokio::sync::mpsc::{self, error::TrySendError};
 
@@ -231,8 +231,8 @@ impl KVStoreSync for TierStore {
 	}
 }
 
-pub struct TierStoreInner {
-	/// For remote data.
+struct TierStoreInner {
+	/// For local or remote data.
 	primary_store: Arc<DynStore>,
 	/// For local non-critical/ephemeral data.
 	ephemeral_store: Option<Arc<DynStore>>,
@@ -396,9 +396,7 @@ impl TierStoreInner {
 		match KVStore::list(self.primary_store.as_ref(), primary_namespace, secondary_namespace)
 			.await
 		{
-			Ok(keys) => {
-				return Ok(keys);
-			},
+			Ok(keys) => Ok(keys),
 			Err(e) => {
 				log_error!(
 					self.logger,
@@ -506,104 +504,76 @@ impl TierStoreInner {
 			"read",
 		)?;
 
-		match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) {
-			(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY)
-			| (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => {
-				if let Some(eph_store) = self.ephemeral_store.as_ref() {
-					// We only try once here (without retry logic) because local failure might be indicative
-					// of a more serious issue (e.g. full memory, memory corruption, permissions change) that
-					// do not self-resolve such that retrying would negate the latency benefits.
-
-					// The following questions remain:
-					// 1. Are there situations where local transient errors may warrant a retry?
-					// 2. Can we reliably identify/detect these transient errors?
-					// 3. Should we fall back to the primary or backup stores in the event of any error?
-					KVStore::read(
-						eph_store.as_ref(),
-						&primary_namespace,
-						&secondary_namespace,
-						&key,
-					)
-					.await
-				} else {
-					self.read_primary(&primary_namespace, &secondary_namespace, &key).await
-				}
-			},
-			_ => self.read_primary(&primary_namespace, &secondary_namespace, &key).await,
+		if let Some(eph_store) = self
+			.ephemeral_store
+			.as_ref()
+			.filter(|_s| is_ephemeral_cached_key(&primary_namespace, &secondary_namespace, &key))
+		{
+			// We only try once here (without retry logic) because local failure might be indicative
+			// of a more serious issue (e.g. full memory, memory corruption, permissions change) that
+			// do not self-resolve such that retrying would negate the latency benefits.
+
+			// The following questions remain:
+			// 1. Are there situations where local transient errors may warrant a retry?
+			// 2. Can we reliably identify/detect these transient errors?
+			// 3. Should we fall back to the primary or backup stores in the event of any error?
+			KVStore::read(eph_store.as_ref(), &primary_namespace, &secondary_namespace, &key).await
+		} else {
+			self.read_primary(&primary_namespace, &secondary_namespace, &key).await
 		}
 	}
 
 	async fn write_internal(
 		&self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec<u8>,
 	) -> io::Result<()> {
-		match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) {
-			(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY)
-			| (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => {
-				if let Some(eph_store) = &self.ephemeral_store {
-					KVStore::write(
-						eph_store.as_ref(),
-						primary_namespace.as_str(),
-						secondary_namespace.as_str(),
-						key.as_str(),
-						buf,
-					)
-					.await
-				} else {
-					self.primary_write_then_schedule_backup(
-						primary_namespace.as_str(),
-						secondary_namespace.as_str(),
-						key.as_str(),
-						buf,
-					)
-					.await
-				}
-			},
-			_ => {
-				self.primary_write_then_schedule_backup(
-					primary_namespace.as_str(),
-					secondary_namespace.as_str(),
-					key.as_str(),
-					buf,
-				)
-				.await
-			},
+		if let Some(eph_store) = self
+			.ephemeral_store
+			.as_ref()
+			.filter(|_s| is_ephemeral_cached_key(&primary_namespace, &secondary_namespace, &key))
+		{
+			KVStore::write(
+				eph_store.as_ref(),
+				primary_namespace.as_str(),
+				secondary_namespace.as_str(),
+				key.as_str(),
+				buf,
+			)
+			.await
+		} else {
+			self.primary_write_then_schedule_backup(
+				primary_namespace.as_str(),
+				secondary_namespace.as_str(),
+				key.as_str(),
+				buf,
+			)
+			.await
 		}
 	}
 
 	async fn remove_internal(
 		&self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool,
 	) -> io::Result<()> {
-		match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) {
-			(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY)
-			| (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => {
-				if let Some(eph_store) = &self.ephemeral_store {
-					KVStore::remove(
-						eph_store.as_ref(),
-						primary_namespace.as_str(),
-						secondary_namespace.as_str(),
-						key.as_str(),
-						lazy,
-					)
-					.await
-				} else {
-					self.primary_remove_then_schedule_backup(
-						primary_namespace.as_str(),
-						secondary_namespace.as_str(),
-						key.as_str(),
-						lazy,
-					)
-					.await
-				}
-			},
-			_ => {
-				self.primary_remove_then_schedule_backup(
-					primary_namespace.as_str(),
-					secondary_namespace.as_str(),
-					key.as_str(),
-					lazy,
-				)
-				.await
-			},
+		if let Some(eph_store) = self
+			.ephemeral_store
+			.as_ref()
+			.filter(|_s| is_ephemeral_cached_key(&primary_namespace, &secondary_namespace, &key))
+		{
+			KVStore::remove(
+				eph_store.as_ref(),
+				primary_namespace.as_str(),
+				secondary_namespace.as_str(),
+				key.as_str(),
+				lazy,
+			)
+			.await
+		} else {
+			self.primary_remove_then_schedule_backup(
+				primary_namespace.as_str(),
+				secondary_namespace.as_str(),
+				key.as_str(),
+				lazy,
+			)
+			.await
 		}
 	}
 
@@ -617,7 +587,8 @@ impl TierStoreInner {
 			) | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _) => {
 				if let Some(eph_store) = self.ephemeral_store.as_ref() {
-					KVStoreSync::list(eph_store.as_ref(), &primary_namespace, &secondary_namespace)
+					KVStore::list(eph_store.as_ref(), &primary_namespace, &secondary_namespace)
+						.await
 				} else {
 					self.list_primary(&primary_namespace, &secondary_namespace).await
 				}
 			},
@@ -654,6 +625,14 @@ impl BackupOp {
 	}
 }
 
+fn is_ephemeral_cached_key(pn: &str, sn: &str, key: &str) -> bool {
+	matches!(
+		(pn, sn, key),
+		(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY)
+			| (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY)
+	)
+}
+
 #[cfg(test)]
 mod tests {
 	use std::panic::RefUnwindSafe;
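// Hypothetical unit test (annotation, not part of the patch) sketching the
// contract of the new `is_ephemeral_cached_key` helper; it assumes the
// persistence constants imported at the top of the file.
#[test]
fn ephemeral_key_matching_covers_graph_and_scorer_only() {
	assert!(is_ephemeral_cached_key(
		NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
		NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
		NETWORK_GRAPH_PERSISTENCE_KEY,
	));
	assert!(is_ephemeral_cached_key(
		SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
		"",
		SCORER_PERSISTENCE_KEY,
	));
	// Everything else is routed to the primary (and backup) tiers.
	assert!(!is_ephemeral_cached_key("payments", "", "some_payment_id"));
}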