diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 661703ded..1ccade444 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -80,11 +80,11 @@ jobs: - name: Test on Rust ${{ matrix.toolchain }} if: "matrix.platform != 'windows-latest'" run: | - RUSTFLAGS="--cfg no_download" cargo test + RUSTFLAGS="--cfg no_download --cfg cycle_tests" cargo test - name: Test with UniFFI support on Rust ${{ matrix.toolchain }} if: "matrix.platform != 'windows-latest' && matrix.build-uniffi" run: | - RUSTFLAGS="--cfg no_download" cargo test --features uniffi + RUSTFLAGS="--cfg no_download --cfg cycle_tests" cargo test --features uniffi doc: name: Documentation diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml index 8472cbd2a..2a3b14ef8 100644 --- a/.github/workflows/semver.yml +++ b/.github/workflows/semver.yml @@ -7,9 +7,5 @@ jobs: steps: - name: Checkout source code uses: actions/checkout@v4 - - name: Install Rust stable toolchain - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable - rustup override set stable - name: Check SemVer uses: obi1kenobi/cargo-semver-checks-action@v2 diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index 8473ed413..b5c4e9a0b 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -45,4 +45,4 @@ jobs: cd ldk-node export TEST_VSS_BASE_URL="http://localhost:8080/vss" RUSTFLAGS="--cfg vss_test" cargo test io::vss_store - RUSTFLAGS="--cfg vss_test" cargo test --test integration_tests_vss + RUSTFLAGS="--cfg vss_test --cfg cycle_tests" cargo test --test integration_tests_vss diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 000000000..300342771 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,67 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Development Rules + +- Always ensure tests pass before committing. +- Run `cargo fmt --all` after every code change +- Never add new dependencies unless explicitly requested +- Please always disclose the use of any AI tools in commit messages and PR descriptions +- When adding new `.rs` files, please ensure to always add the licensing header as found in all other files. + +## Architecture Overview + +LDK-Node is a self-custodial Lightning Network node library built on top of **LDK** (Lightning Development Kit) for Lightning functionality and **BDK** (Bitcoin Development Kit) for on-chain wallet operations. It provides a simple, ready-to-go interface for building Lightning applications with language bindings for Swift, Kotlin, and Python via UniFFI. 
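+
+For orientation, a minimal start/stop flow looks roughly like the sketch below. Method names are taken from `src/builder.rs` and the bindings tests in this change; the `build(node_entropy)` signature follows this branch (not the released API), and the network, URL, and path values are placeholders.
+
+```rust
+use ldk_node::bitcoin::Network;
+use ldk_node::{Builder, NodeEntropy};
+
+// `NodeEntropy` is derived from raw bytes or a BIP39 mnemonic (see the entropy
+// handling introduced in this change); how it is obtained is elided here.
+fn run(node_entropy: NodeEntropy) -> Result<(), Box<dyn std::error::Error>> {
+	let mut builder = Builder::new();
+	builder.set_network(Network::Regtest);
+	builder.set_storage_dir_path("/tmp/ldk_node".to_string());
+	builder.set_chain_source_esplora("http://127.0.0.1:3002".to_string(), None);
+
+	// `build()` uses the default SQLite-backed store; the `build_with_vss_store*`
+	// and `build_with_tier_store` variants select alternative persistence backends.
+	let node = builder.build(node_entropy)?;
+	node.start()?;
+	println!("Node ID: {}", node.node_id());
+	node.stop()?;
+	Ok(())
+}
+```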
+ +### Core Components + +| Component | Location | Responsibility | +|-----------|----------|----------------| +| `Node` | `src/lib.rs` | Central abstraction containing all subsystems; entry point for API | +| `Builder` | `src/builder.rs` | Fluent configuration interface for constructing `Node` instances | +| `Wallet` | `src/wallet/` | BDK-based on-chain wallet with SQLite persistence | +| `ChainSource` | `src/chain/` | Chain data abstraction (Esplora, Electrum, Bitcoin Core) | +| `EventHandler` | `src/event.rs` | Translates LDK events to user-facing `Node` events | +| `PaymentStore` | `src/payment/store.rs` | Persistent payment tracking with status and metadata | + +### Module Organization + +| Module | Purpose | +|--------|---------| +| `payment/` | Payment processing (BOLT11, BOLT12, on-chain, spontaneous, unified) | +| `wallet/` | On-chain wallet abstraction, serialization, persistence | +| `chain/` | Chain data sources, wallet syncing, transaction broadcasting | +| `io/` | Persistence layer (`SQLiteStore`, `VssStore`, KV-store utilities) | +| `liquidity.rs` | Liquidity provider integration (LSPS1/LSPS2) | +| `connection.rs` | Peer connection management and reconnection logic | +| `gossip.rs` | Gossip data source management (RGS, P2P) | +| `graph.rs` | Network graph querying and channel/node information | +| `types.rs` | Type aliases for LDK components (`ChannelManager`, `PeerManager`, etc.) | +| `ffi/` | UniFFI bindings for cross-language support | + +### Key Design Patterns + +- **Arc-based Shared Ownership**: Extensive use of `Arc` for thread-safe shared components enabling background task spawning +- **Event-Driven Architecture**: Events flow from LDK → `EventHandler` → `EventQueue` → User application +- **Trait-based Abstraction**: `KVStore`/`KVStoreSync` for storage, `ChainSource` for chain backends, `StorableObject` for persistence +- **Builder Pattern**: Fluent configuration with sensible defaults and validation during build phase +- **Background Tasks**: Multiple categories (wallet sync, gossip updates, peer reconnection, fee updates, event processing) + +### Lifecycle + +**Startup (`node.start()`)**: Acquires lock → starts chain source → updates fee rates → spawns background sync tasks → sets up gossip/listeners/peer reconnection → starts event processor → marks running + +**Shutdown (`node.stop()`)**: Acquires lock → signals stop → aborts cancellable tasks → waits for background tasks → disconnects peers → persists final state + +### Type Aliases (from `types.rs`) + +Key LDK type aliases used throughout the codebase: +- `ChannelManager` - LDK channel management +- `ChainMonitor` - LDK chain monitoring +- `PeerManager` - LDK peer connections +- `OnionMessenger` - LDK onion messaging +- `Router` - LDK pathfinding (`DefaultRouter`) +- `Scorer` - Combined probabilistic + external scoring +- `Graph` - `NetworkGraph` +- `Sweeper` - `OutputSweeper` diff --git a/Cargo.toml b/Cargo.toml index 59ad2b767..d4b050b80 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,30 +39,31 @@ default = [] #lightning-liquidity = { version = "0.2.0", features = ["std"] } #lightning-macros = { version = "0.2.0" } -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = 
"5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["std"] } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc" } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} bdk_electrum = { version = "0.23.0", default-features = false, features = ["use-rustls-ring"]} -bdk_wallet = { version = "2.2.0", default-features = false, features = ["std", "keys-bip39"]} +bdk_wallet = { version = "2.3.0", default-features = false, features = ["std", "keys-bip39"]} -reqwest = { version = "0.12", default-features = false, 
features = ["json", "rustls-tls"] } +bitreq = { version = "0.3", default-features = false, features = ["async-https"] } rustls = { version = "0.23", default-features = false } rusqlite = { version = "0.31.0", features = ["bundled"] } bitcoin = "0.32.7" bip39 = { version = "2.0.0", features = ["rand"] } bip21 = { version = "0.5", features = ["std"], default-features = false } +async-trait = {version = "0.1.89"} base64 = { version = "0.22.1", default-features = false, features = ["std"] } rand = { version = "0.9.2", default-features = false, features = ["std", "thread_rng", "os_rng"] } chrono = { version = "0.4", default-features = false, features = ["clock"] } @@ -77,15 +78,18 @@ log = { version = "0.4.22", default-features = false, features = ["std"]} vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} +#bitcoin-payment-instructions = { version = "0.6" } +bitcoin-payment-instructions = { git = "https://github.com/tnull/bitcoin-payment-instructions", rev = "b9f9991b42e9d71b3ca966818a93b158cf8f6c40" } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "5bf0d1e2427d759fc1ba4108ddc7e9b32e8bacfc", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "7fe3268475551b0664d315bfbc860416ca8fc774", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" criterion = { version = "0.7.0", features = ["async_tokio"] } +ldk-node-062 = { package = "ldk-node", version = "=0.6.2" } [target.'cfg(not(no_download))'.dev-dependencies] electrsd = { version = "0.36.1", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_27_2"] } @@ -101,9 +105,6 @@ clightningrpc = { version = "0.3.0-beta.8", default-features = false } lnd_grpc_rust = { version = "2.10.0", default-features = false } tokio = { version = "1.37", features = ["fs"] } -[target.'cfg(vss_test)'.dev-dependencies] -ldk-node-062 = { package = "ldk-node", version = "=0.6.2" } - [build-dependencies] uniffi = { version = "0.28.3", features = ["build"], optional = true } @@ -122,6 +123,7 @@ check-cfg = [ "cfg(tokio_unstable)", "cfg(cln_test)", "cfg(lnd_test)", + "cfg(cycle_tests)", ] [[bench]] @@ -167,3 +169,16 @@ harness = false #vss-client-ng = { path = "../vss-client" } #vss-client-ng = { git = "https://github.com/lightningdevkit/vss-client", branch = "main" } +# +#[patch."https://github.com/lightningdevkit/rust-lightning"] +#lightning = { path = "../rust-lightning/lightning" } +#lightning-types = { path = "../rust-lightning/lightning-types" } +#lightning-invoice = { path = "../rust-lightning/lightning-invoice" } +#lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } +#lightning-persister = { path = "../rust-lightning/lightning-persister" } +#lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } +#lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } +#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync" } +#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } +#lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } +#lightning-macros = { path = "../rust-lightning/lightning-macros" } diff --git a/README.md b/README.md index 4e60d3602..dd4f434db 100644 --- a/README.md +++ b/README.md @@ -67,7 
+67,7 @@ LDK Node currently comes with a decidedly opinionated set of design choices: - Entropy for the Lightning and on-chain wallets may be sourced from raw bytes or a [BIP39](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki) mnemonic. In addition, LDK Node offers the means to generate and persist the entropy bytes to disk. ## Language Support -LDK Node itself is written in [Rust][rust] and may therefore be natively added as a library dependency to any `std` Rust program. However, beyond its Rust API it also offers language bindings for [Swift][swift], [Kotlin][kotlin], and [Python][python] based on the [UniFFI](https://github.com/mozilla/uniffi-rs/). Moreover, [Flutter bindings][flutter_bindings] are also available. +LDK Node itself is written in [Rust][rust] and may therefore be natively added as a library dependency to any `std` Rust program. However, beyond its Rust API it also offers language bindings for [Swift][swift], [Kotlin][kotlin], and [Python][python] based on the [UniFFI](https://github.com/mozilla/uniffi-rs/). ## MSRV The Minimum Supported Rust Version (MSRV) is currently 1.85.0. @@ -85,4 +85,3 @@ The Minimum Supported Rust Version (MSRV) is currently 1.85.0. [swift]: https://www.swift.org/ [kotlin]: https://kotlinlang.org/ [python]: https://www.python.org/ -[flutter_bindings]: https://github.com/LtbLightning/ldk-node-flutter diff --git a/benches/payments.rs b/benches/payments.rs index ba69e046d..21bca8d72 100644 --- a/benches/payments.rs +++ b/benches/payments.rs @@ -127,6 +127,7 @@ fn payment_benchmark(c: &mut Criterion) { true, false, common::TestStoreType::Sqlite, + common::TestStoreType::Sqlite, ); let runtime = diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index c4ebf56a6..a07dc356d 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -26,12 +26,22 @@ dictionary BackgroundSyncConfig { u64 fee_rate_cache_update_interval_secs; }; +dictionary SyncTimeoutsConfig { + u64 onchain_wallet_sync_timeout_secs; + u64 lightning_wallet_sync_timeout_secs; + u64 fee_rate_cache_update_timeout_secs; + u64 tx_broadcast_timeout_secs; + u8 per_request_timeout_secs; +}; + dictionary EsploraSyncConfig { BackgroundSyncConfig? background_sync_config; + SyncTimeoutsConfig timeouts_config; }; dictionary ElectrumSyncConfig { BackgroundSyncConfig? background_sync_config; + SyncTimeoutsConfig timeouts_config; }; dictionary LSPS2ServiceConfig { @@ -83,6 +93,9 @@ dictionary LogRecord { string args; string module_path; u32 line; + PublicKey? peer_id; + ChannelId? channel_id; + PaymentHash? 
payment_hash; }; [Trait, WithForeign] @@ -90,6 +103,53 @@ interface LogWriter { void log(LogRecord record); }; +interface FfiDynStore { + [Name=from_store] + constructor(ForeignDynStoreTrait store); +}; + +[Trait, WithForeign] +interface ForeignDynStoreTrait { + [Throws=IOError] + sequence read(string primary_namespace, string secondary_namespace, string key); + [Throws=IOError] + void write(string primary_namespace, string secondary_namespace, string key, sequence buf); + [Throws=IOError] + void remove(string primary_namespace, string secondary_namespace, string key, boolean lazy); + [Throws=IOError] + sequence list(string primary_namespace, string secondary_namespace); + [Throws=IOError, Async] + sequence read_async(string primary_namespace, string secondary_namespace, string key); + [Throws=IOError, Async] + void write_async(string primary_namespace, string secondary_namespace, string key, sequence buf); + [Throws=IOError, Async] + void remove_async(string primary_namespace, string secondary_namespace, string key, boolean lazy); + [Throws=IOError, Async] + sequence list_async(string primary_namespace, string secondary_namespace); +}; + +[Error] +enum IOError { + "NotFound", + "PermissionDenied", + "ConnectionRefused", + "ConnectionReset", + "ConnectionAborted", + "NotConnected", + "AddrInUse", + "AddrNotAvailable", + "BrokenPipe", + "AlreadyExists", + "WouldBlock", + "InvalidInput", + "InvalidData", + "TimedOut", + "WriteZero", + "Interrupted", + "UnexpectedEof", + "Other", +}; + interface Builder { constructor(); [Name=from_config] @@ -114,6 +174,8 @@ interface Builder { void set_announcement_addresses(sequence announcement_addresses); [Throws=BuildError] void set_node_alias(string node_alias); + void set_tier_store_backup(FfiDynStore backup_store); + void set_tier_store_ephemeral(FfiDynStore ephemeral_store); [Throws=BuildError] void set_async_payments_role(AsyncPaymentsRole? role); [Throws=BuildError] @@ -126,6 +188,8 @@ interface Builder { Node build_with_vss_store_and_fixed_headers(NodeEntropy node_entropy, string vss_url, string store_id, record fixed_headers); [Throws=BuildError] Node build_with_vss_store_and_header_provider(NodeEntropy node_entropy, string vss_url, string store_id, VssHeaderProvider header_provider); + [Throws=BuildError] + Node build_with_tier_store(NodeEntropy node_entropy, FfiDynStore primary_store); }; interface Node { @@ -149,7 +213,7 @@ interface Node { Bolt12Payment bolt12_payment(); SpontaneousPayment spontaneous_payment(); OnchainPayment onchain_payment(); - UnifiedQrPayment unified_qr_payment(); + UnifiedPayment unified_payment(); LSPS1Liquidity lsps1_liquidity(); [Throws=NodeError] void connect(PublicKey node_id, SocketAddress address, boolean persist); @@ -275,11 +339,11 @@ interface FeeRate { u64 to_sat_per_vb_ceil(); }; -interface UnifiedQrPayment { +interface UnifiedPayment { [Throws=NodeError] string receive(u64 amount_sats, [ByRef]string message, u32 expiry_sec); - [Throws=NodeError] - QrPaymentResult send([ByRef]string uri_str, RouteParametersConfig? route_parameters); + [Throws=NodeError, Async] + UnifiedPaymentResult send([ByRef]string uri_str, u64? amount_msat, RouteParametersConfig? 
route_parameters); }; interface LSPS1Liquidity { @@ -339,6 +403,7 @@ enum NodeError { "InvalidNodeAlias", "InvalidDateTime", "InvalidFeeRate", + "InvalidScriptPubKey", "DuplicatePayment", "UnsupportedCurrency", "InsufficientFunds", @@ -346,6 +411,7 @@ enum NodeError { "LiquidityFeeTooHigh", "InvalidBlindedPaths", "AsyncPaymentServicesDisabled", + "HrnParsingFailed", }; dictionary NodeStatus { @@ -357,7 +423,6 @@ dictionary NodeStatus { u64? latest_rgs_snapshot_timestamp; u64? latest_pathfinding_scores_sync_timestamp; u64? latest_node_announcement_broadcast_timestamp; - u32? latest_channel_monitor_archival_height; }; dictionary BestBlock { @@ -455,7 +520,7 @@ interface PaymentKind { }; [Enum] -interface QrPaymentResult { +interface UnifiedPaymentResult { Onchain(Txid txid); Bolt11(PaymentId payment_id); Bolt12(PaymentId payment_id); @@ -575,6 +640,7 @@ dictionary ChannelDetails { ChannelId channel_id; PublicKey counterparty_node_id; OutPoint? funding_txo; + ScriptBuf? funding_redeem_script; u64? short_channel_id; u64? outbound_scid_alias; u64? inbound_scid_alias; @@ -807,6 +873,13 @@ interface Offer { PublicKey? issuer_signing_pubkey(); }; +interface HumanReadableName { + [Throws=NodeError, Name=from_encoded] + constructor([ByRef] string encoded); + string user(); + string domain(); +}; + [Traits=(Debug, Display, Eq)] interface Refund { [Throws=NodeError, Name=from_str] @@ -901,3 +974,6 @@ typedef string LSPS1OrderId; [Custom] typedef string LSPSDateTime; + +[Custom] +typedef string ScriptBuf; diff --git a/bindings/python/src/ldk_node/kv_store.py b/bindings/python/src/ldk_node/kv_store.py new file mode 100644 index 000000000..d871d7a6d --- /dev/null +++ b/bindings/python/src/ldk_node/kv_store.py @@ -0,0 +1,115 @@ +import threading + +from abc import ABC, abstractmethod +from typing import List + +from ldk_node import IoError + +class AbstractKvStore(ABC): + @abstractmethod + async def read_async(self, primary_namespace: "str",secondary_namespace: "str",key: "str") -> "typing.List[int]": + pass + + @abstractmethod + async def write_async(self, primary_namespace: "str",secondary_namespace: "str",key: "str",buf: "typing.List[int]") -> None: + pass + + @abstractmethod + async def remove_async(self, primary_namespace: "str",secondary_namespace: "str",key: "str",lazy: "bool") -> None: + pass + + @abstractmethod + async def list_async(self, primary_namespace: "str",secondary_namespace: "str") -> "typing.List[str]": + pass + + @abstractmethod + def read(self, primary_namespace: "str",secondary_namespace: "str",key: "str") -> "typing.List[int]": + pass + + @abstractmethod + def write(self, primary_namespace: "str",secondary_namespace: "str",key: "str",buf: "typing.List[int]") -> None: + pass + + @abstractmethod + def remove(self, primary_namespace: "str",secondary_namespace: "str",key: "str",lazy: "bool") -> None: + pass + + @abstractmethod + def list(self, primary_namespace: "str",secondary_namespace: "str") -> "typing.List[str]": + pass + +class TestKvStore(AbstractKvStore): + def __init__(self, name: str): + self.name = name + # Storage structure: {(primary_ns, secondary_ns): {key: [bytes]}} + self.storage = {} + self._lock = threading.Lock() + + def dump(self): + print(f"\n[{self.name}] Store contents:") + for (primary_ns, secondary_ns), keys_dict in self.storage.items(): + print(f" Namespace: ({primary_ns!r}, {secondary_ns!r})") + for key, data in keys_dict.items(): + print(f" Key: {key!r} -> {len(data)} bytes") + # Optionally show first few bytes + preview = data[:20] if len(data) > 20 else 
data + print(f" Data preview: {preview}...") + + def read(self, primary_namespace: str, secondary_namespace: str, key: str) -> List[int]: + with self._lock: + print(f"[{self.name}] READ: {primary_namespace}/{secondary_namespace}/{key}") + namespace_key = (primary_namespace, secondary_namespace) + + if namespace_key not in self.storage: + print(f" -> namespace not found, keys: {list(self.storage.keys())}") + raise IoError.NotFound(f"Namespace not found: {primary_namespace}/{secondary_namespace}") + + if key not in self.storage[namespace_key]: + print(f" -> key not found, keys: {list(self.storage[namespace_key].keys())}") + raise IoError.NotFound(f"Key not found: {key}") + + data = self.storage[namespace_key][key] + print(f" -> returning {len(data)} bytes") + return data + + def write(self, primary_namespace: str, secondary_namespace: str, key: str, buf: List[int]) -> None: + with self._lock: + namespace_key = (primary_namespace, secondary_namespace) + if namespace_key not in self.storage: + self.storage[namespace_key] = {} + + self.storage[namespace_key][key] = buf.copy() + + def remove(self, primary_namespace: str, secondary_namespace: str, key: str, lazy: bool) -> None: + with self._lock: + namespace_key = (primary_namespace, secondary_namespace) + if namespace_key not in self.storage: + raise IoError.NotFound(f"Namespace not found: {primary_namespace}/{secondary_namespace}") + + if key not in self.storage[namespace_key]: + raise IoError.NotFound(f"Key not found: {key}") + + del self.storage[namespace_key][key] + + if not self.storage[namespace_key]: + del self.storage[namespace_key] + + def list(self, primary_namespace: str, secondary_namespace: str) -> List[str]: + with self._lock: + namespace_key = (primary_namespace, secondary_namespace) + if namespace_key in self.storage: + return list(self.storage[namespace_key].keys()) + return [] + + async def read_async(self, primary_namespace: str, secondary_namespace: str, key: str) -> List[int]: + return self.read(primary_namespace, secondary_namespace, key) + + async def write_async(self, primary_namespace: str, secondary_namespace: str, key: str, buf: List[int]) -> None: + self.write(primary_namespace, secondary_namespace, key, buf) + + async def remove_async(self, primary_namespace: str, secondary_namespace: str, key: str, lazy: bool) -> None: + self.remove(primary_namespace, secondary_namespace, key, lazy) + + async def list_async(self, primary_namespace: str, secondary_namespace: str) -> List[str]: + return self.list(primary_namespace, secondary_namespace) + \ No newline at end of file diff --git a/bindings/python/src/ldk_node/test_ldk_node.py b/bindings/python/src/ldk_node/test_ldk_node.py index 0b73e6a47..3979ee784 100644 --- a/bindings/python/src/ldk_node/test_ldk_node.py +++ b/bindings/python/src/ldk_node/test_ldk_node.py @@ -5,13 +5,67 @@ import os import re import requests +import asyncio +import threading +import ldk_node from ldk_node import * +from kv_store import TestKvStore DEFAULT_ESPLORA_SERVER_URL = "http://127.0.0.1:3002" DEFAULT_TEST_NETWORK = Network.REGTEST DEFAULT_BITCOIN_CLI_BIN = "bitcoin-cli" +class NodeSetup: + def __init__(self, node, node_id, tmp_dir, listening_addresses, stores=None): + self.node = node + self.node_id = node_id + self.tmp_dir = tmp_dir + self.listening_addresses = listening_addresses + self.stores = stores # (primary, backup, ephemeral) or None + + def cleanup(self): + self.node.stop() + time.sleep(1) + self.tmp_dir.cleanup() + +def setup_two_nodes(esplora_endpoint, port_1=2323, port_2=2324, 
use_tier_store=False): + # Setup Node 1 + tmp_dir_1 = tempfile.TemporaryDirectory("_ldk_node_1") + print("TMP DIR 1:", tmp_dir_1.name) + + listening_addresses_1 = [f"127.0.0.1:{port_1}"] + if use_tier_store: + node_1, stores_1 = setup_node_with_tier_store(tmp_dir_1.name, esplora_endpoint, listening_addresses_1) + else: + node_1 = setup_node(tmp_dir_1.name, esplora_endpoint, listening_addresses_1) + stores_1 = None + + node_1.start() + node_id_1 = node_1.node_id() + print("Node ID 1:", node_id_1) + + setup_1 = NodeSetup(node_1, node_id_1, tmp_dir_1, listening_addresses_1, stores_1) + + # Setup Node 2 + tmp_dir_2 = tempfile.TemporaryDirectory("_ldk_node_2") + print("TMP DIR 2:", tmp_dir_2.name) + + listening_addresses_2 = [f"127.0.0.1:{port_2}"] + if use_tier_store: + node_2, stores_2 = setup_node_with_tier_store(tmp_dir_2.name, esplora_endpoint, listening_addresses_2) + else: + node_2 = setup_node(tmp_dir_2.name, esplora_endpoint, listening_addresses_2) + stores_2 = None + + node_2.start() + node_id_2 = node_2.node_id() + print("Node ID 2:", node_id_2) + + setup_2 = NodeSetup(node_2, node_id_2, tmp_dir_2, listening_addresses_2, stores_2) + + return setup_1, setup_2 + def bitcoin_cli(cmd): args = [] @@ -95,7 +149,6 @@ def send_to_address(address, amount_sats): print("SEND TX:", res) return res - def setup_node(tmp_dir, esplora_endpoint, listening_addresses): mnemonic = generate_entropy_mnemonic(None) node_entropy = NodeEntropy.from_bip39_mnemonic(mnemonic, None) @@ -107,6 +160,118 @@ def setup_node(tmp_dir, esplora_endpoint, listening_addresses): builder.set_listening_addresses(listening_addresses) return builder.build(node_entropy) +def setup_node_with_tier_store(tmp_dir, esplora_endpoint, listening_addresses): + mnemonic = generate_entropy_mnemonic(None) + node_entropy = NodeEntropy.from_bip39_mnemonic(mnemonic, None) + config = default_config() + + primary = TestKvStore("primary") + backup = TestKvStore("backup") + ephemeral = TestKvStore("ephemeral") + + # Set event loop for async Python callbacks from Rust + # (https://mozilla.github.io/uniffi-rs/0.27/futures.html#python-uniffi_set_event_loop) + loop = asyncio.new_event_loop() + + def run_loop(): + asyncio.set_event_loop(loop) + loop.run_forever() + + loop_thread = threading.Thread(target=run_loop, daemon=True) + loop_thread.start() + ldk_node.uniffi_set_event_loop(loop) + + builder = Builder.from_config(config) + builder.set_storage_dir_path(tmp_dir) + builder.set_chain_source_esplora(esplora_endpoint, None) + builder.set_network(DEFAULT_TEST_NETWORK) + builder.set_listening_addresses(listening_addresses) + builder.set_tier_store_backup(FfiDynStore.from_store(backup)) + builder.set_tier_store_ephemeral(FfiDynStore.from_store(ephemeral)) + + return builder.build_with_tier_store(node_entropy, FfiDynStore.from_store(primary)), (primary, backup, ephemeral) + +def do_channel_full_cycle(setup_1, setup_2, esplora_endpoint): + # Fund both nodes + address_1 = setup_1.node.onchain_payment().new_address() + txid_1 = send_to_address(address_1, 100000) + address_2 = setup_2.node.onchain_payment().new_address() + txid_2 = send_to_address(address_2, 100000) + + wait_for_tx(esplora_endpoint, txid_1) + wait_for_tx(esplora_endpoint, txid_2) + mine_and_wait(esplora_endpoint, 6) + + setup_1.node.sync_wallets() + setup_2.node.sync_wallets() + + # Verify balances + spendable_balance_1 = setup_1.node.list_balances().spendable_onchain_balance_sats + spendable_balance_2 = setup_2.node.list_balances().spendable_onchain_balance_sats + assert spendable_balance_1 
== 100000 + assert spendable_balance_2 == 100000 + + # Open channel + setup_1.node.open_channel(setup_2.node_id, setup_2.listening_addresses[0], 50000, None, None) + + channel_pending_event_1 = setup_1.node.wait_next_event() + assert isinstance(channel_pending_event_1, Event.CHANNEL_PENDING) + setup_1.node.event_handled() + + channel_pending_event_2 = setup_2.node.wait_next_event() + assert isinstance(channel_pending_event_2, Event.CHANNEL_PENDING) + setup_2.node.event_handled() + + funding_txid = channel_pending_event_1.funding_txo.txid + wait_for_tx(esplora_endpoint, funding_txid) + mine_and_wait(esplora_endpoint, 6) + + setup_1.node.sync_wallets() + setup_2.node.sync_wallets() + + channel_ready_event_1 = setup_1.node.wait_next_event() + assert isinstance(channel_ready_event_1, Event.CHANNEL_READY) + setup_1.node.event_handled() + + channel_ready_event_2 = setup_2.node.wait_next_event() + assert isinstance(channel_ready_event_2, Event.CHANNEL_READY) + setup_2.node.event_handled() + + # Make payment + description = Bolt11InvoiceDescription.DIRECT("asdf") + invoice = setup_2.node.bolt11_payment().receive(2500000, description, 9217) + setup_1.node.bolt11_payment().send(invoice, None) + + payment_successful_event_1 = setup_1.node.wait_next_event() + assert isinstance(payment_successful_event_1, Event.PAYMENT_SUCCESSFUL) + setup_1.node.event_handled() + + payment_received_event_2 = setup_2.node.wait_next_event() + assert isinstance(payment_received_event_2, Event.PAYMENT_RECEIVED) + setup_2.node.event_handled() + + # Close channel + setup_2.node.close_channel(channel_ready_event_2.user_channel_id, setup_1.node_id) + + channel_closed_event_1 = setup_1.node.wait_next_event() + assert isinstance(channel_closed_event_1, Event.CHANNEL_CLOSED) + setup_1.node.event_handled() + + channel_closed_event_2 = setup_2.node.wait_next_event() + assert isinstance(channel_closed_event_2, Event.CHANNEL_CLOSED) + setup_2.node.event_handled() + + mine_and_wait(esplora_endpoint, 1) + setup_1.node.sync_wallets() + setup_2.node.sync_wallets() + + # Verify final balances + spendable_balance_after_close_1 = setup_1.node.list_balances().spendable_onchain_balance_sats + assert spendable_balance_after_close_1 > 95000 + assert spendable_balance_after_close_1 < 100000 + spendable_balance_after_close_2 = setup_2.node.list_balances().spendable_onchain_balance_sats + assert spendable_balance_after_close_2 == 102500 + def get_esplora_endpoint(): if os.environ.get('ESPLORA_ENDPOINT'): return str(os.environ['ESPLORA_ENDPOINT']) @@ -122,132 +287,36 @@ def setUp(self): def test_channel_full_cycle(self): esplora_endpoint = get_esplora_endpoint() - - ## Setup Node 1 - tmp_dir_1 = tempfile.TemporaryDirectory("_ldk_node_1") - print("TMP DIR 1:", tmp_dir_1.name) - - listening_addresses_1 = ["127.0.0.1:2323"] - node_1 = setup_node(tmp_dir_1.name, esplora_endpoint, listening_addresses_1) - node_1.start() - node_id_1 = node_1.node_id() - print("Node ID 1:", node_id_1) - - # Setup Node 2 - tmp_dir_2 = tempfile.TemporaryDirectory("_ldk_node_2") - print("TMP DIR 2:", tmp_dir_2.name) - - listening_addresses_2 = ["127.0.0.1:2324"] - node_2 = setup_node(tmp_dir_2.name, esplora_endpoint, listening_addresses_2) - node_2.start() - node_id_2 = node_2.node_id() - print("Node ID 2:", node_id_2) - - address_1 = node_1.onchain_payment().new_address() - txid_1 = send_to_address(address_1, 100000) - address_2 = node_2.onchain_payment().new_address() - txid_2 = send_to_address(address_2, 100000) - - wait_for_tx(esplora_endpoint, txid_1) - 
wait_for_tx(esplora_endpoint, txid_2) - - mine_and_wait(esplora_endpoint, 6) - - node_1.sync_wallets() - node_2.sync_wallets() - - spendable_balance_1 = node_1.list_balances().spendable_onchain_balance_sats - spendable_balance_2 = node_2.list_balances().spendable_onchain_balance_sats - total_balance_1 = node_1.list_balances().total_onchain_balance_sats - total_balance_2 = node_2.list_balances().total_onchain_balance_sats - - print("SPENDABLE 1:", spendable_balance_1) - self.assertEqual(spendable_balance_1, 100000) - - print("SPENDABLE 2:", spendable_balance_2) - self.assertEqual(spendable_balance_2, 100000) - - print("TOTAL 1:", total_balance_1) - self.assertEqual(total_balance_1, 100000) - - print("TOTAL 2:", total_balance_2) - self.assertEqual(total_balance_2, 100000) - - node_1.open_channel(node_id_2, listening_addresses_2[0], 50000, None, None) - - channel_pending_event_1 = node_1.wait_next_event() - assert isinstance(channel_pending_event_1, Event.CHANNEL_PENDING) - print("EVENT:", channel_pending_event_1) - node_1.event_handled() - - channel_pending_event_2 = node_2.wait_next_event() - assert isinstance(channel_pending_event_2, Event.CHANNEL_PENDING) - print("EVENT:", channel_pending_event_2) - node_2.event_handled() - - funding_txid = channel_pending_event_1.funding_txo.txid - wait_for_tx(esplora_endpoint, funding_txid) - mine_and_wait(esplora_endpoint, 6) - - node_1.sync_wallets() - node_2.sync_wallets() - - channel_ready_event_1 = node_1.wait_next_event() - assert isinstance(channel_ready_event_1, Event.CHANNEL_READY) - print("EVENT:", channel_ready_event_1) - print("funding_txo:", funding_txid) - node_1.event_handled() - - channel_ready_event_2 = node_2.wait_next_event() - assert isinstance(channel_ready_event_2, Event.CHANNEL_READY) - print("EVENT:", channel_ready_event_2) - node_2.event_handled() - - description = Bolt11InvoiceDescription.DIRECT("asdf") - invoice = node_2.bolt11_payment().receive(2500000, description, 9217) - node_1.bolt11_payment().send(invoice, None) - - payment_successful_event_1 = node_1.wait_next_event() - assert isinstance(payment_successful_event_1, Event.PAYMENT_SUCCESSFUL) - print("EVENT:", payment_successful_event_1) - node_1.event_handled() - - payment_received_event_2 = node_2.wait_next_event() - assert isinstance(payment_received_event_2, Event.PAYMENT_RECEIVED) - print("EVENT:", payment_received_event_2) - node_2.event_handled() - - node_2.close_channel(channel_ready_event_2.user_channel_id, node_id_1) - - channel_closed_event_1 = node_1.wait_next_event() - assert isinstance(channel_closed_event_1, Event.CHANNEL_CLOSED) - print("EVENT:", channel_closed_event_1) - node_1.event_handled() - - channel_closed_event_2 = node_2.wait_next_event() - assert isinstance(channel_closed_event_2, Event.CHANNEL_CLOSED) - print("EVENT:", channel_closed_event_2) - node_2.event_handled() - - mine_and_wait(esplora_endpoint, 1) - - node_1.sync_wallets() - node_2.sync_wallets() - - spendable_balance_after_close_1 = node_1.list_balances().spendable_onchain_balance_sats - assert spendable_balance_after_close_1 > 95000 - assert spendable_balance_after_close_1 < 100000 - spendable_balance_after_close_2 = node_2.list_balances().spendable_onchain_balance_sats - self.assertEqual(spendable_balance_after_close_2, 102500) - - # Stop nodes - node_1.stop() - node_2.stop() - - # Cleanup - time.sleep(1) # Wait a sec so our logs can finish writing - tmp_dir_1.cleanup() - tmp_dir_2.cleanup() + setup_1, setup_2 = setup_two_nodes(esplora_endpoint) + + do_channel_full_cycle(setup_1, 
setup_2, esplora_endpoint) + + setup_1.cleanup() + setup_2.cleanup() + + def test_tier_store(self): + esplora_endpoint = get_esplora_endpoint() + setup_1, setup_2 = setup_two_nodes(esplora_endpoint, port_1=2325, port_2=2326, use_tier_store=True) + + do_channel_full_cycle(setup_1, setup_2, esplora_endpoint) + + primary, backup, ephemeral = setup_1.stores + + # Wait for async backup + time.sleep(2) + + self.assertGreater(len(primary.storage), 0, "Primary should have data") + self.assertGreater(len(backup.storage), 0, "Backup should have data") + self.assertEqual(list(primary.storage.keys()), list(backup.storage.keys()), + "Backup should mirror primary") + + self.assertGreater(len(ephemeral.storage), 0, "Ephemeral should have data") + ephemeral_keys = [key for namespace in ephemeral.storage.values() for key in namespace.keys()] + has_scorer_or_graph = any(key in ['scorer', 'network_graph'] for key in ephemeral_keys) + self.assertTrue(has_scorer_or_graph, "Ephemeral should contain scorer or network_graph data") + + setup_1.cleanup() + setup_2.cleanup() if __name__ == '__main__': unittest.main() diff --git a/src/builder.rs b/src/builder.rs index ff84505b4..40b17937d 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -19,8 +19,8 @@ use bitcoin::bip32::{ChildNumber, Xpriv}; use bitcoin::key::Secp256k1; use bitcoin::secp256k1::PublicKey; use bitcoin::{BlockHash, Network}; -use lightning::chain::{chainmonitor, BestBlock, Watch}; -use lightning::io::Cursor; +use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; +use lightning::chain::{chainmonitor, BestBlock}; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; @@ -32,8 +32,9 @@ use lightning::routing::scoring::{ ProbabilisticScoringFeeParameters, }; use lightning::sign::{EntropySource, NodeSigner}; +use lightning::util::config::HTLCInterceptionFlags; use lightning::util::persist::{ - KVStoreSync, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + KVStore, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, }; use lightning::util::ser::ReadableArgs; @@ -51,14 +52,21 @@ use crate::connection::ConnectionManager; use crate::entropy::NodeEntropy; use crate::event::EventQueue; use crate::fee_estimator::OnchainFeeEstimator; +#[cfg(feature = "uniffi")] +use crate::ffi::FfiDynStore; use crate::gossip::GossipSource; use crate::io::sqlite_store::SqliteStore; +use crate::io::tier_store::TierStore; use crate::io::utils::{ - read_external_pathfinding_scores_from_cache, read_node_metrics, write_node_metrics, + read_event_queue, read_external_pathfinding_scores_from_cache, read_network_graph, + read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_pending_payments, + read_scorer, write_node_metrics, }; use crate::io::vss_store::VssStoreBuilder; use crate::io::{ self, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, }; use crate::liquidity::{ LSPS1ClientConfig, LSPS2ClientConfig, LSPS2ServiceConfig, LiquiditySourceBuilder, @@ -67,11 +75,12 @@ use crate::logger::{log_error, LdkLogger, LogLevel, LogWriter, Logger}; use crate::message_handler::NodeCustomMessageHandler; use 
crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; use crate::peer_store::PeerStore; -use crate::runtime::Runtime; +use crate::runtime::{Runtime, RuntimeSpawner}; use crate::tx_broadcaster::TransactionBroadcaster; use crate::types::{ - ChainMonitor, ChannelManager, DynStore, DynStoreWrapper, GossipSync, Graph, KeysManager, - MessageRouter, OnionMessenger, PaymentStore, PeerManager, Persister, SyncAndAsyncKVStore, + AsyncPersister, ChainMonitor, ChannelManager, DynStore, DynStoreWrapper, GossipSync, Graph, + KeysManager, MessageRouter, OnionMessenger, PaymentStore, PeerManager, PendingPaymentStore, + Persister, SyncAndAsyncKVStore, }; use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; @@ -144,6 +153,21 @@ impl std::fmt::Debug for LogWriterConfig { } } +#[derive(Default)] +struct TierStoreConfig { + ephemeral: Option>, + backup: Option>, +} + +impl std::fmt::Debug for TierStoreConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TierStoreConfig") + .field("ephemeral", &self.ephemeral.as_ref().map(|_| "Arc")) + .field("backup", &self.backup.as_ref().map(|_| "Arc")) + .finish() + } +} + /// An error encountered during building a [`Node`]. /// /// [`Node`]: crate::Node @@ -236,6 +260,7 @@ pub struct NodeBuilder { liquidity_source_config: Option, log_writer_config: Option, async_payments_role: Option, + tier_store_config: Option, runtime_handle: Option, pathfinding_scores_sync_config: Option, } @@ -253,6 +278,7 @@ impl NodeBuilder { let gossip_source_config = None; let liquidity_source_config = None; let log_writer_config = None; + let tier_store_config = None; let runtime_handle = None; let pathfinding_scores_sync_config = None; Self { @@ -261,6 +287,7 @@ impl NodeBuilder { gossip_source_config, liquidity_source_config, log_writer_config, + tier_store_config, runtime_handle, async_payments_role: None, pathfinding_scores_sync_config, @@ -538,6 +565,36 @@ impl NodeBuilder { Ok(self) } + /// Configures the backup store for local disaster recovery. + /// + /// When building with [`build_with_tier_store`], this store receives asynchronous copies + /// of all critical data written to the primary store. If the primary store becomes + /// unavailable, reads will fall back to this backup store. + /// + /// Backup writes are non-blocking and do not affect primary store operation performance. + /// + /// [`build_with_tier_store`]: Self::build_with_tier_store + pub fn set_tier_store_backup(&mut self, backup_store: Arc) -> &mut Self { + let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default()); + tier_store_config.backup = Some(backup_store); + self + } + + /// Configures the ephemeral store for non-critical, frequently-accessed data. + /// + /// When building with [`build_with_tier_store`], this store is used for data like + /// the network graph and scorer data to reduce latency for reads. Data stored here + /// can be rebuilt if lost. + /// + /// If not set, non-critical data will be stored in the primary store. + /// + /// [`build_with_tier_store`]: Self::build_with_tier_store + pub fn set_tier_store_ephemeral(&mut self, ephemeral_store: Arc) -> &mut Self { + let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default()); + tier_store_config.ephemeral = Some(ephemeral_store); + self + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. 
pub fn build(&self, node_entropy: NodeEntropy) -> Result { @@ -550,6 +607,7 @@ impl NodeBuilder { Some(io::sqlite_store::KV_TABLE_NAME.to_string()), ) .map_err(|_| BuildError::KVStoreSetupFailed)?; + self.build_with_store(node_entropy, kv_store) } @@ -562,6 +620,7 @@ impl NodeBuilder { fs::create_dir_all(storage_dir_path.clone()) .map_err(|_| BuildError::StoragePathAccessFailed)?; let kv_store = FilesystemStore::new(storage_dir_path); + self.build_with_store(node_entropy, kv_store) } @@ -648,6 +707,99 @@ impl NodeBuilder { self.build_with_store(node_entropy, vss_store) } + /// Builds a [`Node`] instance with tiered storage for managing data across multiple storage layers. + /// + /// This build method enables a three-tier storage architecture optimized for different data types + /// and access patterns: + /// + /// ### Storage Tiers + /// + /// - **Primary Store** (required): The authoritative store for critical channel state and payment data. + /// Typically a remote/cloud storage service for durability and accessibility across devices. + /// + /// - **Ephemeral Store** (optional): Local storage for non-critical, frequently-accessed data like + /// the network graph and scorer. Improves performance by reducing latency for data that can be + /// rebuilt if lost. Configure with [`set_tier_store_ephemeral`]. + /// + /// - **Backup Store** (optional): Local backup of critical data for disaster recovery scenarios. + /// Provides a safety net if the primary store becomes temporarily unavailable. Writes are + /// asynchronous to avoid blocking primary operations. Configure with [`set_tier_store_backup`]. + /// + /// ## Configuration + /// + /// Use the setter methods to configure optional stores: + /// - [`set_tier_store_ephemeral`] - Set local store for network graph and scorer + /// - [`set_tier_store_backup`] - Set local backup store for disaster recovery + /// + /// ## Example + /// + /// ```ignore + /// # use ldk_node::{Builder, Config}; + /// # use std::sync::Arc; + /// let config = Config::default(); + /// let mut builder = NodeBuilder::from_config(config); + /// + /// let primary = Arc::new(VssStore::new(...)); + /// let ephemeral = Arc::new(FilesystemStore::new(...)); + /// let backup = Arc::new(SqliteStore::new(...)); + /// + /// builder + /// .set_tier_store_ephemeral(ephemeral) + /// .set_tier_store_backup(backup); + /// + /// let node = builder.build_with_tier_store(primary)?; + /// # Ok::<(), ldk_node::BuildError>(()) + /// ``` + /// + /// [`set_tier_store_ephemeral`]: Self::set_tier_store_ephemeral + /// [`set_tier_store_backup`]: Self::set_tier_store_backup + #[cfg(not(feature = "uniffi"))] + pub fn build_with_tier_store( + &self, node_entropy: NodeEntropy, primary_store: Arc, + ) -> Result { + self.build_with_tier_store_internal(node_entropy, primary_store) + } + + fn build_with_tier_store_internal( + &self, node_entropy: NodeEntropy, primary_store: Arc, + ) -> Result { + let logger = setup_logger(&self.log_writer_config, &self.config)?; + let runtime = if let Some(handle) = self.runtime_handle.as_ref() { + Arc::new(Runtime::with_handle(handle.clone(), Arc::clone(&logger))) + } else { + Arc::new(Runtime::new(Arc::clone(&logger)).map_err(|e| { + log_error!(logger, "Failed to setup tokio runtime: {}", e); + BuildError::RuntimeSetupFailed + })?) 
+ }; + + let ts_config = self.tier_store_config.as_ref(); + + let mut tier_store = + TierStore::new(primary_store, Arc::clone(&runtime), Arc::clone(&logger)); + + if let Some(config) = ts_config { + config.ephemeral.as_ref().map(|s| tier_store.set_ephemeral_store(Arc::clone(s))); + config.backup.as_ref().map(|s| tier_store.set_backup_store(Arc::clone(s))); + } + + let seed_bytes = node_entropy.to_seed_bytes(); + let config = Arc::new(self.config.clone()); + + build_with_store_internal( + config, + self.chain_data_source_config.as_ref(), + self.gossip_source_config.as_ref(), + self.liquidity_source_config.as_ref(), + self.pathfinding_scores_sync_config.as_ref(), + self.async_payments_role, + seed_bytes, + runtime, + logger, + Arc::new(DynStoreWrapper(tier_store)), + ) + } + /// Builds a [`Node`] instance according to the options previously configured. pub fn build_with_store( &self, node_entropy: NodeEntropy, kv_store: S, @@ -913,6 +1065,36 @@ impl ArcedNodeBuilder { self.inner.write().unwrap().set_async_payments_role(role).map(|_| ()) } + /// Configures the backup store for local disaster recovery. + /// + /// When building with [`build_with_tier_store`], this store receives asynchronous copies + /// of all critical data written to the primary store. If the primary store becomes + /// unavailable, reads will fall back to this backup store. + /// + /// Backup writes are non-blocking and do not affect primary store operation performance. + /// + /// [`build_with_tier_store`]: Self::build_with_tier_store + pub fn set_tier_store_backup(&self, backup_store: Arc) { + let wrapper = DynStoreWrapper((*backup_store).clone()); + let store: Arc = Arc::new(wrapper); + self.inner.write().unwrap().set_tier_store_backup(store); + } + + /// Configures the ephemeral store for non-critical, frequently-accessed data. + /// + /// When building with [`build_with_tier_store`], this store is used for data like + /// the network graph and scorer data to reduce latency for reads. Data stored here + /// can be rebuilt if lost. + /// + /// If not set, non-critical data will be stored in the primary store. + /// + /// [`build_with_tier_store`]: Self::build_with_tier_store + pub fn set_tier_store_ephemeral(&self, ephemeral_store: Arc) { + let wrapper = DynStoreWrapper((*ephemeral_store).clone()); + let store: Arc = Arc::new(wrapper); + self.inner.write().unwrap().set_tier_store_ephemeral(store); + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. pub fn build(&self, node_entropy: Arc) -> Result, BuildError> { @@ -1011,6 +1193,24 @@ impl ArcedNodeBuilder { .map(Arc::new) } + // pub fn build_with_tier_store( + // &self, node_entropy: Arc, primary_store: Arc, + // ) -> Result, BuildError> { + // self.inner.read().unwrap().build_with_tier_store(*node_entropy, primary_store).map(Arc::new) + // } + + pub fn build_with_tier_store( + &self, node_entropy: Arc, primary_store: Arc, + ) -> Result, BuildError> { + let wrapper = DynStoreWrapper((*primary_store).clone()); + let store: Arc = Arc::new(wrapper); + self.inner + .read() + .unwrap() + .build_with_tier_store_internal(*node_entropy, store) + .map(Arc::new) + } + /// Builds a [`Node`] instance according to the options previously configured. // Note that the generics here don't actually work for Uniffi, but we don't currently expose // this so its not needed. 
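As a rough sketch of the tier-store wiring described in the docs above (Rust API, non-`uniffi` build): the caller supplies the concrete stores, the optional tiers are configured via the setters, and `build_with_tier_store` takes the primary store together with the node entropy. The `DynStore` and `NodeEntropy` import paths shown here are assumptions; the method names and argument order follow this diff.

```rust
use std::sync::Arc;

use ldk_node::{BuildError, Builder, Config, DynStore, Node, NodeEntropy};

/// Sketch: wire up the three storage tiers and build. The caller supplies the
/// concrete stores (anything usable as an `Arc<DynStore>` in this branch).
fn build_tiered_node(
	node_entropy: NodeEntropy, primary: Arc<DynStore>, ephemeral: Arc<DynStore>,
	backup: Arc<DynStore>,
) -> Result<Node, BuildError> {
	let mut builder = Builder::from_config(Config::default());
	// Ephemeral: rebuildable data (network graph, scorer). Backup: async copy of
	// critical data for disaster recovery. Both are optional.
	builder.set_tier_store_ephemeral(ephemeral);
	builder.set_tier_store_backup(backup);
	// Primary: the authoritative store for critical channel and payment state.
	builder.build_with_tier_store(node_entropy, primary)
}
```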
@@ -1049,8 +1249,22 @@ fn build_with_store_internal( } } + let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger))); + let fee_estimator = Arc::new(OnchainFeeEstimator::new()); + + let kv_store_ref = Arc::clone(&kv_store); + let logger_ref = Arc::clone(&logger); + let (payment_store_res, node_metris_res, pending_payment_store_res) = + runtime.block_on(async move { + tokio::join!( + read_payments(&*kv_store_ref, Arc::clone(&logger_ref)), + read_node_metrics(&*kv_store_ref, Arc::clone(&logger_ref)), + read_pending_payments(&*kv_store_ref, Arc::clone(&logger_ref)) + ) + }); + // Initialize the status fields. - let node_metrics = match read_node_metrics(Arc::clone(&kv_store), Arc::clone(&logger)) { + let node_metrics = match node_metris_res { Ok(metrics) => Arc::new(RwLock::new(metrics)), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1061,10 +1275,8 @@ fn build_with_store_internal( } }, }; - let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger))); - let fee_estimator = Arc::new(OnchainFeeEstimator::new()); - let payment_store = match io::utils::read_payments(Arc::clone(&kv_store), Arc::clone(&logger)) { + let payment_store = match payment_store_res { Ok(payments) => Arc::new(PaymentStore::new( payments, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(), @@ -1230,6 +1442,20 @@ fn build_with_store_internal( }, }; + let pending_payment_store = match pending_payment_store_res { + Ok(pending_payments) => Arc::new(PendingPaymentStore::new( + pending_payments, + PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE.to_string(), + PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE.to_string(), + Arc::clone(&kv_store), + Arc::clone(&logger), + )), + Err(e) => { + log_error!(logger, "Failed to read pending payment data from store: {}", e); + return Err(BuildError::ReadFailed); + }, + }; + let wallet = Arc::new(Wallet::new( bdk_wallet, wallet_persister, @@ -1238,6 +1464,7 @@ fn build_with_store_internal( Arc::clone(&payment_store), Arc::clone(&config), Arc::clone(&logger), + Arc::clone(&pending_payment_store), )); // Initialize the KeysManager @@ -1256,8 +1483,9 @@ fn build_with_store_internal( )); let peer_storage_key = keys_manager.get_peer_storage_key(); - let persister = Arc::new(Persister::new( + let monitor_reader = Arc::new(AsyncPersister::new( Arc::clone(&kv_store), + RuntimeSpawner::new(Arc::clone(&runtime)), Arc::clone(&logger), PERSISTER_MAX_PENDING_UPDATES, Arc::clone(&keys_manager), @@ -1266,8 +1494,18 @@ fn build_with_store_internal( Arc::clone(&fee_estimator), )); + // Read ChannelMonitors and the NetworkGraph + let kv_store_ref = Arc::clone(&kv_store); + let logger_ref = Arc::clone(&logger); + let (monitor_read_res, network_graph_res) = runtime.block_on(async move { + tokio::join!( + monitor_reader.read_all_channel_monitors_with_updates_parallel(), + read_network_graph(&*kv_store_ref, logger_ref), + ) + }); + // Read ChannelMonitor state from store - let channel_monitors = match persister.read_all_channel_monitors_with_updates() { + let channel_monitors = match monitor_read_res { Ok(monitors) => monitors, Err(e) => { if e.kind() == lightning::io::ErrorKind::NotFound { @@ -1279,6 +1517,16 @@ fn build_with_store_internal( }, }; + let persister = Arc::new(Persister::new( + Arc::clone(&kv_store), + Arc::clone(&logger), + PERSISTER_MAX_PENDING_UPDATES, + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + )); + // Initialize the ChainMonitor let chain_monitor: 
Arc = Arc::new(chainmonitor::ChainMonitor::new( Some(Arc::clone(&chain_source)), @@ -1291,24 +1539,54 @@ fn build_with_store_internal( )); // Initialize the network graph, scorer, and router - let network_graph = - match io::utils::read_network_graph(Arc::clone(&kv_store), Arc::clone(&logger)) { - Ok(graph) => Arc::new(graph), - Err(e) => { - if e.kind() == std::io::ErrorKind::NotFound { - Arc::new(Graph::new(config.network.into(), Arc::clone(&logger))) - } else { - log_error!(logger, "Failed to read network graph from store: {}", e); - return Err(BuildError::ReadFailed); - } - }, - }; + let network_graph = match network_graph_res { + Ok(graph) => Arc::new(graph), + Err(e) => { + if e.kind() == std::io::ErrorKind::NotFound { + Arc::new(Graph::new(config.network.into(), Arc::clone(&logger))) + } else { + log_error!(logger, "Failed to read network graph from store: {}", e); + return Err(BuildError::ReadFailed); + } + }, + }; - let local_scorer = match io::utils::read_scorer( - Arc::clone(&kv_store), - Arc::clone(&network_graph), - Arc::clone(&logger), - ) { + // Read various smaller LDK and ldk-node objects from the store + let kv_store_ref = Arc::clone(&kv_store); + let logger_ref = Arc::clone(&logger); + let network_graph_ref = Arc::clone(&network_graph); + let output_sweeper_future = read_output_sweeper( + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + Arc::clone(&chain_source), + Arc::clone(&keys_manager), + Arc::clone(&kv_store_ref), + Arc::clone(&logger_ref), + ); + let ( + scorer_res, + external_scores_res, + channel_manager_bytes_res, + sweeper_bytes_res, + event_queue_res, + peer_info_res, + ) = runtime.block_on(async move { + tokio::join!( + read_scorer(&*kv_store_ref, network_graph_ref, Arc::clone(&logger_ref)), + read_external_pathfinding_scores_from_cache(&*kv_store_ref, Arc::clone(&logger_ref)), + KVStore::read( + &*kv_store_ref, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ), + output_sweeper_future, + read_event_queue(Arc::clone(&kv_store_ref), Arc::clone(&logger_ref)), + read_peer_info(Arc::clone(&kv_store_ref), Arc::clone(&logger_ref)), + ) + }); + + let local_scorer = match scorer_res { Ok(scorer) => scorer, Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1324,7 +1602,7 @@ fn build_with_store_internal( let scorer = Arc::new(Mutex::new(CombinedScorer::new(local_scorer))); // Restore external pathfinding scores from cache if possible. - match read_external_pathfinding_scores_from_cache(Arc::clone(&kv_store), Arc::clone(&logger)) { + match external_scores_res { Ok(external_scores) => { scorer.lock().unwrap().merge(external_scores, cur_time); log_trace!(logger, "External scores from cache merged successfully"); @@ -1349,11 +1627,11 @@ fn build_with_store_internal( let mut user_config = default_user_config(&config); if liquidity_source_config.and_then(|lsc| lsc.lsps2_service.as_ref()).is_some() { - // If we act as an LSPS2 service, we need to to be able to intercept HTLCs and forward the + // If we act as an LSPS2 service, we need to be able to intercept HTLCs and forward the // information to the service handler. - user_config.accept_intercept_htlcs = true; + user_config.htlc_interception_flags = HTLCInterceptionFlags::ToInterceptSCIDs.into(); - // If we act as an LSPS2 service, we allow forwarding to unnannounced channels. + // If we act as an LSPS2 service, we allow forwarding to unannounced channels. 
user_config.accept_forwards_to_priv_channels = true; // If we act as an LSPS2 service, set the HTLC-value-in-flight to 100% of the channel value @@ -1377,13 +1655,7 @@ fn build_with_store_internal( // Initialize the ChannelManager let channel_manager = { - if let Ok(res) = KVStoreSync::read( - &*kv_store, - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_KEY, - ) { - let mut reader = Cursor::new(res); + if let Ok(reader) = channel_manager_bytes_res { let channel_monitor_references = channel_monitors.iter().map(|(_, chanmon)| chanmon).collect(); let read_args = ChannelManagerReadArgs::new( @@ -1400,7 +1672,7 @@ fn build_with_store_internal( channel_monitor_references, ); let (_hash, channel_manager) = - <(BlockHash, ChannelManager)>::read(&mut reader, read_args).map_err(|e| { + <(BlockHash, ChannelManager)>::read(&mut &*reader, read_args).map_err(|e| { log_error!(logger, "Failed to read channel manager from store: {}", e); BuildError::ReadFailed })?; @@ -1433,12 +1705,14 @@ fn build_with_store_internal( // Give ChannelMonitors to ChainMonitor for (_blockhash, channel_monitor) in channel_monitors.into_iter() { let channel_id = channel_monitor.channel_id(); - chain_monitor.watch_channel(channel_id, channel_monitor).map_err(|e| { - log_error!(logger, "Failed to watch channel monitor: {:?}", e); + chain_monitor.load_existing_monitor(channel_id, channel_monitor).map_err(|e| { + log_error!(logger, "Failed to load channel monitor: {:?}", e); BuildError::InvalidChannelMonitor })?; } + let hrn_resolver = Arc::new(LDKOnionMessageDNSSECHrnResolver::new(Arc::clone(&network_graph))); + // Initialize the PeerManager let onion_messenger: Arc = if let Some(AsyncPaymentsRole::Server) = async_payments_role { @@ -1450,7 +1724,7 @@ fn build_with_store_internal( message_router, Arc::clone(&channel_manager), Arc::clone(&channel_manager), - IgnoringMessageHandler {}, + Arc::clone(&hrn_resolver), IgnoringMessageHandler {}, )) } else { @@ -1462,7 +1736,7 @@ fn build_with_store_internal( message_router, Arc::clone(&channel_manager), Arc::clone(&channel_manager), - IgnoringMessageHandler {}, + Arc::clone(&hrn_resolver), IgnoringMessageHandler {}, )) }; @@ -1474,22 +1748,22 @@ fn build_with_store_internal( let gossip_source = match gossip_source_config { GossipSourceConfig::P2PNetwork => { - let p2p_source = - Arc::new(GossipSource::new_p2p(Arc::clone(&network_graph), Arc::clone(&logger))); + let p2p_source = Arc::new(GossipSource::new_p2p( + Arc::clone(&network_graph), + Arc::clone(&chain_source), + Arc::clone(&runtime), + Arc::clone(&logger), + )); // Reset the RGS sync timestamp in case we somehow switch gossip sources { let mut locked_node_metrics = node_metrics.write().unwrap(); locked_node_metrics.latest_rgs_snapshot_timestamp = None; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - ) - .map_err(|e| { - log_error!(logger, "Failed writing to store: {}", e); - BuildError::WriteFailed - })?; + write_node_metrics(&*locked_node_metrics, &*kv_store, Arc::clone(&logger)) + .map_err(|e| { + log_error!(logger, "Failed writing to store: {}", e); + BuildError::WriteFailed + })?; } p2p_source }, @@ -1594,25 +1868,19 @@ fn build_with_store_internal( Arc::clone(&keys_manager), )); - liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::clone(&peer_manager))); + let peer_manager_clone = Arc::downgrade(&peer_manager); + hrn_resolver.register_post_queue_action(Box::new(move || { + if let 
Some(upgraded_pointer) = peer_manager_clone.upgrade() { + upgraded_pointer.process_events(); + } + })); - gossip_source.set_gossip_verifier( - Arc::clone(&chain_source), - Arc::clone(&peer_manager), - Arc::clone(&runtime), - ); + liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::downgrade(&peer_manager))); let connection_manager = Arc::new(ConnectionManager::new(Arc::clone(&peer_manager), Arc::clone(&logger))); - let output_sweeper = match io::utils::read_output_sweeper( - Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - Arc::clone(&chain_source), - Arc::clone(&keys_manager), - Arc::clone(&kv_store), - Arc::clone(&logger), - ) { + let output_sweeper = match sweeper_bytes_res { Ok(output_sweeper) => Arc::new(output_sweeper), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1633,8 +1901,7 @@ fn build_with_store_internal( }, }; - let event_queue = match io::utils::read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger)) - { + let event_queue = match event_queue_res { Ok(event_queue) => Arc::new(event_queue), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1646,7 +1913,7 @@ fn build_with_store_internal( }, }; - let peer_store = match io::utils::read_peer_info(Arc::clone(&kv_store), Arc::clone(&logger)) { + let peer_store = match peer_info_res { Ok(peer_store) => Arc::new(peer_store), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { @@ -1670,6 +1937,18 @@ fn build_with_store_internal( let pathfinding_scores_sync_url = pathfinding_scores_sync_config.map(|c| c.url.clone()); + #[cfg(cycle_tests)] + let mut _leak_checker = crate::LeakChecker(Vec::new()); + #[cfg(cycle_tests)] + { + use std::any::Any; + use std::sync::Weak; + + _leak_checker.0.push(Arc::downgrade(&channel_manager) as Weak); + _leak_checker.0.push(Arc::downgrade(&network_graph) as Weak); + _leak_checker.0.push(Arc::downgrade(&wallet) as Weak); + } + Ok(Node { runtime, stop_sender, @@ -1701,6 +1980,9 @@ fn build_with_store_internal( node_metrics, om_mailbox, async_payments_role, + hrn_resolver, + #[cfg(cycle_tests)] + _leak_checker, }) } diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 0c3b644ca..8a7167022 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -29,16 +29,17 @@ use lightning_block_sync::{ }; use serde::Serialize; -use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; +use super::WalletSyncStatus; use crate::config::{ - BitcoindRestClientConfig, Config, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, + BitcoindRestClientConfig, Config, DEFAULT_FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, + DEFAULT_TX_BROADCAST_TIMEOUT_SECS, }; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, ConfirmationTarget, OnchainFeeEstimator, }; use crate::io::utils::write_node_metrics; -use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::logger::{log_bytes, log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; @@ -205,14 +206,10 @@ impl BitcoindChainSource { unix_time_secs_opt; locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - ) - .unwrap_or_else(|e| { - log_error!(self.logger, "Failed to persist node metrics: {}", e); - }); + write_node_metrics(&*locked_node_metrics, 
&*self.kv_store, &*self.logger) + .unwrap_or_else(|e| { + log_error!(self.logger, "Failed to persist node metrics: {}", e); + }); } break; }, @@ -368,7 +365,7 @@ impl BitcoindChainSource { }; if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); + log_debug!(self.logger, "Sync in progress, skipping."); return sync_receiver.recv().await.map_err(|e| { debug_assert!(false, "Failed to receive wallet polling result: {:?}", e); log_error!(self.logger, "Failed to receive wallet polling result: {:?}", e); @@ -418,14 +415,6 @@ impl BitcoindChainSource { now.elapsed().unwrap().as_millis() ); *self.latest_chain_tip.write().unwrap() = Some(tip); - - periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - chain_monitor, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - Arc::clone(&self.node_metrics), - )?; }, Ok(_) => {}, Err(e) => { @@ -469,11 +458,7 @@ impl BitcoindChainSource { locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; Ok(()) } @@ -482,7 +467,7 @@ impl BitcoindChainSource { macro_rules! get_fee_rate_update { ($estimation_fut:expr) => {{ let update_res = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + Duration::from_secs(DEFAULT_FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), $estimation_fut, ) .await @@ -574,7 +559,7 @@ impl BitcoindChainSource { if self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache) { // We only log if the values changed, as it might be very spammy otherwise. 
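The chain-source changes above and below consistently race sync, fee-estimation, and broadcast futures against `tokio::time::timeout`, now using the configurable timeout values. A minimal sketch of that wrapping pattern, detached from the ldk-node types:

```rust
use std::time::Duration;

#[tokio::main]
async fn main() {
    // Stand-in for a slow network operation such as a broadcast or wallet sync.
    let slow_op = async {
        tokio::time::sleep(Duration::from_secs(2)).await;
        42u32
    };

    // The operation is raced against the configured timeout; expiry yields an
    // `Elapsed` error that callers map to their own error type.
    match tokio::time::timeout(Duration::from_secs(1), slow_op).await {
        Ok(value) => println!("finished with {value}"),
        Err(_elapsed) => eprintln!("operation timed out"),
    }
}
```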
- log_info!( + log_debug!( self.logger, "Fee rate cache update finished in {}ms.", now.elapsed().as_millis() @@ -586,11 +571,7 @@ impl BitcoindChainSource { { let mut locked_node_metrics = self.node_metrics.write().unwrap(); locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; } Ok(()) @@ -604,7 +585,7 @@ impl BitcoindChainSource { for tx in &package { let txid = tx.compute_txid(); let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + Duration::from_secs(DEFAULT_TX_BROADCAST_TIMEOUT_SECS), self.api_client.broadcast_transaction(tx), ); match timeout_fut.await { @@ -646,13 +627,6 @@ pub(crate) enum UtxoSourceClient { Rest(Arc), } -impl std::ops::Deref for UtxoSourceClient { - type Target = Self; - fn deref(&self) -> &Self { - self - } -} - impl BlockSource for UtxoSourceClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, height_hint: Option, diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 9e05dfaee..7b08c3845 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -23,25 +23,21 @@ use lightning::chain::{Confirm, Filter, WatchedOutput}; use lightning::util::ser::Writeable; use lightning_transaction_sync::ElectrumSyncClient; -use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; -use crate::config::{ - Config, ElectrumSyncConfig, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, - FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, -}; +use super::WalletSyncStatus; +use crate::config::{Config, ElectrumSyncConfig, BDK_CLIENT_STOP_GAP}; use crate::error::Error; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, ConfirmationTarget, OnchainFeeEstimator, }; use crate::io::utils::write_node_metrics; -use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::logger::{log_bytes, log_debug, log_error, log_trace, LdkLogger, Logger}; use crate::runtime::Runtime; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::NodeMetrics; const BDK_ELECTRUM_CLIENT_BATCH_SIZE: usize = 5; const ELECTRUM_CLIENT_NUM_RETRIES: u8 = 3; -const ELECTRUM_CLIENT_TIMEOUT_SECS: u8 = 10; pub(super) struct ElectrumChainSource { server_url: String, @@ -82,6 +78,7 @@ impl ElectrumChainSource { pub(super) fn start(&self, runtime: Arc) -> Result<(), Error> { self.electrum_runtime_status.write().unwrap().start( self.server_url.clone(), + self.sync_config.clone(), Arc::clone(&runtime), Arc::clone(&self.config), Arc::clone(&self.logger), @@ -100,7 +97,7 @@ impl ElectrumChainSource { status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); + log_debug!(self.logger, "Sync in progress, skipping."); return sync_receiver.recv().await.map_err(|e| { debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); @@ -135,7 +132,7 @@ impl ElectrumChainSource { |update_res: Result, now: Instant| match update_res { Ok(update) => match onchain_wallet.apply_update(update) { Ok(()) => { - log_info!( + log_debug!( self.logger, "{} of on-chain wallet finished in {}ms.", if 
incremental_sync { "Incremental sync" } else { "Sync" }, @@ -149,8 +146,8 @@ impl ElectrumChainSource { unix_time_secs_opt; write_node_metrics( &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), + &*self.kv_store, + &*self.logger, )?; } Ok(()) @@ -191,7 +188,7 @@ impl ElectrumChainSource { status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); + log_debug!(self.logger, "Sync in progress, skipping."); return sync_receiver.recv().await.map_err(|e| { debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); @@ -239,20 +236,8 @@ impl ElectrumChainSource { { let mut locked_node_metrics = self.node_metrics.write().unwrap(); locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; } - - periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - Arc::clone(&self.node_metrics), - )?; } res @@ -273,7 +258,7 @@ impl ElectrumChainSource { let new_fee_rate_cache = electrum_client.get_fee_rate_cache_update().await?; self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache); - log_info!( + log_debug!( self.logger, "Fee rate cache update finished in {}ms.", now.elapsed().as_millis() @@ -284,11 +269,7 @@ impl ElectrumChainSource { { let mut locked_node_metrics = self.node_metrics.write().unwrap(); locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; } Ok(()) @@ -334,13 +315,14 @@ impl ElectrumRuntimeStatus { } pub(super) fn start( - &mut self, server_url: String, runtime: Arc, config: Arc, - logger: Arc, + &mut self, server_url: String, sync_config: ElectrumSyncConfig, runtime: Arc, + config: Arc, logger: Arc, ) -> Result<(), Error> { match self { Self::Stopped { pending_registered_txs, pending_registered_outputs } => { let client = Arc::new(ElectrumRuntimeClient::new( - server_url.clone(), + server_url, + sync_config, runtime, config, logger, @@ -396,6 +378,7 @@ impl ElectrumRuntimeStatus { struct ElectrumRuntimeClient { electrum_client: Arc, + sync_config: ElectrumSyncConfig, bdk_electrum_client: Arc>>, tx_sync: Arc>>, runtime: Arc, @@ -405,11 +388,12 @@ struct ElectrumRuntimeClient { impl ElectrumRuntimeClient { fn new( - server_url: String, runtime: Arc, config: Arc, logger: Arc, + server_url: String, sync_config: ElectrumSyncConfig, runtime: Arc, + config: Arc, logger: Arc, ) -> Result { let electrum_config = ElectrumConfigBuilder::new() .retry(ELECTRUM_CLIENT_NUM_RETRIES) - .timeout(Some(ELECTRUM_CLIENT_TIMEOUT_SECS)) + .timeout(Some(sync_config.timeouts_config.per_request_timeout_secs)) .build(); let electrum_client = Arc::new( @@ -425,7 +409,15 @@ impl ElectrumRuntimeClient { Error::ConnectionFailed })?, ); - Ok(Self { electrum_client, bdk_electrum_client, tx_sync, runtime, config, logger }) + Ok(Self { + electrum_client, + sync_config, + bdk_electrum_client, + tx_sync, + runtime, + config, + logger, + }) } async fn sync_confirmables( @@ -435,8 +427,12 @@ 
impl ElectrumRuntimeClient { let tx_sync = Arc::clone(&self.tx_sync); let spawn_fut = self.runtime.spawn_blocking(move || tx_sync.sync(confirmables)); - let timeout_fut = - tokio::time::timeout(Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), spawn_fut); + let timeout_fut = tokio::time::timeout( + Duration::from_secs( + self.sync_config.timeouts_config.lightning_wallet_sync_timeout_secs, + ), + spawn_fut, + ); let res = timeout_fut .await @@ -453,7 +449,7 @@ impl ElectrumRuntimeClient { Error::TxSyncFailed })?; - log_info!( + log_debug!( self.logger, "Sync of Lightning wallet finished in {}ms.", now.elapsed().as_millis() @@ -477,8 +473,10 @@ impl ElectrumRuntimeClient { true, ) }); - let wallet_sync_timeout_fut = - tokio::time::timeout(Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), spawn_fut); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(self.sync_config.timeouts_config.onchain_wallet_sync_timeout_secs), + spawn_fut, + ); wallet_sync_timeout_fut .await @@ -506,8 +504,10 @@ impl ElectrumRuntimeClient { let spawn_fut = self.runtime.spawn_blocking(move || { bdk_electrum_client.sync(request, BDK_ELECTRUM_CLIENT_BATCH_SIZE, true) }); - let wallet_sync_timeout_fut = - tokio::time::timeout(Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), spawn_fut); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(self.sync_config.timeouts_config.onchain_wallet_sync_timeout_secs), + spawn_fut, + ); wallet_sync_timeout_fut .await @@ -533,8 +533,10 @@ impl ElectrumRuntimeClient { let spawn_fut = self.runtime.spawn_blocking(move || electrum_client.transaction_broadcast(&tx)); - let timeout_fut = - tokio::time::timeout(Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), spawn_fut); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(self.sync_config.timeouts_config.tx_broadcast_timeout_secs), + spawn_fut, + ); match timeout_fut.await { Ok(res) => match res { @@ -581,7 +583,9 @@ impl ElectrumRuntimeClient { let spawn_fut = self.runtime.spawn_blocking(move || electrum_client.batch_call(&batch)); let timeout_fut = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + Duration::from_secs( + self.sync_config.timeouts_config.fee_rate_cache_update_timeout_secs, + ), spawn_fut, ); diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index f6f313955..245db72f6 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -16,18 +16,14 @@ use lightning::chain::{Confirm, Filter, WatchedOutput}; use lightning::util::ser::Writeable; use lightning_transaction_sync::EsploraSyncClient; -use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; -use crate::config::{ - Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, - BDK_WALLET_SYNC_TIMEOUT_SECS, DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, - FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, -}; +use super::WalletSyncStatus; +use crate::config::{Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP}; use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, OnchainFeeEstimator, }; use crate::io::utils::write_node_metrics; -use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::logger::{log_bytes, log_debug, log_error, log_trace, LdkLogger, Logger}; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; @@ -51,7 +47,8 @@ impl 
EsploraChainSource { logger: Arc, node_metrics: Arc>, ) -> Self { let mut client_builder = esplora_client::Builder::new(&server_url); - client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + client_builder = + client_builder.timeout(sync_config.timeouts_config.per_request_timeout_secs as u64); for (header_name, header_value) in &headers { client_builder = client_builder.header(header_name, header_value); @@ -85,7 +82,7 @@ impl EsploraChainSource { status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); + log_debug!(self.logger, "Sync in progress, skipping."); return sync_receiver.recv().await.map_err(|e| { debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); @@ -113,7 +110,7 @@ impl EsploraChainSource { Ok(res) => match res { Ok(update) => match onchain_wallet.apply_update(update) { Ok(()) => { - log_info!( + log_debug!( self.logger, "{} of on-chain wallet finished in {}ms.", if incremental_sync { "Incremental sync" } else { "Sync" }, @@ -128,8 +125,8 @@ impl EsploraChainSource { locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; write_node_metrics( &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger) + &*self.kv_store, + &*self.logger )?; } Ok(()) @@ -183,14 +180,18 @@ impl EsploraChainSource { if incremental_sync { let sync_request = onchain_wallet.get_incremental_sync_request(); let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + Duration::from_secs( + self.sync_config.timeouts_config.onchain_wallet_sync_timeout_secs, + ), self.esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), ); get_and_apply_wallet_update!(wallet_sync_timeout_fut) } else { let full_scan_request = onchain_wallet.get_full_scan_request(); let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + Duration::from_secs( + self.sync_config.timeouts_config.onchain_wallet_sync_timeout_secs, + ), self.esplora_client.full_scan( full_scan_request, BDK_CLIENT_STOP_GAP, @@ -210,7 +211,7 @@ impl EsploraChainSource { status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { - log_info!(self.logger, "Sync in progress, skipping."); + log_debug!(self.logger, "Sync in progress, skipping."); return sync_receiver.recv().await.map_err(|e| { debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); @@ -240,14 +241,16 @@ impl EsploraChainSource { ]; let timeout_fut = tokio::time::timeout( - Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), + Duration::from_secs( + self.sync_config.timeouts_config.lightning_wallet_sync_timeout_secs as u64, + ), self.tx_sync.sync(confirmables), ); let now = Instant::now(); match timeout_fut.await { Ok(res) => match res { Ok(()) => { - log_info!( + log_debug!( self.logger, "Sync of Lightning wallet finished in {}ms.", now.elapsed().as_millis() @@ -259,20 +262,8 @@ impl EsploraChainSource { let mut locked_node_metrics = self.node_metrics.write().unwrap(); locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, 
&*self.logger)?; } - - periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - Arc::clone(&self.node_metrics), - )?; Ok(()) }, Err(e) => { @@ -290,7 +281,9 @@ impl EsploraChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { let now = Instant::now(); let estimates = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + Duration::from_secs( + self.sync_config.timeouts_config.fee_rate_cache_update_timeout_secs, + ), self.esplora_client.get_fee_estimates(), ) .await @@ -343,7 +336,7 @@ impl EsploraChainSource { self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache); - log_info!( + log_debug!( self.logger, "Fee rate cache update finished in {}ms.", now.elapsed().as_millis() @@ -353,11 +346,7 @@ impl EsploraChainSource { { let mut locked_node_metrics = self.node_metrics.write().unwrap(); locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&self.kv_store), - Arc::clone(&self.logger), - )?; + write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; } Ok(()) @@ -367,7 +356,7 @@ impl EsploraChainSource { for tx in &package { let txid = tx.compute_txid(); let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + Duration::from_secs(self.sync_config.timeouts_config.tx_broadcast_timeout_secs), self.esplora_client.broadcast(tx), ); match timeout_fut.await { diff --git a/src/chain/mod.rs b/src/chain/mod.rs index a73ce7418..afd502363 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -21,10 +21,9 @@ use crate::chain::electrum::ElectrumChainSource; use crate::chain::esplora::EsploraChainSource; use crate::config::{ BackgroundSyncConfig, BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, - RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, WALLET_SYNC_INTERVAL_MINIMUM_SECS, + WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; use crate::fee_estimator::OnchainFeeEstimator; -use crate::io::utils::write_node_metrics; use crate::logger::{log_debug, log_info, log_trace, LdkLogger, Logger}; use crate::runtime::Runtime; use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; @@ -486,22 +485,3 @@ impl Filter for ChainSource { } } } - -fn periodically_archive_fully_resolved_monitors( - channel_manager: Arc, chain_monitor: Arc, - kv_store: Arc, logger: Arc, node_metrics: Arc>, -) -> Result<(), Error> { - let mut locked_node_metrics = node_metrics.write().unwrap(); - let cur_height = channel_manager.current_best_block().height; - let should_archive = locked_node_metrics - .latest_channel_monitor_archival_height - .as_ref() - .map_or(true, |h| cur_height >= h + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL); - - if should_archive { - chain_monitor.archive_fully_resolved_channel_monitors(); - locked_node_metrics.latest_channel_monitor_archival_height = Some(cur_height); - write_node_metrics(&*locked_node_metrics, kv_store, logger)?; - } - Ok(()) -} diff --git a/src/config.rs b/src/config.rs index 510bcc875..103b74657 100644 --- a/src/config.rs +++ b/src/config.rs @@ -29,6 +29,21 @@ const DEFAULT_FEE_RATE_CACHE_UPDATE_INTERVAL_SECS: u64 = 60 * 10; const DEFAULT_PROBING_LIQUIDITY_LIMIT_MULTIPLIER: u64 = 3; const DEFAULT_ANCHOR_PER_CHANNEL_RESERVE_SATS: u64 = 25_000; +// The default timeout after which we abort a wallet syncing operation. 
+const DEFAULT_BDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 60; + +// The default timeout after which we abort a wallet syncing operation. +const DEFAULT_LDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 30; + +// The default timeout after which we abort a fee rate cache update operation. +pub(crate) const DEFAULT_FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS: u64 = 10; + +// The default timeout after which we abort a transaction broadcast operation. +pub(crate) const DEFAULT_TX_BROADCAST_TIMEOUT_SECS: u64 = 10; + +// The default {Esplora,Electrum} client timeout we're using. +const DEFAULT_PER_REQUEST_TIMEOUT_SECS: u8 = 10; + /// The default log level. pub const DEFAULT_LOG_LEVEL: LogLevel = LogLevel::Debug; @@ -41,9 +56,6 @@ pub const DEFAULT_STORAGE_DIR_PATH: &str = "/tmp/ldk_node"; // The default Esplora server we're using. pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; -// The default Esplora client timeout we're using. -pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10; - // The 'stop gap' parameter used by BDK's wallet sync. This seems to configure the threshold // number of derivation indexes after which BDK stops looking for new scripts belonging to the wallet. pub(crate) const BDK_CLIENT_STOP_GAP: usize = 20; @@ -54,9 +66,6 @@ pub(crate) const BDK_CLIENT_CONCURRENCY: usize = 4; // The timeout after which we abandon retrying failed payments. pub(crate) const LDK_PAYMENT_RETRY_TIMEOUT: Duration = Duration::from_secs(10); -// The interval (in block height) after which we retry archiving fully resolved channel monitors. -pub(crate) const RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL: u32 = 6; - // The time in-between peer reconnection attempts. pub(crate) const PEER_RECONNECTION_INTERVAL: Duration = Duration::from_secs(60); @@ -72,23 +81,15 @@ pub(crate) const NODE_ANN_BCAST_INTERVAL: Duration = Duration::from_secs(60 * 60 // The lower limit which we apply to any configured wallet sync intervals. pub(crate) const WALLET_SYNC_INTERVAL_MINIMUM_SECS: u64 = 10; -// The timeout after which we abort a wallet syncing operation. -pub(crate) const BDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 20; - -// The timeout after which we abort a wallet syncing operation. -pub(crate) const LDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 10; - // The timeout after which we give up waiting on LDK's event handler to exit on shutdown. pub(crate) const LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS: u64 = 30; // The timeout after which we give up waiting on a background task to exit on shutdown. pub(crate) const BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS: u64 = 5; -// The timeout after which we abort a fee rate cache update operation. -pub(crate) const FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS: u64 = 5; - -// The timeout after which we abort a transaction broadcast operation. -pub(crate) const TX_BROADCAST_TIMEOUT_SECS: u64 = 5; +// The maximum encoded size of an RGS snapshot we'll accept. +// In practice the maximum we see is around 4MiB. +pub(crate) const RGS_SNAPSHOT_MAX_SIZE: usize = 15 * 1024 * 1024; // The timeout after which we abort a RGS sync operation. pub(crate) const RGS_SYNC_TIMEOUT_SECS: u64 = 5; @@ -96,9 +97,16 @@ pub(crate) const RGS_SYNC_TIMEOUT_SECS: u64 = 5; /// The length in bytes of our wallets' keys seed. pub const WALLET_KEYS_SEED_LEN: usize = 64; +// The maximum encoded size of external scores we'll accept. +// In practice we see scores files in the 5MiB range. 
+pub(crate) const EXTERNAL_PATHFINDING_SCORES_MAX_SIZE: usize = 20 * 1024 * 1024; + // The timeout after which we abort a external scores sync operation. pub(crate) const EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS: u64 = 5; +// The timeout after which we abort a parsing/looking up an HRN resolution. +pub(crate) const HRN_RESOLUTION_TIMEOUT_SECS: u64 = 5; + #[derive(Debug, Clone)] /// Represents the configuration of an [`Node`] instance. /// @@ -373,6 +381,43 @@ impl Default for BackgroundSyncConfig { } } +/// Timeout-related parameters for syncing the Lightning and on-chain wallets. +/// +/// ### Defaults +/// +/// | Parameter | Value | +/// |----------------------------------------|--------------------| +/// | `onchain_wallet_sync_timeout_secs` | 60 | +/// | `lightning_wallet_sync_timeout_secs` | 30 | +/// | `fee_rate_cache_update_timeout_secs` | 10 | +/// | `tx_broadcast_timeout_secs` | 10 | +/// | `per_request_timeout_secs` | 10 | +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct SyncTimeoutsConfig { + /// The timeout after which we abort syncing the onchain wallet. + pub onchain_wallet_sync_timeout_secs: u64, + /// The timeout after which we abort syncing the LDK wallet. + pub lightning_wallet_sync_timeout_secs: u64, + /// The timeout after which we abort updating the fee rate cache. + pub fee_rate_cache_update_timeout_secs: u64, + /// The timeout after which we abort broadcasting a transaction. + pub tx_broadcast_timeout_secs: u64, + /// The per-request timeout after which we abort a single Electrum or Esplora API request. + pub per_request_timeout_secs: u8, +} + +impl Default for SyncTimeoutsConfig { + fn default() -> Self { + Self { + onchain_wallet_sync_timeout_secs: DEFAULT_BDK_WALLET_SYNC_TIMEOUT_SECS, + lightning_wallet_sync_timeout_secs: DEFAULT_LDK_WALLET_SYNC_TIMEOUT_SECS, + fee_rate_cache_update_timeout_secs: DEFAULT_FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, + tx_broadcast_timeout_secs: DEFAULT_TX_BROADCAST_TIMEOUT_SECS, + per_request_timeout_secs: DEFAULT_PER_REQUEST_TIMEOUT_SECS, + } + } +} + /// Configuration for syncing with an Esplora backend. /// /// Background syncing is enabled by default, using the default values specified in @@ -386,11 +431,16 @@ pub struct EsploraSyncConfig { /// /// [`Node::sync_wallets`]: crate::Node::sync_wallets pub background_sync_config: Option, + /// Sync timeouts configuration. + pub timeouts_config: SyncTimeoutsConfig, } impl Default for EsploraSyncConfig { fn default() -> Self { - Self { background_sync_config: Some(BackgroundSyncConfig::default()) } + Self { + background_sync_config: Some(BackgroundSyncConfig::default()), + timeouts_config: SyncTimeoutsConfig::default(), + } } } @@ -407,11 +457,16 @@ pub struct ElectrumSyncConfig { /// /// [`Node::sync_wallets`]: crate::Node::sync_wallets pub background_sync_config: Option, + /// Sync timeouts configuration. 
+ pub timeouts_config: SyncTimeoutsConfig, } impl Default for ElectrumSyncConfig { fn default() -> Self { - Self { background_sync_config: Some(BackgroundSyncConfig::default()) } + Self { + background_sync_config: Some(BackgroundSyncConfig::default()), + timeouts_config: SyncTimeoutsConfig::default(), + } } } diff --git a/src/data_store.rs b/src/data_store.rs index d295ece51..ff09d9902 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -167,6 +167,10 @@ where })?; Ok(()) } + + pub(crate) fn contains_key(&self, id: &SO::Id) -> bool { + self.objects.lock().unwrap().contains_key(id) + } } #[cfg(test)] diff --git a/src/error.rs b/src/error.rs index 20b1cceab..ea0bcca3b 100644 --- a/src/error.rs +++ b/src/error.rs @@ -113,6 +113,8 @@ pub enum Error { InvalidDateTime, /// The given fee rate is invalid. InvalidFeeRate, + /// The given script public key is invalid. + InvalidScriptPubKey, /// A payment with the given hash has already been initiated. DuplicatePayment, /// The provided offer was denonminated in an unsupported currency. @@ -127,6 +129,8 @@ pub enum Error { InvalidBlindedPaths, /// Asynchronous payment services are disabled. AsyncPaymentServicesDisabled, + /// Parsing a Human-Readable Name has failed. + HrnParsingFailed, } impl fmt::Display for Error { @@ -186,6 +190,7 @@ impl fmt::Display for Error { Self::InvalidNodeAlias => write!(f, "The given node alias is invalid."), Self::InvalidDateTime => write!(f, "The given date time is invalid."), Self::InvalidFeeRate => write!(f, "The given fee rate is invalid."), + Self::InvalidScriptPubKey => write!(f, "The given script pubkey is invalid."), Self::DuplicatePayment => { write!(f, "A payment with the given hash has already been initiated.") }, @@ -205,6 +210,9 @@ impl fmt::Display for Error { Self::AsyncPaymentServicesDisabled => { write!(f, "Asynchronous payment services are disabled.") }, + Self::HrnParsingFailed => { + write!(f, "Failed to parse a human-readable name.") + }, } } } diff --git a/src/event.rs b/src/event.rs index 75270bf53..6f0ed8e09 100644 --- a/src/event.rs +++ b/src/event.rs @@ -165,7 +165,7 @@ pub enum Event { /// /// This needs to be manually claimed by supplying the correct preimage to [`claim_for_hash`]. /// - /// If the the provided parameters don't match the expectations or the preimage can't be + /// If the provided parameters don't match the expectations or the preimage can't be /// retrieved in time, should be failed-back via [`fail_for_hash`]. /// /// Note claiming will necessarily fail after the `claim_deadline` has been reached. diff --git a/src/ffi/types.rs b/src/ffi/types.rs index c69987c96..3a197146c 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -10,17 +10,21 @@ // // Make sure to add any re-exported items that need to be used in uniffi below. 
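For illustration, one possible way to use the `SyncTimeoutsConfig` added to `src/config.rs` above when configuring an Esplora chain source. The field values are arbitrary and the import path is assumed from the crate's existing `config` module layout.

```rust
use ldk_node::config::{EsploraSyncConfig, SyncTimeoutsConfig};

fn esplora_config_with_relaxed_timeouts() -> EsploraSyncConfig {
    // Bump the sync-related timeouts while keeping the default background sync behavior.
    let timeouts_config = SyncTimeoutsConfig {
        onchain_wallet_sync_timeout_secs: 120,
        lightning_wallet_sync_timeout_secs: 60,
        fee_rate_cache_update_timeout_secs: 20,
        tx_broadcast_timeout_secs: 20,
        per_request_timeout_secs: 30,
    };
    EsploraSyncConfig { timeouts_config, ..Default::default() }
}
```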
+use std::collections::HashMap; use std::convert::TryInto; +use std::future::Future; use std::ops::Deref; use std::str::FromStr; -use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; use std::time::Duration; +use async_trait::async_trait; pub use bip39::Mnemonic; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; -pub use bitcoin::{Address, BlockHash, FeeRate, Network, OutPoint, Txid}; +pub use bitcoin::{Address, BlockHash, FeeRate, Network, OutPoint, ScriptBuf, Txid}; pub use lightning::chain::channelmonitor::BalanceSource; pub use lightning::events::{ClosureReason, PaymentFailureReason}; use lightning::ln::channelmanager::PaymentId; @@ -29,8 +33,10 @@ use lightning::offers::invoice::Bolt12Invoice as LdkBolt12Invoice; pub use lightning::offers::offer::OfferId; use lightning::offers::offer::{Amount as LdkAmount, Offer as LdkOffer}; use lightning::offers::refund::Refund as LdkRefund; +use lightning::onion_message::dns_resolution::HumanReadableName as LdkHumanReadableName; pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; pub use lightning::routing::router::RouteParametersConfig; +use lightning::util::persist::{KVStore, KVStoreSync}; use lightning::util::ser::Writeable; use lightning_invoice::{Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescriptionRef}; pub use lightning_invoice::{Description, SignedRawBolt11Invoice}; @@ -45,19 +51,556 @@ pub use vss_client::headers::{VssHeaderProvider, VssHeaderProviderError}; use crate::builder::sanitize_alias; pub use crate::config::{ default_config, AnchorChannelsConfig, BackgroundSyncConfig, ElectrumSyncConfig, - EsploraSyncConfig, MaxDustHTLCExposure, + EsploraSyncConfig, MaxDustHTLCExposure, SyncTimeoutsConfig, }; pub use crate::entropy::{generate_entropy_mnemonic, EntropyError, NodeEntropy, WordCount}; use crate::error::Error; pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; +use crate::io::utils::check_namespace_key_validity; pub use crate::liquidity::{LSPS1OrderStatus, LSPS2ServiceConfig}; pub use crate::logger::{LogLevel, LogRecord, LogWriter}; pub use crate::payment::store::{ ConfirmationStatus, LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus, }; -pub use crate::payment::QrPaymentResult; +pub use crate::payment::UnifiedPaymentResult; use crate::{hex_utils, SocketAddress, UniffiCustomTypeConverter, UserChannelId}; +#[derive(Debug)] +pub enum IOError { + NotFound, + PermissionDenied, + ConnectionRefused, + ConnectionReset, + ConnectionAborted, + NotConnected, + AddrInUse, + AddrNotAvailable, + BrokenPipe, + AlreadyExists, + WouldBlock, + InvalidInput, + InvalidData, + TimedOut, + WriteZero, + Interrupted, + UnexpectedEof, + Other, +} + +impl From for IOError { + fn from(error: bitcoin::io::Error) -> Self { + match error.kind() { + bitcoin::io::ErrorKind::NotFound => IOError::NotFound, + bitcoin::io::ErrorKind::PermissionDenied => IOError::PermissionDenied, + bitcoin::io::ErrorKind::ConnectionRefused => IOError::ConnectionRefused, + bitcoin::io::ErrorKind::ConnectionReset => IOError::ConnectionReset, + bitcoin::io::ErrorKind::ConnectionAborted => IOError::ConnectionAborted, + bitcoin::io::ErrorKind::NotConnected => IOError::NotConnected, + bitcoin::io::ErrorKind::AddrInUse => IOError::AddrInUse, + bitcoin::io::ErrorKind::AddrNotAvailable => IOError::AddrNotAvailable, + bitcoin::io::ErrorKind::BrokenPipe => IOError::BrokenPipe, + bitcoin::io::ErrorKind::AlreadyExists => 
IOError::AlreadyExists, + bitcoin::io::ErrorKind::WouldBlock => IOError::WouldBlock, + bitcoin::io::ErrorKind::InvalidInput => IOError::InvalidInput, + bitcoin::io::ErrorKind::InvalidData => IOError::InvalidData, + bitcoin::io::ErrorKind::TimedOut => IOError::TimedOut, + bitcoin::io::ErrorKind::WriteZero => IOError::WriteZero, + bitcoin::io::ErrorKind::Interrupted => IOError::Interrupted, + bitcoin::io::ErrorKind::UnexpectedEof => IOError::UnexpectedEof, + bitcoin::io::ErrorKind::Other => IOError::Other, + } + } +} + +impl From for bitcoin::io::Error { + fn from(error: IOError) -> Self { + match error { + IOError::NotFound => bitcoin::io::ErrorKind::NotFound.into(), + IOError::PermissionDenied => bitcoin::io::ErrorKind::PermissionDenied.into(), + IOError::ConnectionRefused => bitcoin::io::ErrorKind::ConnectionRefused.into(), + IOError::ConnectionReset => bitcoin::io::ErrorKind::ConnectionReset.into(), + IOError::ConnectionAborted => bitcoin::io::ErrorKind::ConnectionAborted.into(), + IOError::NotConnected => bitcoin::io::ErrorKind::NotConnected.into(), + IOError::AddrInUse => bitcoin::io::ErrorKind::AddrInUse.into(), + IOError::AddrNotAvailable => bitcoin::io::ErrorKind::AddrNotAvailable.into(), + IOError::BrokenPipe => bitcoin::io::ErrorKind::BrokenPipe.into(), + IOError::AlreadyExists => bitcoin::io::ErrorKind::AlreadyExists.into(), + IOError::WouldBlock => bitcoin::io::ErrorKind::WouldBlock.into(), + IOError::InvalidInput => bitcoin::io::ErrorKind::InvalidInput.into(), + IOError::InvalidData => bitcoin::io::ErrorKind::InvalidData.into(), + IOError::TimedOut => bitcoin::io::ErrorKind::TimedOut.into(), + IOError::WriteZero => bitcoin::io::ErrorKind::WriteZero.into(), + IOError::Interrupted => bitcoin::io::ErrorKind::Interrupted.into(), + IOError::UnexpectedEof => bitcoin::io::ErrorKind::UnexpectedEof.into(), + IOError::Other => bitcoin::io::ErrorKind::Other.into(), + } + } +} + +impl std::fmt::Display for IOError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + IOError::NotFound => write!(f, "NotFound"), + IOError::PermissionDenied => write!(f, "PermissionDenied"), + IOError::ConnectionRefused => write!(f, "ConnectionRefused"), + IOError::ConnectionReset => write!(f, "ConnectionReset"), + IOError::ConnectionAborted => write!(f, "ConnectionAborted"), + IOError::NotConnected => write!(f, "NotConnected"), + IOError::AddrInUse => write!(f, "AddrInUse"), + IOError::AddrNotAvailable => write!(f, "AddrNotAvailable"), + IOError::BrokenPipe => write!(f, "BrokenPipe"), + IOError::AlreadyExists => write!(f, "AlreadyExists"), + IOError::WouldBlock => write!(f, "WouldBlock"), + IOError::InvalidInput => write!(f, "InvalidInput"), + IOError::InvalidData => write!(f, "InvalidData"), + IOError::TimedOut => write!(f, "TimedOut"), + IOError::WriteZero => write!(f, "WriteZero"), + IOError::Interrupted => write!(f, "Interrupted"), + IOError::UnexpectedEof => write!(f, "UnexpectedEof"), + IOError::Other => write!(f, "Other"), + } + } +} + +#[async_trait] +pub trait ForeignDynStoreTrait: Send + Sync { + async fn read_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> Result, IOError>; + async fn write_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> Result<(), IOError>; + async fn remove_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> Result<(), IOError>; + async fn list_async( + &self, primary_namespace: String, 
secondary_namespace: String, + ) -> Result, IOError>; + + fn read( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> Result, IOError>; + fn write( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> Result<(), IOError>; + fn remove( + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> Result<(), IOError>; + fn list( + &self, primary_namespace: String, secondary_namespace: String, + ) -> Result, IOError>; +} + +#[derive(Clone)] +pub struct FfiDynStore { + inner: Arc, + next_write_version: Arc, +} + +impl FfiDynStore { + pub fn from_store(store: Arc) -> Self { + let inner = Arc::new(FfiDynStoreInner::new(store)); + Self { inner, next_write_version: Arc::new(AtomicU64::new(1)) } + } + + fn build_locking_key( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> String { + if primary_namespace.is_empty() { + key.to_owned() + } else { + format!("{}#{}#{}", primary_namespace, secondary_namespace, key) + } + } + + fn get_new_version_and_async_lock_ref( + &self, locking_key: String, + ) -> (Arc>, u64) { + let version = self.next_write_version.fetch_add(1, Ordering::Relaxed); + if version == u64::MAX { + panic!("FfiDynStore version counter overflowed"); + } + + let inner_lock_ref = self.inner.get_async_inner_lock_ref(locking_key); + + (inner_lock_ref, version) + } + + fn get_new_version_and_sync_lock_ref(&self, locking_key: String) -> (Arc>, u64) { + let version = self.next_write_version.fetch_add(1, Ordering::Relaxed); + if version == u64::MAX { + panic!("FfiDynStore version counter overflowed"); + } + + let inner_lock_ref = self.inner.get_sync_inner_lock_ref(locking_key); + + (inner_lock_ref, version) + } +} + +impl KVStore for FfiDynStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> impl Future, lightning::io::Error>> + 'static + Send { + let this = Arc::clone(&self.inner); + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + async move { + this.read_internal_async(primary_namespace, secondary_namespace, key) + .await + .map_err(|e| e.into()) + } + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> impl Future> + 'static + Send { + let this = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + let locking_key = self.build_locking_key(&primary_namespace, &secondary_namespace, &key); + let (inner_lock_ref, version) = + self.get_new_version_and_async_lock_ref(locking_key.clone()); + async move { + this.write_internal_async( + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + buf, + ) + .await + .map_err(|e| e.into()) + } + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> impl Future> + 'static + Send { + let this = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + let locking_key = self.build_locking_key(&primary_namespace, &secondary_namespace, &key); + let (inner_lock_ref, version) = + self.get_new_version_and_async_lock_ref(locking_key.clone()); + + async move { + this.remove_internal_async( + 
inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + lazy, + ) + .await + .map_err(|e| e.into()) + } + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> impl Future, lightning::io::Error>> + 'static + Send { + let this = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + + async move { + this.list_internal_async(primary_namespace, secondary_namespace) + .await + .map_err(|e| e.into()) + } + } +} + +impl KVStoreSync for FfiDynStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Result, lightning::io::Error> { + self.inner.read_internal( + primary_namespace.to_string(), + secondary_namespace.to_string(), + key.to_string(), + ) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> Result<(), lightning::io::Error> { + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_sync_lock_ref(locking_key.clone()); + + self.inner.write_internal( + inner_lock_ref, + locking_key, + version, + primary_namespace.to_string(), + secondary_namespace.to_string(), + key.to_string(), + buf, + ) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> Result<(), lightning::io::Error> { + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_sync_lock_ref(locking_key.clone()); + + self.inner.remove_internal( + inner_lock_ref, + locking_key, + version, + primary_namespace.to_string(), + secondary_namespace.to_string(), + key.to_string(), + lazy, + ) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Result, lightning::io::Error> { + self.inner.list_internal(primary_namespace.to_string(), secondary_namespace.to_string()) + } +} + +struct FfiDynStoreInner { + ffi_store: Arc, + async_write_version_locks: Mutex>>>, + sync_write_version_locks: Mutex>>>, +} + +impl FfiDynStoreInner { + fn new(ffi_store: Arc) -> Self { + Self { + ffi_store, + async_write_version_locks: Mutex::new(HashMap::new()), + sync_write_version_locks: Mutex::new(HashMap::new()), + } + } + + fn get_async_inner_lock_ref(&self, locking_key: String) -> Arc> { + let mut outer_lock = self.async_write_version_locks.lock().unwrap(); + Arc::clone(&outer_lock.entry(locking_key).or_default()) + } + + fn get_sync_inner_lock_ref(&self, locking_key: String) -> Arc> { + let mut outer_lock = self.sync_write_version_locks.lock().unwrap(); + Arc::clone(&outer_lock.entry(locking_key).or_default()) + } + + async fn read_internal_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> bitcoin::io::Result> { + check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?; + self.ffi_store + .read_async(primary_namespace, secondary_namespace, key) + .await + .map_err(|e| e.into()) + } + + fn read_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> bitcoin::io::Result> { + check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?; + self.ffi_store.read(primary_namespace, secondary_namespace, key).map_err(|e| e.into()) + } + + async fn write_internal_async( + &self, inner_lock_ref: Arc>, locking_key: 
String, version: u64, + primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> bitcoin::io::Result<()> { + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "write", + )?; + + let store = Arc::clone(&self.ffi_store); + + self.execute_locked_write_async(inner_lock_ref, locking_key, version, async move || { + store + .write_async(primary_namespace, secondary_namespace, key, buf) + .await + .map_err(|e| >::into(e))?; + + Ok(()) + }) + .await + } + + fn write_internal( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> bitcoin::io::Result<()> { + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "write", + )?; + + self.execute_locked_write(inner_lock_ref, locking_key, version, || { + self.ffi_store + .write(primary_namespace, secondary_namespace, key, buf) + .map_err(|e| >::into(e))?; + + Ok(()) + }) + } + + async fn remove_internal_async( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> bitcoin::io::Result<()> { + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "remove", + )?; + + let store = Arc::clone(&self.ffi_store); + + self.execute_locked_write_async(inner_lock_ref, locking_key, version, async move || { + store + .remove_async(primary_namespace, secondary_namespace, key, lazy) + .await + .map_err(|e| >::into(e))?; + + Ok(()) + }) + .await + } + + fn remove_internal( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> bitcoin::io::Result<()> { + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "remove", + )?; + + self.execute_locked_write(inner_lock_ref, locking_key, version, || { + self.ffi_store + .remove(primary_namespace, secondary_namespace, key, lazy) + .map_err(|e| >::into(e))?; + + Ok(()) + }) + } + + async fn list_internal_async( + &self, primary_namespace: String, secondary_namespace: String, + ) -> bitcoin::io::Result> { + check_namespace_key_validity(&primary_namespace, &secondary_namespace, None, "list")?; + self.ffi_store + .list_async(primary_namespace, secondary_namespace) + .await + .map_err(|e| e.into()) + } + + fn list_internal( + &self, primary_namespace: String, secondary_namespace: String, + ) -> bitcoin::io::Result> { + check_namespace_key_validity(&primary_namespace, &secondary_namespace, None, "list")?; + self.ffi_store.list(primary_namespace, secondary_namespace).map_err(|e| e.into()) + } + + async fn execute_locked_write_async< + F: Future>, + FN: FnOnce() -> F, + >( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + callback: FN, + ) -> Result<(), bitcoin::io::Error> { + let res = { + let mut last_written_version = inner_lock_ref.lock().await; + + // Check if we already have a newer version written/removed. This is used in async contexts to realize eventual + // consistency. + let is_stale_version = version <= *last_written_version; + + // If the version is not stale, we execute the callback. Otherwise we can and must skip writing. 
+ if is_stale_version { + Ok(()) + } else { + callback().await.map(|_| { + *last_written_version = version; + }) + } + }; + + self.clean_locks_async(&inner_lock_ref, locking_key); + + res + } + + fn clean_locks_async( + &self, inner_lock_ref: &Arc>, locking_key: String, + ) { + // If there no arcs in use elsewhere, this means that there are no in-flight writes. We can remove the map entry + // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in + // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already + // counted. + let mut outer_lock = self.async_write_version_locks.lock().unwrap(); + + let strong_count = Arc::strong_count(&inner_lock_ref); + debug_assert!(strong_count >= 2, "Unexpected FfiDynStore strong count"); + + if strong_count == 2 { + outer_lock.remove(&locking_key); + } + } + + fn execute_locked_write bitcoin::io::Result<()>>( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, callback: F, + ) -> bitcoin::io::Result<()> { + let res = { + let mut last_written_version = inner_lock_ref.lock().unwrap(); + + let is_stale_version = version <= *last_written_version; + + if is_stale_version { + Ok(()) + } else { + callback().map(|_| { + *last_written_version = version; + }) + } + }; + + self.clean_locks(&inner_lock_ref, locking_key); + + res + } + + fn clean_locks(&self, inner_lock_ref: &Arc>, locking_key: String) { + let mut outer_lock = self.sync_write_version_locks.lock().unwrap(); + let strong_count = Arc::strong_count(inner_lock_ref); + debug_assert!(strong_count >= 2, "Unexpected FfiDynStore sync strong count"); + if strong_count == 2 { + outer_lock.remove(&locking_key); + } + } +} + impl UniffiCustomTypeConverter for PublicKey { type Builtin = String; @@ -106,6 +649,22 @@ impl UniffiCustomTypeConverter for Address { } } +impl UniffiCustomTypeConverter for ScriptBuf { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + if let Ok(key) = ScriptBuf::from_hex(&val) { + return Ok(key); + } + + Err(Error::InvalidScriptPubKey.into()) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} + #[derive(Debug, Clone, PartialEq, Eq)] pub enum OfferAmount { Bitcoin { amount_msats: u64 }, @@ -268,6 +827,72 @@ impl std::fmt::Display for Offer { } } +/// A struct containing the two parts of a BIP 353 Human-Readable Name - the user and domain parts. +/// +/// The `user` and `domain` parts combined cannot exceed 231 bytes in length; +/// each DNS label within them must be non-empty and no longer than 63 bytes. +/// +/// If you intend to handle non-ASCII `user` or `domain` parts, you must handle [Homograph Attacks] +/// and do punycode en-/de-coding yourself. This struct will always handle only plain ASCII `user` +/// and `domain` parts. +/// +/// This struct can also be used for LN-Address recipients. +/// +/// [Homograph Attacks]: https://en.wikipedia.org/wiki/IDN_homograph_attack +pub struct HumanReadableName { + pub(crate) inner: LdkHumanReadableName, +} + +impl HumanReadableName { + /// Constructs a new [`HumanReadableName`] from the standard encoding - `user`@`domain`. + /// + /// If `user` includes the standard BIP 353 ₿ prefix it is automatically removed as required by + /// BIP 353. 
+ pub fn from_encoded(encoded: &str) -> Result { + let hrn = match LdkHumanReadableName::from_encoded(encoded) { + Ok(hrn) => Ok(hrn), + Err(_) => Err(Error::HrnParsingFailed), + }?; + + Ok(Self { inner: hrn }) + } + + /// Gets the `user` part of this Human-Readable Name + pub fn user(&self) -> String { + self.inner.user().to_string() + } + + /// Gets the `domain` part of this Human-Readable Name + pub fn domain(&self) -> String { + self.inner.domain().to_string() + } +} + +impl From for HumanReadableName { + fn from(ldk_hrn: LdkHumanReadableName) -> Self { + HumanReadableName { inner: ldk_hrn } + } +} + +impl From for LdkHumanReadableName { + fn from(wrapper: HumanReadableName) -> Self { + wrapper.inner + } +} + +impl Deref for HumanReadableName { + type Target = LdkHumanReadableName; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl AsRef for HumanReadableName { + fn as_ref(&self) -> &LdkHumanReadableName { + self.deref() + } +} + /// A `Refund` is a request to send an [`Bolt12Invoice`] without a preceding [`Offer`]. /// /// Typically, after an invoice is paid, the recipient may publish a refund allowing the sender to @@ -933,7 +1558,7 @@ impl Bolt11Invoice { /// Returns the hash to which we will receive the preimage on completion of the payment pub fn payment_hash(&self) -> PaymentHash { - PaymentHash(self.inner.payment_hash().to_byte_array()) + self.inner.payment_hash() } /// Get the payment secret if one was included in the invoice @@ -1084,9 +1709,9 @@ pub struct LSPS1OnchainPaymentInfo { pub expires_at: LSPSDateTime, /// The total fee the LSP will charge to open this channel in satoshi. pub fee_total_sat: u64, - /// The amount the client needs to pay to have the requested channel openend. + /// The amount the client needs to pay to have the requested channel opened. pub order_total_sat: u64, - /// An on-chain address the client can send [`Self::order_total_sat`] to to have the channel + /// An on-chain address the client can send [`Self::order_total_sat`] to have the channel /// opened. pub address: bitcoin::Address, /// The minimum number of block confirmations that are required for the on-chain payment to be @@ -1351,10 +1976,7 @@ mod tests { let invoice_str = wrapped_invoice.to_string(); let parsed_invoice: LdkBolt11Invoice = invoice_str.parse().unwrap(); - assert_eq!( - ldk_invoice.payment_hash().to_byte_array().to_vec(), - parsed_invoice.payment_hash().to_byte_array().to_vec() - ); + assert_eq!(ldk_invoice.payment_hash(), parsed_invoice.payment_hash(),); } #[test] diff --git a/src/gossip.rs b/src/gossip.rs index 563d9e1ea..4ef280273 100644 --- a/src/gossip.rs +++ b/src/gossip.rs @@ -5,19 +5,16 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
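The `FfiDynStore` wrapper above guards each key with a per-key lock and a monotonically increasing write version, so writes that arrive out of order are skipped to preserve eventual consistency. A stripped-down, std-only sketch of that idea (not the wrapper's real signatures):

```rust
use std::sync::{Arc, Mutex};

// Perform `write` only if `version` is newer than the last version persisted
// for this key; otherwise the write is stale and must be skipped.
fn locked_write(last_written: &Arc<Mutex<u64>>, version: u64, write: impl FnOnce()) {
    let mut last = last_written.lock().unwrap();
    if version > *last {
        write();
        *last = version;
    }
}

fn main() {
    let last_written = Arc::new(Mutex::new(0u64));
    locked_write(&last_written, 2, || println!("persisting version 2"));
    // Arrives late: version 1 is older than 2, so it is dropped.
    locked_write(&last_written, 1, || println!("persisting version 1 (never runs)"));
}
```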
-use std::future::Future; use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::Arc; -use std::time::Duration; -use lightning::util::native_async::FutureSpawner; use lightning_block_sync::gossip::GossipVerifier; use crate::chain::ChainSource; -use crate::config::RGS_SYNC_TIMEOUT_SECS; -use crate::logger::{log_trace, LdkLogger, Logger}; -use crate::runtime::Runtime; -use crate::types::{GossipSync, Graph, P2PGossipSync, PeerManager, RapidGossipSync, UtxoLookup}; +use crate::config::{RGS_SNAPSHOT_MAX_SIZE, RGS_SYNC_TIMEOUT_SECS}; +use crate::logger::{log_error, log_trace, LdkLogger, Logger}; +use crate::runtime::{Runtime, RuntimeSpawner}; +use crate::types::{GossipSync, Graph, P2PGossipSync, RapidGossipSync}; use crate::Error; pub(crate) enum GossipSource { @@ -33,12 +30,15 @@ pub(crate) enum GossipSource { } impl GossipSource { - pub fn new_p2p(network_graph: Arc, logger: Arc) -> Self { - let gossip_sync = Arc::new(P2PGossipSync::new( - network_graph, - None::>, - Arc::clone(&logger), - )); + pub fn new_p2p( + network_graph: Arc, chain_source: Arc, runtime: Arc, + logger: Arc, + ) -> Self { + let verifier = chain_source.as_utxo_source().map(|utxo_source| { + Arc::new(GossipVerifier::new(Arc::new(utxo_source), RuntimeSpawner::new(runtime))) + }); + + let gossip_sync = Arc::new(P2PGossipSync::new(network_graph, verifier, logger)); Self::P2PNetwork { gossip_sync } } @@ -62,27 +62,6 @@ impl GossipSource { } } - pub(crate) fn set_gossip_verifier( - &self, chain_source: Arc, peer_manager: Arc, - runtime: Arc, - ) { - match self { - Self::P2PNetwork { gossip_sync } => { - if let Some(utxo_source) = chain_source.as_utxo_source() { - let spawner = RuntimeSpawner::new(Arc::clone(&runtime)); - let gossip_verifier = Arc::new(GossipVerifier::new( - Arc::new(utxo_source), - spawner, - Arc::clone(gossip_sync), - peer_manager, - )); - gossip_sync.add_utxo_lookup(Some(gossip_verifier)); - } - }, - _ => (), - } - } - pub async fn update_rgs_snapshot(&self) -> Result { match self { Self::P2PNetwork { gossip_sync: _, .. } => Ok(0), @@ -90,29 +69,18 @@ impl GossipSource { let query_timestamp = latest_sync_timestamp.load(Ordering::Acquire); let query_url = format!("{}/{}", server_url, query_timestamp); - let response = tokio::time::timeout( - Duration::from_secs(RGS_SYNC_TIMEOUT_SECS), - reqwest::get(query_url), - ) - .await - .map_err(|e| { - log_trace!(logger, "Retrieving RGS gossip update timed out: {}", e); + let query = bitreq::get(query_url) + .with_max_body_size(Some(RGS_SNAPSHOT_MAX_SIZE)) + .with_timeout(RGS_SYNC_TIMEOUT_SECS); + let response = query.send_async().await.map_err(|e| { + log_error!(logger, "Failed to retrieve RGS gossip update: {e}"); Error::GossipUpdateTimeout - })? 
- .map_err(|e| { - log_trace!(logger, "Failed to retrieve RGS gossip update: {}", e); - Error::GossipUpdateFailed })?; - match response.error_for_status() { - Ok(res) => { - let update_data = res.bytes().await.map_err(|e| { - log_trace!(logger, "Failed to retrieve RGS gossip update: {}", e); - Error::GossipUpdateFailed - })?; - + match response.status_code { + 200 => { let new_latest_sync_timestamp = - gossip_sync.update_network_graph(&update_data).map_err(|e| { + gossip_sync.update_network_graph(response.as_bytes()).map_err(|e| { log_trace!( logger, "Failed to update network graph with RGS data: {:?}", @@ -123,8 +91,8 @@ impl GossipSource { latest_sync_timestamp.store(new_latest_sync_timestamp, Ordering::Release); Ok(new_latest_sync_timestamp) }, - Err(e) => { - log_trace!(logger, "Failed to retrieve RGS gossip update: {}", e); + code => { + log_trace!(logger, "Failed to retrieve RGS gossip update: HTTP {}", code); Err(Error::GossipUpdateFailed) }, } @@ -132,19 +100,3 @@ impl GossipSource { } } } - -pub(crate) struct RuntimeSpawner { - runtime: Arc, -} - -impl RuntimeSpawner { - pub(crate) fn new(runtime: Arc) -> Self { - Self { runtime } - } -} - -impl FutureSpawner for RuntimeSpawner { - fn spawn + Send + 'static>(&self, future: T) { - self.runtime.spawn_cancellable_background_task(future); - } -} diff --git a/src/io/mod.rs b/src/io/mod.rs index 7afd5bd40..bf6366c45 100644 --- a/src/io/mod.rs +++ b/src/io/mod.rs @@ -10,6 +10,7 @@ pub mod sqlite_store; #[cfg(test)] pub(crate) mod test_utils; +pub(crate) mod tier_store; pub(crate) mod utils; pub mod vss_store; @@ -78,3 +79,7 @@ pub(crate) const BDK_WALLET_INDEXER_KEY: &str = "indexer"; /// /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice pub(crate) const STATIC_INVOICE_STORE_PRIMARY_NAMESPACE: &str = "static_invoices"; + +/// The pending payment information will be persisted under this prefix. 
+pub(crate) const PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE: &str = "pending_payments"; +pub(crate) const PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; diff --git a/src/io/sqlite_store/migrations.rs b/src/io/sqlite_store/migrations.rs index abfbdf6ef..ea809be08 100644 --- a/src/io/sqlite_store/migrations.rs +++ b/src/io/sqlite_store/migrations.rs @@ -124,7 +124,7 @@ mod tests { connection.execute(&sql, []).unwrap(); - // We write some data to to the table + // We write some data to the table let sql = format!( "INSERT OR REPLACE INTO {} (namespace, key, value) VALUES (:namespace, :key, :value);", kv_table_name diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index 6eb04df3f..0fe528d59 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -9,22 +9,25 @@ use std::collections::{hash_map, HashMap}; use std::future::Future; use std::panic::RefUnwindSafe; use std::path::PathBuf; -use std::sync::Mutex; +use std::sync::{Arc, Mutex}; +use std::time::Duration; use lightning::events::ClosureReason; use lightning::ln::functional_test_utils::{ - check_closed_event, connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, - create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, - TestChanMonCfg, + check_added_monitors, check_closed_event, connect_block, create_announced_chan_between_nodes, + create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, + create_node_chanmgrs, send_payment, TestChanMonCfg, }; use lightning::util::persist::{ KVStore, KVStoreSync, MonitorUpdatingPersister, KVSTORE_NAMESPACE_KEY_MAX_LEN, }; use lightning::util::test_utils; -use lightning::{check_added_monitors, check_closed_broadcast, io}; +use lightning::{check_closed_broadcast, io}; use rand::distr::Alphanumeric; use rand::{rng, Rng}; +use crate::runtime::Runtime; + type TestMonitorUpdatePersister<'a, K> = MonitorUpdatingPersister< &'a K, &'a test_utils::TestLogger, @@ -333,7 +336,7 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { 100000, ); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(node_txn.len(), 1); @@ -345,8 +348,173 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { let reason = ClosureReason::CommitmentTxConfirmed; let node_id_0 = nodes[0].node.get_our_node_id(); check_closed_event(&nodes[1], 1, reason, &[node_id_0], 100000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Make sure everything is persisted as expected after close. 
check_persisted_data!(persister_0_max_pending_updates * 2 * EXPECTED_UPDATES_PER_PAYMENT + 1); } + +struct DelayedStoreInner { + storage: Mutex>>, + delay: Duration, +} + +impl DelayedStoreInner { + fn new(delay: Duration) -> Self { + Self { storage: Mutex::new(HashMap::new()), delay } + } + + fn make_key(pn: &str, sn: &str, key: &str) -> String { + format!("{}/{}/{}", pn, sn, key) + } + + async fn read_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> Result, io::Error> { + tokio::time::sleep(self.delay).await; + + let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key); + let storage = self.storage.lock().unwrap(); + storage + .get(&full_key) + .cloned() + .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found")) + } + + async fn write_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> Result<(), io::Error> { + tokio::time::sleep(self.delay).await; + + let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key); + let mut storage = self.storage.lock().unwrap(); + storage.insert(full_key, buf); + Ok(()) + } + + async fn remove_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> Result<(), io::Error> { + tokio::time::sleep(self.delay).await; + + let full_key = Self::make_key(&primary_namespace, &secondary_namespace, &key); + let mut storage = self.storage.lock().unwrap(); + storage.remove(&full_key); + Ok(()) + } + + async fn list_internal( + &self, primary_namespace: String, secondary_namespace: String, + ) -> Result, io::Error> { + tokio::time::sleep(self.delay).await; + + let prefix = format!("{}/{}/", primary_namespace, secondary_namespace); + let storage = self.storage.lock().unwrap(); + Ok(storage + .keys() + .filter(|k| k.starts_with(&prefix)) + .map(|k| k.strip_prefix(&prefix).unwrap().to_string()) + .collect()) + } +} + +pub struct DelayedStore { + inner: Arc, + runtime: Arc, +} + +impl DelayedStore { + pub fn new(delay_ms: u64, runtime: Arc) -> Self { + Self { inner: Arc::new(DelayedStoreInner::new(Duration::from_millis(delay_ms))), runtime } + } +} + +impl KVStore for DelayedStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> impl Future, io::Error>> + 'static + Send { + let inner = Arc::clone(&self.inner); + let pn = primary_namespace.to_string(); + let sn = secondary_namespace.to_string(); + let key = key.to_string(); + + async move { inner.read_internal(pn, sn, key).await } + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> impl Future> + 'static + Send { + let inner = Arc::clone(&self.inner); + let pn = primary_namespace.to_string(); + let sn = secondary_namespace.to_string(); + let key = key.to_string(); + + async move { inner.write_internal(pn, sn, key, buf).await } + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + ) -> impl Future> + 'static + Send { + let inner = Arc::clone(&self.inner); + let pn = primary_namespace.to_string(); + let sn = secondary_namespace.to_string(); + let key = key.to_string(); + + async move { inner.remove_internal(pn, sn, key).await } + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> impl Future, io::Error>> + 'static + Send { + let inner = Arc::clone(&self.inner); + let pn = primary_namespace.to_string(); + let sn = secondary_namespace.to_string(); + + async move 
{ inner.list_internal(pn, sn).await } + } +} + +impl KVStoreSync for DelayedStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Result, io::Error> { + let inner = Arc::clone(&self.inner); + let pn = primary_namespace.to_string(); + let sn = secondary_namespace.to_string(); + let key = key.to_string(); + + self.runtime.block_on(async move { inner.read_internal(pn, sn, key).await }) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> Result<(), io::Error> { + let inner = Arc::clone(&self.inner); + let pn = primary_namespace.to_string(); + let sn = secondary_namespace.to_string(); + let key = key.to_string(); + + self.runtime.block_on(async move { inner.write_internal(pn, sn, key, buf).await }) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + ) -> Result<(), io::Error> { + let inner = Arc::clone(&self.inner); + let pn = primary_namespace.to_string(); + let sn = secondary_namespace.to_string(); + let key = key.to_string(); + + self.runtime.block_on(async move { inner.remove_internal(pn, sn, key).await }) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Result, io::Error> { + let inner = Arc::clone(&self.inner); + let pn = primary_namespace.to_string(); + let sn = secondary_namespace.to_string(); + + self.runtime.block_on(async move { inner.list_internal(pn, sn).await }) + } +} diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs new file mode 100644 index 000000000..8ad524c98 --- /dev/null +++ b/src/io/tier_store.rs @@ -0,0 +1,981 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use crate::io::utils::check_namespace_key_validity; +use crate::logger::{LdkLogger, Logger}; +use crate::runtime::Runtime; +use crate::types::DynStore; + +use lightning::util::persist::{ + KVStore, KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + SCORER_PERSISTENCE_KEY, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, +}; +use lightning::{io, log_trace}; +use lightning::{log_debug, log_error, log_info, log_warn}; + +use tokio::sync::mpsc::{self, error::TrySendError}; + +use std::future::Future; +use std::sync::Arc; + +// todo(enigbe): Uncertain about appropriate queue size and if this would need +// configuring. +#[cfg(not(test))] +const BACKUP_QUEUE_CAPACITY: usize = 100; +#[cfg(test)] +const BACKUP_QUEUE_CAPACITY: usize = 5; + +/// A 3-tiered [`KVStoreSync`] implementation that manages data across +/// three distinct storage locations, i.e. primary (preferably remote) +/// store for all critical data, optional ephemeral (local) store for +/// non-critical and easily rebuildable data, and backup (preferably +/// local) to lazily backup the primary store for disaster recovery +/// scenarios. +pub(crate) struct TierStore { + inner: Arc, + runtime: Arc, + logger: Arc, +} + +impl TierStore { + pub fn new(primary_store: Arc, runtime: Arc, logger: Arc) -> Self { + let inner = Arc::new(TierStoreInner::new(primary_store, Arc::clone(&logger))); + + Self { inner, runtime, logger } + } + + /// Configures the local backup store for disaster recovery. 
+ /// + /// This store serves as a local copy of the critical data for disaster + /// recovery scenarios. When configured, this method also spawns a background + /// task that asynchronously processes backup writes and removals to avoid + /// blocking primary store operations. + /// + /// The backup operates on a best-effort basis: + /// - Writes are queued asynchronously (non-blocking) + /// - No retry logic (We assume local store is unlikely to have transient failures). + /// - Failures are logged but don't propagate to all the way to caller. + pub fn set_backup_store(&mut self, backup: Arc) { + let (tx, rx) = mpsc::channel::(BACKUP_QUEUE_CAPACITY); + + let backup_clone = Arc::clone(&backup); + let logger = Arc::clone(&self.logger); + + self.runtime.spawn_background_task(Self::process_backup_operation( + rx, + backup_clone, + logger, + )); + + debug_assert_eq!(Arc::strong_count(&self.inner), 1); + + let inner = Arc::get_mut(&mut self.inner).expect( + "TierStore should not be shared during configuration. No other references should exist", + ); + + inner.backup_store = Some(backup); + inner.backup_sender = Some(tx); + } + + async fn process_backup_operation( + mut receiver: mpsc::Receiver, backup_store: Arc, logger: Arc, + ) { + while let Some(op) = receiver.recv().await { + match Self::apply_backup_operation(&op, &backup_store).await { + Ok(_) => { + log_trace!( + logger, + "Backup succeeded for key {}/{}/{}", + op.primary_namespace(), + op.secondary_namespace(), + op.key() + ); + }, + Err(e) => { + log_error!( + logger, + "Backup failed permanently for key {}/{}/{}: {}", + op.primary_namespace(), + op.secondary_namespace(), + op.key(), + e + ); + }, + } + } + } + + async fn apply_backup_operation(op: &BackupOp, store: &Arc) -> io::Result<()> { + match op { + BackupOp::Write { primary_namespace, secondary_namespace, key, data } => { + KVStore::write( + store.as_ref(), + primary_namespace, + secondary_namespace, + key, + data.clone(), + ) + .await + }, + BackupOp::Remove { primary_namespace, secondary_namespace, key, lazy } => { + KVStore::remove(store.as_ref(), primary_namespace, secondary_namespace, key, *lazy) + .await + }, + } + } + + /// Configures the local store for non-critical data storage. + pub fn set_ephemeral_store(&mut self, ephemeral: Arc) { + debug_assert_eq!(Arc::strong_count(&self.inner), 1); + + let inner = Arc::get_mut(&mut self.inner).expect( + "TierStore should not be shared during configuration. 
No other references should exist", + ); + + inner.ephemeral_store = Some(ephemeral); + } +} + +impl KVStore for TierStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> impl Future, io::Error>> + 'static + Send { + let inner = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + async move { inner.read_internal(primary_namespace, secondary_namespace, key).await } + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> impl Future> + 'static + Send { + let inner = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + async move { inner.write_internal(primary_namespace, secondary_namespace, key, buf).await } + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> impl Future> + 'static + Send { + let inner = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + async move { inner.remove_internal(primary_namespace, secondary_namespace, key, lazy).await } + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> impl Future, io::Error>> + 'static + Send { + let inner = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + + async move { inner.list_internal(primary_namespace, secondary_namespace).await } + } +} + +impl KVStoreSync for TierStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + self.runtime.block_on(self.inner.read_internal( + primary_namespace.to_string(), + secondary_namespace.to_string(), + key.to_string(), + )) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + self.runtime.block_on(self.inner.write_internal( + primary_namespace, + secondary_namespace, + key, + buf, + )) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + self.runtime.block_on(self.inner.remove_internal( + primary_namespace, + secondary_namespace, + key, + lazy, + )) + } + + fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + self.runtime.block_on( + self.inner + .list_internal(primary_namespace.to_string(), secondary_namespace.to_string()), + ) + } +} + +pub struct TierStoreInner { + /// For remote data. + primary_store: Arc, + /// For local non-critical/ephemeral data. + ephemeral_store: Option>, + /// For redundancy (disaster recovery). + backup_store: Option>, + backup_sender: Option>, + logger: Arc, +} + +impl TierStoreInner { + /// Creates a tier store with the primary (remote) data store. 
+ pub fn new(primary_store: Arc, logger: Arc) -> Self { + Self { + primary_store, + ephemeral_store: None, + backup_store: None, + backup_sender: None, + logger, + } + } + + /// Queues data for asynchronous backup/write to the configured backup store. + /// + /// We perform a non-blocking send to avoid impacting primary storage operations. + /// This is a no-op if backup store is not configured. + /// + /// ## Returns + /// - `Ok(())`: Backup was successfully queued or no backup is configured + /// - `Err(WouldBlock)`: Backup queue is full - data was not queued + /// - `Err(BrokenPipe)`: Backup queue is no longer available + fn enqueue_backup_write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + if let Some(backup_sender) = &self.backup_sender { + let backup_res = backup_sender.try_send(BackupOp::Write { + primary_namespace: primary_namespace.to_string(), + secondary_namespace: secondary_namespace.to_string(), + key: key.to_string(), + data: buf, + }); + if let Err(e) = backup_res { + match e { + // Assuming the channel is only full for a short time, should we explore + // retrying here to add some resiliency? + TrySendError::Full(op) => { + log_warn!( + self.logger, + "Backup queue is full. Cannot write data for key: {}/{}/{}", + op.primary_namespace(), + op.secondary_namespace(), + op.key() + ); + let e = io::Error::new( + io::ErrorKind::WouldBlock, + "Backup queue is currently full.", + ); + return Err(e); + }, + TrySendError::Closed(op) => { + log_error!( + self.logger, + "Backup queue is closed. Cannot write data for key: {}/{}/{}", + op.primary_namespace(), + op.secondary_namespace(), + op.key() + ); + let e = + io::Error::new(io::ErrorKind::BrokenPipe, "Backup queue is closed."); + return Err(e); + }, + } + } + } + Ok(()) + } + + /// Queues the removal of data from the configured backup store. + /// + /// We perform a non-blocking send to avoid impacting primary storage operations. + /// This is a no-op if backup store is not configured. + /// + /// # Returns + /// - `Ok(())`: Backup was successfully queued or no backup is configured + /// - `Err(WouldBlock)`: Backup queue is full - data was not queued + /// - `Err(BrokenPipe)`: Backup system is no longer available + fn enqueue_backup_remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + if let Some(backup_sender) = &self.backup_sender { + let removal_res = backup_sender.try_send(BackupOp::Remove { + primary_namespace: primary_namespace.to_string(), + secondary_namespace: secondary_namespace.to_string(), + key: key.to_string(), + lazy, + }); + if let Err(e) = removal_res { + match e { + TrySendError::Full(op) => { + log_warn!( + self.logger, + "Backup queue is full. Cannot remove data for key: {}/{}/{}", + op.primary_namespace(), + op.secondary_namespace(), + op.key() + ); + let e = io::Error::new( + io::ErrorKind::WouldBlock, + "Backup queue is currently full.", + ); + return Err(e); + }, + TrySendError::Closed(op) => { + log_error!( + self.logger, + "Backup queue is closed. Cannot remove data for key: {}/{}/{}", + op.primary_namespace(), + op.secondary_namespace(), + op.key() + ); + let e = + io::Error::new(io::ErrorKind::BrokenPipe, "Backup queue is closed."); + return Err(e); + }, + } + } + } + Ok(()) + } + + /// Reads from the primary data store. 
+ async fn read_primary( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + match KVStore::read( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + ) + .await + { + Ok(data) => { + log_info!( + self.logger, + "Read succeeded for key: {}/{}/{}", + primary_namespace, + secondary_namespace, + key + ); + Ok(data) + }, + Err(e) => { + log_error!( + self.logger, + "Failed to read from primary store for key {}/{}/{}: {}.", + primary_namespace, + secondary_namespace, + key, + e + ); + Err(e) + }, + } + } + + /// Lists keys from the primary data store. + async fn list_primary( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> io::Result> { + match KVStore::list(self.primary_store.as_ref(), primary_namespace, secondary_namespace) + .await + { + Ok(keys) => { + log_info!( + self.logger, + "List succeeded for namespace: {}/{}", + primary_namespace, + secondary_namespace + ); + return Ok(keys); + }, + Err(e) => { + log_error!( + self.logger, + "Failed to list from primary store for namespace {}/{}: {}.", + primary_namespace, + secondary_namespace, + e + ); + Err(e) + }, + } + } + + async fn primary_write_then_schedule_backup( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + match KVStore::write( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ) + .await + { + Ok(()) => { + if let Err(e) = + self.enqueue_backup_write(primary_namespace, secondary_namespace, key, buf) + { + // We don't propagate backup errors here, opting to log only. + log_warn!( + self.logger, + "Failed to queue backup write for key: {}/{}/{}. Error: {}", + primary_namespace, + secondary_namespace, + key, + e + ) + } + + Ok(()) + }, + Err(e) => { + log_debug!( + self.logger, + "Skipping backup write due to primary write failure for key: {}/{}/{}.", + primary_namespace, + secondary_namespace, + key + ); + Err(e) + }, + } + } + + async fn primary_remove_then_schedule_backup( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + match KVStore::remove( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + lazy, + ) + .await + { + Ok(()) => { + if let Err(e) = + self.enqueue_backup_remove(primary_namespace, secondary_namespace, key, lazy) + { + // We don't propagate backup errors here, opting to silently log. + log_warn!( + self.logger, + "Failed to queue backup removal for key: {}/{}/{}. Error: {}", + primary_namespace, + secondary_namespace, + key, + e + ) + } + + Ok(()) + }, + Err(e) => { + log_debug!( + self.logger, + "Skipping backup removal due to primary removal failure for key: {}/{}/{}.", + primary_namespace, + secondary_namespace, + key + ); + Err(e) + }, + } + } + + async fn read_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> io::Result> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + Some(key.as_str()), + "read", + )?; + + match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) { + (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY) + | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => { + if let Some(eph_store) = self.ephemeral_store.as_ref() { + // We only try once here (without retry logic) because local failure might be indicative + // of a more serious issue (e.g. 
full memory, memory corruption, permissions change) that + // do not self-resolve such that retrying would negate the latency benefits. + + // The following questions remain: + // 1. Are there situations where local transient errors may warrant a retry? + // 2. Can we reliably identify/detect these transient errors? + // 3. Should we fall back to the primary or backup stores in the event of any error? + KVStore::read( + eph_store.as_ref(), + &primary_namespace, + &secondary_namespace, + &key, + ) + .await + } else { + log_debug!(self.logger, "Ephemeral store not configured. Reading non-critical data from primary or backup stores."); + self.read_primary(&primary_namespace, &secondary_namespace, &key).await + } + }, + _ => self.read_primary(&primary_namespace, &secondary_namespace, &key).await, + } + } + + async fn write_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> io::Result<()> { + match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) { + (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY) + | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => { + if let Some(eph_store) = &self.ephemeral_store { + KVStore::write( + eph_store.as_ref(), + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + buf, + ) + .await + } else { + log_debug!(self.logger, "Ephemeral store not configured. Writing non-critical data to primary and backup stores."); + + self.primary_write_then_schedule_backup( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + buf, + ) + .await + } + }, + _ => { + self.primary_write_then_schedule_backup( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + buf, + ) + .await + }, + } + } + + async fn remove_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> io::Result<()> { + match (primary_namespace.as_str(), secondary_namespace.as_str(), key.as_str()) { + (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY) + | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) => { + if let Some(eph_store) = &self.ephemeral_store { + KVStore::remove( + eph_store.as_ref(), + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + lazy, + ) + .await + } else { + log_debug!(self.logger, "Ephemeral store not configured. Removing non-critical data from primary and backup stores."); + + self.primary_remove_then_schedule_backup( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + lazy, + ) + .await + } + }, + _ => { + self.primary_remove_then_schedule_backup( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + lazy, + ) + .await + }, + } + } + + async fn list_internal( + &self, primary_namespace: String, secondary_namespace: String, + ) -> io::Result> { + match (primary_namespace.as_str(), secondary_namespace.as_str()) { + ( + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + ) + | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _) => { + if let Some(eph_store) = self.ephemeral_store.as_ref() { + KVStoreSync::list(eph_store.as_ref(), &primary_namespace, &secondary_namespace) + } else { + log_debug!( + self.logger, + "Ephemeral store not configured. Listing from primary and backup stores." 
+ ); + self.list_primary(&primary_namespace, &secondary_namespace).await + } + }, + _ => self.list_primary(&primary_namespace, &secondary_namespace).await, + } + } +} + +enum BackupOp { + Write { primary_namespace: String, secondary_namespace: String, key: String, data: Vec }, + Remove { primary_namespace: String, secondary_namespace: String, key: String, lazy: bool }, +} + +impl BackupOp { + fn primary_namespace(&self) -> &str { + match self { + BackupOp::Write { primary_namespace, .. } + | BackupOp::Remove { primary_namespace, .. } => primary_namespace, + } + } + + fn secondary_namespace(&self) -> &str { + match self { + BackupOp::Write { secondary_namespace, .. } + | BackupOp::Remove { secondary_namespace, .. } => secondary_namespace, + } + } + + fn key(&self) -> &str { + match self { + BackupOp::Write { key, .. } | BackupOp::Remove { key, .. } => key, + } + } +} + +#[cfg(test)] +mod tests { + use std::panic::RefUnwindSafe; + use std::path::PathBuf; + use std::sync::Arc; + use std::thread; + use std::time::Duration; + + use lightning::util::logger::Level; + use lightning::util::persist::{ + CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + }; + use lightning_persister::fs_store::FilesystemStore; + + use crate::io::test_utils::{ + do_read_write_remove_list_persist, random_storage_path, DelayedStore, + }; + use crate::io::tier_store::TierStore; + use crate::logger::Logger; + use crate::runtime::Runtime; + #[cfg(not(feature = "uniffi"))] + use crate::types::DynStore; + use crate::types::DynStoreWrapper; + + use super::*; + + impl RefUnwindSafe for TierStore {} + + struct CleanupDir(PathBuf); + impl Drop for CleanupDir { + fn drop(&mut self) { + let _ = std::fs::remove_dir_all(&self.0); + } + } + + fn setup_tier_store( + primary_store: Arc, logger: Arc, runtime: Arc, + ) -> TierStore { + TierStore::new(primary_store, runtime, logger) + } + + #[test] + fn write_read_list_remove() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap()); + + let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap()); + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let tier = setup_tier_store(primary_store, logger, runtime); + + do_read_write_remove_list_persist(&tier); + } + + #[test] + fn ephemeral_routing() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap()); + + let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let mut tier = setup_tier_store(Arc::clone(&primary_store), logger, runtime); + + let ephemeral_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("ephemeral")))); + tier.set_ephemeral_store(Arc::clone(&ephemeral_store)); + + let data = vec![42u8; 32]; + + // Non-critical + KVStoreSync::write( + &tier, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + data.clone(), + ) + .unwrap(); + + // Critical + KVStoreSync::write( + &tier, + 
CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + data.clone(), + ) + .unwrap(); + + let primary_read_ng = KVStoreSync::read( + &*primary_store, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + ); + let ephemeral_read_ng = KVStoreSync::read( + &*ephemeral_store, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + ); + + let primary_read_cm = KVStoreSync::read( + &*primary_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + let ephemeral_read_cm = KVStoreSync::read( + &*ephemeral_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + + assert!(primary_read_ng.is_err()); + assert_eq!(ephemeral_read_ng.unwrap(), data); + + assert!(ephemeral_read_cm.is_err()); + assert_eq!(primary_read_cm.unwrap(), data); + } + + #[test] + fn lazy_backup() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap()); + let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let mut tier = setup_tier_store(Arc::clone(&primary_store), logger, runtime); + + let backup_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("backup")))); + tier.set_backup_store(Arc::clone(&backup_store)); + + let data = vec![42u8; 32]; + + KVStoreSync::write( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + data.clone(), + ) + .unwrap(); + + // Immediate read from backup should fail + let backup_read_cm = KVStoreSync::read( + &*backup_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert!(backup_read_cm.is_err()); + + // Primary not blocked by backup hence immediate read should succeed + let primary_read_cm = KVStoreSync::read( + &*primary_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert_eq!(primary_read_cm.unwrap(), data); + + // Delayed read from backup should succeed + thread::sleep(Duration::from_millis(50)); + let backup_read_cm = KVStoreSync::read( + &*backup_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert_eq!(backup_read_cm.unwrap(), data); + } + + #[test] + fn backup_overflow_doesnt_fail_writes() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path.clone(), Level::Trace).unwrap()); + let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let mut tier = + 
setup_tier_store(Arc::clone(&primary_store), Arc::clone(&logger), Arc::clone(&runtime)); + + let backup_store: Arc = + Arc::new(DynStoreWrapper(DelayedStore::new(100, runtime))); + tier.set_backup_store(Arc::clone(&backup_store)); + + let data = vec![42u8; 32]; + + let key = CHANNEL_MANAGER_PERSISTENCE_KEY; + for i in 0..=10 { + let result = KVStoreSync::write( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + &format!("{}_{}", key, i), + data.clone(), + ); + + assert!(result.is_ok(), "Write {} should succeed", i); + } + + // Check logs for backup queue overflow message + let log_contents = std::fs::read_to_string(&log_path).unwrap(); + assert!( + log_contents.contains("Backup queue is full"), + "Logs should contain backup queue overflow message" + ); + } + + #[test] + fn lazy_removal() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path.clone(), Level::Trace).unwrap()); + let runtime = Arc::new(Runtime::new(Arc::clone(&logger)).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let mut tier = + setup_tier_store(Arc::clone(&primary_store), Arc::clone(&logger), Arc::clone(&runtime)); + + let backup_store: Arc = + Arc::new(DynStoreWrapper(DelayedStore::new(100, runtime))); + tier.set_backup_store(Arc::clone(&backup_store)); + + let data = vec![42u8; 32]; + + let key = CHANNEL_MANAGER_PERSISTENCE_KEY; + let write_result = KVStoreSync::write( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + data.clone(), + ); + assert!(write_result.is_ok(), "Write should succeed"); + + thread::sleep(Duration::from_millis(10)); + + assert_eq!( + KVStoreSync::read( + &*backup_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + ) + .unwrap(), + data + ); + + KVStoreSync::remove( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + true, + ) + .unwrap(); + + thread::sleep(Duration::from_millis(10)); + + let res = KVStoreSync::read( + &*backup_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + ); + + assert!(res.is_err()); + } +} diff --git a/src/io/utils.rs b/src/io/utils.rs index 928d4031b..d2f70377b 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -5,9 +5,11 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use std::fs; +use std::fs::{self, OpenOptions}; use std::io::Write; use std::ops::Deref; +#[cfg(unix)] +use std::os::unix::fs::OpenOptionsExt; use std::path::Path; use std::sync::Arc; @@ -18,7 +20,6 @@ use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; use bdk_chain::ConfirmationBlockTime; use bdk_wallet::ChangeSet as BdkWalletChangeSet; use bitcoin::Network; -use lightning::io::Cursor; use lightning::ln::msgs::DecodeError; use lightning::routing::gossip::NetworkGraph; use lightning::routing::scoring::{ @@ -45,6 +46,7 @@ use crate::io::{ NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, }; use crate::logger::{log_error, LdkLogger, Logger}; +use crate::payment::PendingPaymentDetails; use crate::peer_store::PeerStore; use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; @@ -78,7 +80,11 @@ pub(crate) fn read_or_generate_seed_file( fs::create_dir_all(parent_dir)?; } - let mut f = fs::File::create(keys_seed_path)?; + #[cfg(unix)] + let mut f = OpenOptions::new().write(true).create_new(true).mode(0o400).open(keys_seed_path)?; + + #[cfg(not(unix))] + let mut f = OpenOptions::new().write(true).create_new(true).open(keys_seed_path)?; f.write_all(&key)?; @@ -88,59 +94,62 @@ pub(crate) fn read_or_generate_seed_file( } /// Read a previously persisted [`NetworkGraph`] from the store. -pub(crate) fn read_network_graph( - kv_store: Arc, logger: L, +pub(crate) async fn read_network_graph( + kv_store: &DynStore, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, { - let mut reader = Cursor::new(KVStoreSync::read( + let reader = KVStore::read( &*kv_store, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, - )?); - NetworkGraph::read(&mut reader, logger.clone()).map_err(|e| { + ) + .await?; + NetworkGraph::read(&mut &*reader, logger.clone()).map_err(|e| { log_error!(logger, "Failed to deserialize NetworkGraph: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize NetworkGraph") }) } /// Read a previously persisted [`ProbabilisticScorer`] from the store. -pub(crate) fn read_scorer>, L: Deref + Clone>( - kv_store: Arc, network_graph: G, logger: L, +pub(crate) async fn read_scorer>, L: Deref + Clone>( + kv_store: &DynStore, network_graph: G, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, { let params = ProbabilisticScoringDecayParameters::default(); - let mut reader = Cursor::new(KVStoreSync::read( + let reader = KVStore::read( &*kv_store, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, - )?); + ) + .await?; let args = (params, network_graph, logger.clone()); - ProbabilisticScorer::read(&mut reader, args).map_err(|e| { + ProbabilisticScorer::read(&mut &*reader, args).map_err(|e| { log_error!(logger, "Failed to deserialize scorer: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize Scorer") }) } /// Read previously persisted external pathfinding scores from the cache. 
-pub(crate) fn read_external_pathfinding_scores_from_cache( - kv_store: Arc, logger: L, +pub(crate) async fn read_external_pathfinding_scores_from_cache( + kv_store: &DynStore, logger: L, ) -> Result where L::Target: LdkLogger, { - let mut reader = Cursor::new(KVStoreSync::read( + let reader = KVStore::read( &*kv_store, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, EXTERNAL_PATHFINDING_SCORES_CACHE_KEY, - )?); - ChannelLiquidities::read(&mut reader).map_err(|e| { + ) + .await?; + ChannelLiquidities::read(&mut &*reader).map_err(|e| { log_error!(logger, "Failed to deserialize scorer: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize Scorer") }) @@ -148,7 +157,7 @@ where /// Persist external pathfinding scores to the cache. pub(crate) async fn write_external_pathfinding_scores_to_cache( - kv_store: Arc, data: &ChannelLiquidities, logger: L, + kv_store: &DynStore, data: &ChannelLiquidities, logger: L, ) -> Result<(), Error> where L::Target: LdkLogger, @@ -175,64 +184,107 @@ where } /// Read previously persisted events from the store. -pub(crate) fn read_event_queue( +pub(crate) async fn read_event_queue( kv_store: Arc, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, { - let mut reader = Cursor::new(KVStoreSync::read( + let reader = KVStore::read( &*kv_store, EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_KEY, - )?); - EventQueue::read(&mut reader, (kv_store, logger.clone())).map_err(|e| { + ) + .await?; + EventQueue::read(&mut &*reader, (kv_store, logger.clone())).map_err(|e| { log_error!(logger, "Failed to deserialize event queue: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize EventQueue") }) } /// Read previously persisted peer info from the store. -pub(crate) fn read_peer_info( +pub(crate) async fn read_peer_info( kv_store: Arc, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, { - let mut reader = Cursor::new(KVStoreSync::read( + let reader = KVStore::read( &*kv_store, PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, PEER_INFO_PERSISTENCE_KEY, - )?); - PeerStore::read(&mut reader, (kv_store, logger.clone())).map_err(|e| { + ) + .await?; + PeerStore::read(&mut &*reader, (kv_store, logger.clone())).map_err(|e| { log_error!(logger, "Failed to deserialize peer store: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize PeerStore") }) } /// Read previously persisted payments information from the store. -pub(crate) fn read_payments( - kv_store: Arc, logger: L, +pub(crate) async fn read_payments( + kv_store: &DynStore, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, { let mut res = Vec::new(); - for stored_key in KVStoreSync::list( + let mut stored_keys = KVStore::list( &*kv_store, PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - )? 
{ - let mut reader = Cursor::new(KVStoreSync::read( - &*kv_store, - PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - &stored_key, - )?); - let payment = PaymentDetails::read(&mut reader).map_err(|e| { + ) + .await?; + + const BATCH_SIZE: usize = 50; + + let mut set = tokio::task::JoinSet::new(); + + // Fill JoinSet with tasks if possible + while set.len() < BATCH_SIZE && !stored_keys.is_empty() { + if let Some(next_key) = stored_keys.pop() { + let fut = KVStore::read( + &*kv_store, + PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + &next_key, + ); + set.spawn(fut); + debug_assert!(set.len() <= BATCH_SIZE); + } + } + + while let Some(read_res) = set.join_next().await { + // Exit early if we get an IO error. + let reader = read_res + .map_err(|e| { + log_error!(logger, "Failed to read PaymentDetails: {}", e); + set.abort_all(); + e + })? + .map_err(|e| { + log_error!(logger, "Failed to read PaymentDetails: {}", e); + set.abort_all(); + e + })?; + + // Refill set for every finished future, if we still have something to do. + if let Some(next_key) = stored_keys.pop() { + let fut = KVStore::read( + &*kv_store, + PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + &next_key, + ); + set.spawn(fut); + debug_assert!(set.len() <= BATCH_SIZE); + } + + // Handle result. + let payment = PaymentDetails::read(&mut &*reader).map_err(|e| { log_error!(logger, "Failed to deserialize PaymentDetails: {}", e); std::io::Error::new( std::io::ErrorKind::InvalidData, @@ -241,21 +293,26 @@ where })?; res.push(payment); } + + debug_assert!(set.is_empty()); + debug_assert!(stored_keys.is_empty()); + Ok(res) } /// Read `OutputSweeper` state from the store. -pub(crate) fn read_output_sweeper( +pub(crate) async fn read_output_sweeper( broadcaster: Arc, fee_estimator: Arc, chain_data_source: Arc, keys_manager: Arc, kv_store: Arc, logger: Arc, ) -> Result { - let mut reader = Cursor::new(KVStoreSync::read( + let reader = KVStore::read( &*kv_store, OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_KEY, - )?); + ) + .await?; let args = ( broadcaster, fee_estimator, @@ -265,33 +322,34 @@ pub(crate) fn read_output_sweeper( kv_store, logger.clone(), ); - let (_, sweeper) = <(_, Sweeper)>::read(&mut reader, args).map_err(|e| { + let (_, sweeper) = <(_, Sweeper)>::read(&mut &*reader, args).map_err(|e| { log_error!(logger, "Failed to deserialize OutputSweeper: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize OutputSweeper") })?; Ok(sweeper) } -pub(crate) fn read_node_metrics( - kv_store: Arc, logger: L, +pub(crate) async fn read_node_metrics( + kv_store: &DynStore, logger: L, ) -> Result where L::Target: LdkLogger, { - let mut reader = Cursor::new(KVStoreSync::read( + let reader = KVStore::read( &*kv_store, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, NODE_METRICS_KEY, - )?); - NodeMetrics::read(&mut reader).map_err(|e| { + ) + .await?; + NodeMetrics::read(&mut &*reader).map_err(|e| { log_error!(logger, "Failed to deserialize NodeMetrics: {}", e); std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize NodeMetrics") }) } pub(crate) fn write_node_metrics( - node_metrics: &NodeMetrics, kv_store: Arc, logger: L, + node_metrics: &NodeMetrics, kv_store: &DynStore, logger: L, ) -> Result<(), Error> where L::Target: LdkLogger, @@ -418,12 +476,12 @@ macro_rules! 
impl_read_write_change_set_type { $key:expr ) => { pub(crate) fn $read_name( - kv_store: Arc, logger: L, + kv_store: &DynStore, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, { - let bytes = + let reader = match KVStoreSync::read(&*kv_store, $primary_namespace, $secondary_namespace, $key) { Ok(bytes) => bytes, @@ -444,9 +502,8 @@ macro_rules! impl_read_write_change_set_type { }, }; - let mut reader = Cursor::new(bytes); let res: Result, DecodeError> = - Readable::read(&mut reader); + Readable::read(&mut &*reader); match res { Ok(res) => Ok(Some(res.0)), Err(e) => { @@ -460,7 +517,7 @@ macro_rules! impl_read_write_change_set_type { } pub(crate) fn $write_name( - value: &$change_set_type, kv_store: Arc, logger: L, + value: &$change_set_type, kv_store: &DynStore, logger: L, ) -> Result<(), std::io::Error> where L::Target: LdkLogger, @@ -538,44 +595,115 @@ impl_read_write_change_set_type!( // Reads the full BdkWalletChangeSet or returns default fields pub(crate) fn read_bdk_wallet_change_set( - kv_store: Arc, logger: Arc, + kv_store: &DynStore, logger: &Logger, ) -> Result, std::io::Error> { let mut change_set = BdkWalletChangeSet::default(); // We require a descriptor and return `None` to signal creation of a new wallet otherwise. - if let Some(descriptor) = - read_bdk_wallet_descriptor(Arc::clone(&kv_store), Arc::clone(&logger))? - { + if let Some(descriptor) = read_bdk_wallet_descriptor(kv_store, logger)? { change_set.descriptor = Some(descriptor); } else { return Ok(None); } // We require a change_descriptor and return `None` to signal creation of a new wallet otherwise. - if let Some(change_descriptor) = - read_bdk_wallet_change_descriptor(Arc::clone(&kv_store), Arc::clone(&logger))? - { + if let Some(change_descriptor) = read_bdk_wallet_change_descriptor(kv_store, logger)? { change_set.change_descriptor = Some(change_descriptor); } else { return Ok(None); } // We require a network and return `None` to signal creation of a new wallet otherwise. - if let Some(network) = read_bdk_wallet_network(Arc::clone(&kv_store), Arc::clone(&logger))? { + if let Some(network) = read_bdk_wallet_network(kv_store, logger)? { change_set.network = Some(network); } else { return Ok(None); } - read_bdk_wallet_local_chain(Arc::clone(&kv_store), Arc::clone(&logger))? + read_bdk_wallet_local_chain(&*kv_store, logger)? .map(|local_chain| change_set.local_chain = local_chain); - read_bdk_wallet_tx_graph(Arc::clone(&kv_store), Arc::clone(&logger))? - .map(|tx_graph| change_set.tx_graph = tx_graph); - read_bdk_wallet_indexer(Arc::clone(&kv_store), Arc::clone(&logger))? - .map(|indexer| change_set.indexer = indexer); + read_bdk_wallet_tx_graph(&*kv_store, logger)?.map(|tx_graph| change_set.tx_graph = tx_graph); + read_bdk_wallet_indexer(&*kv_store, logger)?.map(|indexer| change_set.indexer = indexer); Ok(Some(change_set)) } +/// Read previously persisted pending payments information from the store. 
+pub(crate) async fn read_pending_payments( + kv_store: &DynStore, logger: L, +) -> Result, std::io::Error> +where + L::Target: LdkLogger, +{ + let mut res = Vec::new(); + + let mut stored_keys = KVStore::list( + &*kv_store, + PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + ) + .await?; + + const BATCH_SIZE: usize = 50; + + let mut set = tokio::task::JoinSet::new(); + + // Fill JoinSet with tasks if possible + while set.len() < BATCH_SIZE && !stored_keys.is_empty() { + if let Some(next_key) = stored_keys.pop() { + let fut = KVStore::read( + &*kv_store, + PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + &next_key, + ); + set.spawn(fut); + debug_assert!(set.len() <= BATCH_SIZE); + } + } + + while let Some(read_res) = set.join_next().await { + // Exit early if we get an IO error. + let reader = read_res + .map_err(|e| { + log_error!(logger, "Failed to read PendingPaymentDetails: {}", e); + set.abort_all(); + e + })? + .map_err(|e| { + log_error!(logger, "Failed to read PendingPaymentDetails: {}", e); + set.abort_all(); + e + })?; + + // Refill set for every finished future, if we still have something to do. + if let Some(next_key) = stored_keys.pop() { + let fut = KVStore::read( + &*kv_store, + PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + &next_key, + ); + set.spawn(fut); + debug_assert!(set.len() <= BATCH_SIZE); + } + + // Handle result. + let pending_payment = PendingPaymentDetails::read(&mut &*reader).map_err(|e| { + log_error!(logger, "Failed to deserialize PendingPaymentDetails: {}", e); + std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Failed to deserialize PendingPaymentDetails", + ) + })?; + res.push(pending_payment); + } + + debug_assert!(set.is_empty()); + debug_assert!(stored_keys.is_empty()); + + Ok(res) +} + #[cfg(test)] mod tests { use super::read_or_generate_seed_file; diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index eb439ed10..b4fdc770a 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -745,11 +745,10 @@ async fn determine_and_write_schema_version( })? .0; - let schema_version: VssSchemaVersion = Readable::read(&mut io::Cursor::new(decrypted)) - .map_err(|e| { - let msg = format!("Failed to decode schema version: {}", e); - Error::new(ErrorKind::Other, msg) - })?; + let schema_version: VssSchemaVersion = Readable::read(&mut &*decrypted).map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; Ok(schema_version) } else { // The schema version wasn't present, this either means we're running for the first time *or* it's V0 pre-migration (predating writing of the schema version). 
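The batched reads above keep at most `BATCH_SIZE` `KVStore::read` futures in flight at once, refilling the `JoinSet` each time a read completes. A minimal, self-contained sketch of that bounded-concurrency pattern follows; the `read_key` helper is a hypothetical stand-in for the actual store read:

```rust
use tokio::task::JoinSet;

const BATCH_SIZE: usize = 50;

// Stand-in for an async KV-store read; illustrative only.
async fn read_key(key: String) -> Result<Vec<u8>, std::io::Error> {
	Ok(key.into_bytes())
}

async fn read_all(mut keys: Vec<String>) -> Result<Vec<Vec<u8>>, std::io::Error> {
	let mut set = JoinSet::new();
	let mut res = Vec::new();

	// Prime the set with up to BATCH_SIZE in-flight reads.
	while set.len() < BATCH_SIZE {
		match keys.pop() {
			Some(key) => {
				set.spawn(read_key(key));
			},
			None => break,
		}
	}

	while let Some(joined) = set.join_next().await {
		// `join_next` yields Result<Result<_, io::Error>, JoinError>; surface both layers.
		let bytes = joined.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??;

		// Refill for every finished future so up to BATCH_SIZE reads stay in flight.
		if let Some(key) = keys.pop() {
			set.spawn(read_key(key));
		}

		res.push(bytes);
	}

	Ok(res)
}
```

On failure the real code additionally calls `set.abort_all()` before returning, so outstanding reads are cancelled rather than left running in the background.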
diff --git a/src/lib.rs b/src/lib.rs index fdaa0f4f1..6a9a5efae 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -110,6 +110,8 @@ use std::default::Default; use std::net::ToSocketAddrs; use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +#[cfg(cycle_tests)] +use std::{any::Any, sync::Weak}; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; use bitcoin::secp256k1::PublicKey; @@ -132,13 +134,15 @@ use event::{EventHandler, EventQueue}; use fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; #[cfg(feature = "uniffi")] use ffi::*; +#[cfg(feature = "uniffi")] +pub use ffi::{FfiDynStore, ForeignDynStoreTrait, IOError}; use gossip::GossipSource; use graph::NetworkGraph; use io::utils::write_node_metrics; use lightning::chain::BestBlock; use lightning::events::bump_transaction::{Input, Wallet as LdkWallet}; use lightning::impl_writeable_tlv_based; -use lightning::ln::chan_utils::{make_funding_redeemscript, FUNDING_TRANSACTION_WITNESS_WEIGHT}; +use lightning::ln::chan_utils::FUNDING_TRANSACTION_WITNESS_WEIGHT; use lightning::ln::channel_state::{ChannelDetails as LdkChannelDetails, ChannelShutdownState}; use lightning::ln::channelmanager::PaymentId; use lightning::ln::funding::SpliceContribution; @@ -152,16 +156,19 @@ use payment::asynchronous::om_mailbox::OnionMessageMailbox; use payment::asynchronous::static_invoice_store::StaticInvoiceStore; use payment::{ Bolt11Payment, Bolt12Payment, OnchainPayment, PaymentDetails, SpontaneousPayment, - UnifiedQrPayment, + UnifiedPayment, }; use peer_store::{PeerInfo, PeerStore}; use rand::Rng; use runtime::Runtime; use types::{ - Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph, + Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, Graph, HRNResolver, KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet, }; -pub use types::{ChannelDetails, CustomTlvRecord, PeerDetails, SyncAndAsyncKVStore, UserChannelId}; +pub use types::{ + ChannelDetails, CustomTlvRecord, DynStore, DynStoreWrapper, PeerDetails, SyncAndAsyncKVStore, + UserChannelId, +}; pub use { bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio, vss_client, @@ -172,6 +179,23 @@ use crate::scoring::setup_background_pathfinding_scores_sync; #[cfg(feature = "uniffi")] uniffi::include_scaffolding!("ldk_node"); +#[cfg(cycle_tests)] +/// A list of [`Weak`]s which can be used to check that a [`Node`]'s inner fields are being +/// properly released after the [`Node`] is dropped. +pub struct LeakChecker(Vec>); + +#[cfg(cycle_tests)] +impl LeakChecker { + /// Asserts that all the stored [`Weak`]s point to contents which have been freed. + /// + /// This will (obviously) panic if the [`Node`] has not yet been dropped. + pub fn assert_no_leaks(&self) { + for weak in self.0.iter() { + assert_eq!(weak.strong_count(), 0); + } + } +} + /// The main interface object of LDK Node, wrapping the necessary LDK and BDK functionalities. /// /// Needs to be initialized and instantiated through [`Builder::build`]. 
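The `cycle_tests`-only `LeakChecker` introduced above holds type-erased `Weak` references to the `Node`'s inner `Arc`s, so a test can assert after dropping the `Node` that no background task or reference cycle is still keeping them alive. A toy sketch of that idea (the `Subsystem` type and `main` harness are hypothetical; the real checker stores one `Weak` per inner field):

```rust
use std::any::Any;
use std::sync::{Arc, Weak};

struct Subsystem {
	_name: &'static str,
}

fn main() {
	let subsystem = Arc::new(Subsystem { _name: "wallet" });

	// Record a type-erased weak reference, as the leak checker does for each field.
	let weak: Weak<dyn Any> = Arc::downgrade(&subsystem);

	// While the owner (or any clone held by a background task) is alive, the count is non-zero.
	assert_eq!(weak.strong_count(), 1);

	drop(subsystem);

	// With no leaks, every strong reference is gone once the owner is dropped; a cycle
	// (e.g. a spawned task holding an Arc back into the owner) would keep this above zero.
	assert_eq!(weak.strong_count(), 0);
}
```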
@@ -206,6 +230,9 @@ pub struct Node { node_metrics: Arc>, om_mailbox: Option>, async_payments_role: Option, + hrn_resolver: Arc, + #[cfg(cycle_tests)] + _leak_checker: LeakChecker, } impl Node { @@ -289,7 +316,7 @@ impl Node { { let mut locked_node_metrics = gossip_node_metrics.write().unwrap(); locked_node_metrics.latest_rgs_snapshot_timestamp = Some(updated_timestamp); - write_node_metrics(&*locked_node_metrics, Arc::clone(&gossip_sync_store), Arc::clone(&gossip_sync_logger)) + write_node_metrics(&*locked_node_metrics, &*gossip_sync_store, Arc::clone(&gossip_sync_logger)) .unwrap_or_else(|e| { log_error!(gossip_sync_logger, "Persistence failed: {}", e); }); @@ -505,7 +532,7 @@ impl Node { { let mut locked_node_metrics = bcast_node_metrics.write().unwrap(); locked_node_metrics.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) + write_node_metrics(&*locked_node_metrics, &*bcast_store, Arc::clone(&bcast_logger)) .unwrap_or_else(|e| { log_error!(bcast_logger, "Persistence failed: {}", e); }); @@ -726,8 +753,6 @@ impl Node { locked_node_metrics.latest_pathfinding_scores_sync_timestamp; let latest_node_announcement_broadcast_timestamp = locked_node_metrics.latest_node_announcement_broadcast_timestamp; - let latest_channel_monitor_archival_height = - locked_node_metrics.latest_channel_monitor_archival_height; NodeStatus { is_running, @@ -738,7 +763,6 @@ impl Node { latest_rgs_snapshot_timestamp, latest_pathfinding_scores_sync_timestamp, latest_node_announcement_broadcast_timestamp, - latest_channel_monitor_archival_height, } } @@ -942,37 +966,45 @@ impl Node { )) } - /// Returns a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], - /// and [BOLT 12] payment options. + /// Returns a payment handler that supports creating and paying to [BIP 21] URIs with on-chain, + /// [BOLT 11], and [BOLT 12] payment options. + /// + /// Also supports sending payments to [BIP 353] Human-Readable Names. /// /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki + /// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki #[cfg(not(feature = "uniffi"))] - pub fn unified_qr_payment(&self) -> UnifiedQrPayment { - UnifiedQrPayment::new( + pub fn unified_payment(&self) -> UnifiedPayment { + UnifiedPayment::new( self.onchain_payment().into(), self.bolt11_payment().into(), self.bolt12_payment().into(), Arc::clone(&self.config), Arc::clone(&self.logger), + Arc::clone(&self.hrn_resolver), ) } - /// Returns a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], - /// and [BOLT 12] payment options. + /// Returns a payment handler that supports creating and paying to [BIP 21] URIs with on-chain, + /// [BOLT 11], and [BOLT 12] payment options. + /// + /// Also supports sending payments to [BIP 353] Human-Readable Names. 
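Taken together with the `send` implementation further below, a hypothetical caller of the renamed handler could look as follows. This is a sketch only: it assumes a built-and-started `ldk_node::Node` without the `uniffi` feature, and the `send` signature (URI-or-HRN string, optional amount in msat, optional route parameters) is read off this diff and may differ in a released API.

```rust
use ldk_node::payment::UnifiedPaymentResult;
use ldk_node::Node;

/// Pays a BIP 21 URI or a BIP 353 Human-Readable Name via the unified handler.
async fn pay(node: &Node, uri_or_hrn: &str) {
    // Only used if the parsed payment instructions leave the amount open.
    let amount_msat = Some(21_000_000);
    match node.unified_payment().send(uri_or_hrn, amount_msat, None).await {
        Ok(UnifiedPaymentResult::Bolt12 { payment_id }) => {
            println!("paid offer: {:?}", payment_id)
        },
        Ok(UnifiedPaymentResult::Bolt11 { payment_id }) => {
            println!("paid invoice: {:?}", payment_id)
        },
        Ok(UnifiedPaymentResult::Onchain { txid }) => println!("broadcast on-chain tx: {}", txid),
        Err(e) => eprintln!("unified payment failed: {:?}", e),
    }
}
```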
/// /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki + /// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki #[cfg(feature = "uniffi")] - pub fn unified_qr_payment(&self) -> Arc { - Arc::new(UnifiedQrPayment::new( + pub fn unified_payment(&self) -> Arc { + Arc::new(UnifiedPayment::new( self.onchain_payment(), self.bolt11_payment(), self.bolt12_payment(), Arc::clone(&self.config), Arc::clone(&self.logger), + Arc::clone(&self.hrn_resolver), )) } @@ -1265,29 +1297,27 @@ impl Node { const EMPTY_SCRIPT_SIG_WEIGHT: u64 = 1 /* empty script_sig */ * bitcoin::constants::WITNESS_SCALE_FACTOR as u64; - // Used for creating a redeem script for the previous funding txo and the new funding - // txo. Only needed when selecting which UTXOs to include in the funding tx that would - // be sufficient to pay for fees. Hence, the value does not matter. - let dummy_pubkey = PublicKey::from_slice(&[2; 33]).unwrap(); - let funding_txo = channel_details.funding_txo.ok_or_else(|| { log_error!(self.logger, "Failed to splice channel: channel not yet ready",); Error::ChannelSplicingFailed })?; + let funding_output = channel_details.get_funding_output().ok_or_else(|| { + log_error!(self.logger, "Failed to splice channel: channel not yet ready"); + Error::ChannelSplicingFailed + })?; + let shared_input = Input { outpoint: funding_txo.into_bitcoin_outpoint(), - previous_utxo: bitcoin::TxOut { - value: Amount::from_sat(channel_details.channel_value_satoshis), - script_pubkey: make_funding_redeemscript(&dummy_pubkey, &dummy_pubkey) - .to_p2wsh(), - }, + previous_utxo: funding_output.clone(), satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + FUNDING_TRANSACTION_WITNESS_WEIGHT, }; let shared_output = bitcoin::TxOut { value: shared_input.previous_utxo.value + Amount::from_sat(splice_amount_sats), - script_pubkey: make_funding_redeemscript(&dummy_pubkey, &dummy_pubkey).to_p2wsh(), + // will not actually be the exact same script pubkey after splice + // but it is the same size and good enough for coin selection purposes + script_pubkey: funding_output.script_pubkey.clone(), }; let fee_rate = self.fee_estimator.estimate_fee_rate(ConfirmationTarget::ChannelFunding); @@ -1303,13 +1333,17 @@ impl Node { Error::ChannelSplicingFailed })?; + // insert channel's funding utxo into the wallet so we can later calculate fees + // correctly when viewing this splice-in. 
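The comment above is the motivation for the new `Wallet::insert_txo` helper: a fee can only be attributed to a transaction if the wallet knows the value of every input it spends, and the old channel funding output is not otherwise tracked by the on-chain wallet. A small standalone sketch of that constraint; the `fee` helper below is illustrative and not the BDK implementation.

```rust
use std::collections::HashMap;

use bitcoin::{Amount, OutPoint, Transaction, TxOut};

/// Fee = sum of spent prevout values - sum of created output values.
/// Returns `None` if any spent prevout is unknown, which is exactly what would
/// happen for the spliced channel's funding output without `insert_txo`.
fn fee(tx: &Transaction, known_txos: &HashMap<OutPoint, TxOut>) -> Option<Amount> {
    let mut input_value = Amount::ZERO;
    for input in &tx.input {
        input_value += known_txos.get(&input.previous_output)?.value;
    }
    let output_value = tx.output.iter().fold(Amount::ZERO, |acc, out| acc + out.value);
    input_value.checked_sub(output_value)
}
```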
+ self.wallet.insert_txo(funding_txo.into_bitcoin_outpoint(), funding_output)?; + let change_address = self.wallet.get_new_internal_address()?; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_amount_sats), + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_amount_sats), inputs, - change_script: Some(change_address.script_pubkey()), - }; + Some(change_address.script_pubkey()), + ); let funding_feerate_per_kw: u32 = match fee_rate.to_sat_per_kwu().try_into() { Ok(fee_rate) => fee_rate, @@ -1381,12 +1415,10 @@ impl Node { self.wallet.parse_and_validate_address(address)?; - let contribution = SpliceContribution::SpliceOut { - outputs: vec![bitcoin::TxOut { - value: Amount::from_sat(splice_amount_sats), - script_pubkey: address.script_pubkey(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![bitcoin::TxOut { + value: Amount::from_sat(splice_amount_sats), + script_pubkey: address.script_pubkey(), + }]); let fee_rate = self.fee_estimator.estimate_fee_rate(ConfirmationTarget::ChannelFunding); let funding_feerate_per_kw: u32 = match fee_rate.to_sat_per_kwu().try_into() { @@ -1398,6 +1430,18 @@ impl Node { }, }; + let funding_txo = channel_details.funding_txo.ok_or_else(|| { + log_error!(self.logger, "Failed to splice channel: channel not yet ready",); + Error::ChannelSplicingFailed + })?; + + let funding_output = channel_details.get_funding_output().ok_or_else(|| { + log_error!(self.logger, "Failed to splice channel: channel not yet ready"); + Error::ChannelSplicingFailed + })?; + + self.wallet.insert_txo(funding_txo.into_bitcoin_outpoint(), funding_output)?; + self.channel_manager .splice_channel( &channel_details.channel_id, @@ -1782,10 +1826,6 @@ pub struct NodeStatus { /// /// Will be `None` if we have no public channels or we haven't broadcasted yet. pub latest_node_announcement_broadcast_timestamp: Option, - /// The block height when we last archived closed channel monitor data. - /// - /// Will be `None` if we haven't archived any monitors of closed channels yet. - pub latest_channel_monitor_archival_height: Option, } /// Status fields that are persisted across restarts. 
@@ -1797,7 +1837,6 @@ pub(crate) struct NodeMetrics { latest_rgs_snapshot_timestamp: Option, latest_pathfinding_scores_sync_timestamp: Option, latest_node_announcement_broadcast_timestamp: Option, - latest_channel_monitor_archival_height: Option, } impl Default for NodeMetrics { @@ -1809,7 +1848,6 @@ impl Default for NodeMetrics { latest_rgs_snapshot_timestamp: None, latest_pathfinding_scores_sync_timestamp: None, latest_node_announcement_broadcast_timestamp: None, - latest_channel_monitor_archival_height: None, } } } @@ -1821,7 +1859,8 @@ impl_writeable_tlv_based!(NodeMetrics, { (4, latest_fee_rate_cache_update_timestamp, option), (6, latest_rgs_snapshot_timestamp, option), (8, latest_node_announcement_broadcast_timestamp, option), - (10, latest_channel_monitor_archival_height, option), + // 10 used to be latest_channel_monitor_archival_height + (10, _legacy_latest_channel_monitor_archival_height, (legacy, u32, |_: &NodeMetrics| None::> )), }); pub(crate) fn total_anchor_channels_reserve_sats( diff --git a/src/liquidity.rs b/src/liquidity.rs index 74e6098dd..2151110b6 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -9,7 +9,7 @@ use std::collections::HashMap; use std::ops::Deref; -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, Mutex, RwLock, Weak}; use std::time::Duration; use bitcoin::hashes::{sha256, Hash}; @@ -291,7 +291,7 @@ where lsps2_service: Option, wallet: Arc, channel_manager: Arc, - peer_manager: RwLock>>, + peer_manager: RwLock>>, keys_manager: Arc, liquidity_manager: Arc, config: Arc, @@ -302,7 +302,7 @@ impl LiquiditySource where L::Target: LdkLogger, { - pub(crate) fn set_peer_manager(&self, peer_manager: Arc) { + pub(crate) fn set_peer_manager(&self, peer_manager: Weak) { *self.peer_manager.write().unwrap() = Some(peer_manager); } @@ -715,8 +715,8 @@ where return; }; - let init_features = if let Some(peer_manager) = - self.peer_manager.read().unwrap().as_ref() + let init_features = if let Some(Some(peer_manager)) = + self.peer_manager.read().unwrap().as_ref().map(|weak| weak.upgrade()) { // Fail if we're not connected to the prospective channel partner. if let Some(peer) = peer_manager.peer_by_node_id(&their_network_key) { diff --git a/src/logger.rs b/src/logger.rs index 4eaefad74..f2b53a1dc 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -7,14 +7,16 @@ //! Logging-related objects. -#[cfg(not(feature = "uniffi"))] use core::fmt; use std::fs; use std::io::Write; use std::path::Path; use std::sync::Arc; +use bitcoin::secp256k1::PublicKey; use chrono::Utc; +use lightning::ln::types::ChannelId; +use lightning::types::payment::PaymentHash; pub use lightning::util::logger::Level as LogLevel; pub(crate) use lightning::util::logger::{Logger as LdkLogger, Record as LdkRecord}; pub(crate) use lightning::{log_bytes, log_debug, log_error, log_info, log_trace}; @@ -32,6 +34,64 @@ pub struct LogRecord<'a> { pub module_path: &'a str, /// The line containing the message. pub line: u32, + /// The node id of the peer pertaining to the logged record. + pub peer_id: Option, + /// The channel id of the channel pertaining to the logged record. + pub channel_id: Option, + /// The payment hash pertaining to the logged record. + pub payment_hash: Option, +} + +/// Structured context fields for log messages. +/// +/// Implements `Display` to format context fields (channel_id, peer_id, payment_hash) directly +/// into a formatter, avoiding intermediate heap allocations when used with `format_args!` or +/// `write!` macros. 
+/// +/// Note: LDK's `Record` Display implementation uses fixed-width padded columns and different +/// formatting for test vs production builds. We intentionally use a simpler format here: +/// fields are only included when present (no padding), and the format is consistent across +/// all build configurations. +pub struct LogContext<'a> { + /// The channel id of the channel pertaining to the logged record. + pub channel_id: Option<&'a ChannelId>, + /// The node id of the peer pertaining to the logged record. + pub peer_id: Option<&'a PublicKey>, + /// The payment hash pertaining to the logged record. + pub payment_hash: Option<&'a PaymentHash>, +} + +impl fmt::Display for LogContext<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn truncate(s: &str) -> &str { + &s[..s.len().min(6)] + } + + if self.channel_id.is_none() && self.peer_id.is_none() && self.payment_hash.is_none() { + return Ok(()); + } + + write!(f, " (")?; + let mut need_space = false; + if let Some(c) = self.channel_id { + write!(f, "ch:{}", truncate(&c.to_string()))?; + need_space = true; + } + if let Some(p) = self.peer_id { + if need_space { + write!(f, " ")?; + } + write!(f, "p:{}", truncate(&p.to_string()))?; + need_space = true; + } + if let Some(h) = self.payment_hash { + if need_space { + write!(f, " ")?; + } + write!(f, "h:{}", truncate(&format!("{:?}", h)))?; + } + write!(f, ")") + } } /// A unit of logging output with metadata to enable filtering `module_path`, @@ -50,6 +110,12 @@ pub struct LogRecord { pub module_path: String, /// The line containing the message. pub line: u32, + /// The node id of the peer pertaining to the logged record. + pub peer_id: Option, + /// The channel id of the channel pertaining to the logged record. + pub channel_id: Option, + /// The payment hash pertaining to the logged record. 
+ pub payment_hash: Option, } #[cfg(feature = "uniffi")] @@ -60,6 +126,9 @@ impl<'a> From> for LogRecord { args: record.args.to_string(), module_path: record.module_path.to_string(), line: record.line, + peer_id: record.peer_id, + channel_id: record.channel_id, + payment_hash: record.payment_hash, } } } @@ -72,6 +141,9 @@ impl<'a> From> for LogRecord<'a> { args: record.args, module_path: record.module_path, line: record.line, + peer_id: record.peer_id, + channel_id: record.channel_id, + payment_hash: record.payment_hash, } } } @@ -113,6 +185,12 @@ pub(crate) enum Writer { impl LogWriter for Writer { fn log(&self, record: LogRecord) { + let context = LogContext { + channel_id: record.channel_id.as_ref(), + peer_id: record.peer_id.as_ref(), + payment_hash: record.payment_hash.as_ref(), + }; + match self { Writer::FileWriter { file_path, max_log_level } => { if record.level < *max_log_level { @@ -120,12 +198,13 @@ impl LogWriter for Writer { } let log = format!( - "{} {:<5} [{}:{}] {}\n", + "{} {:<5} [{}:{}] {}{}\n", Utc::now().format("%Y-%m-%d %H:%M:%S%.3f"), record.level.to_string(), record.module_path, record.line, - record.args + record.args, + context, ); fs::OpenOptions::new() @@ -153,7 +232,7 @@ impl LogWriter for Writer { .target(record.module_path) .module_path(Some(record.module_path)) .line(Some(record.line)) - .args(format_args!("{}", record.args)) + .args(format_args!("{}{}", record.args, context)) .build(), ); #[cfg(feature = "uniffi")] @@ -162,7 +241,7 @@ impl LogWriter for Writer { .target(&record.module_path) .module_path(Some(&record.module_path)) .line(Some(record.line)) - .args(format_args!("{}", record.args)) + .args(format_args!("{}{}", record.args, context)) .build(), ); }, @@ -222,3 +301,128 @@ impl LdkLogger for Logger { } } } + +#[cfg(test)] +mod tests { + use std::sync::Mutex; + + use super::*; + + /// A minimal log facade logger that captures log output for testing. + struct TestLogger { + log: Arc>, + } + + impl log::Log for TestLogger { + fn enabled(&self, _metadata: &log::Metadata) -> bool { + true + } + + fn log(&self, record: &log::Record) { + *self.log.lock().unwrap() = record.args().to_string(); + } + + fn flush(&self) {} + } + + /// Tests that LogContext correctly formats all three structured fields + /// (channel_id, peer_id, payment_hash) with space prefixes and 6-char truncation. + #[test] + fn test_log_context_all_fields() { + let channel_id = ChannelId::from_bytes([ + 0xab, 0xcd, 0xef, 0x12, 0x34, 0x56, 0x78, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + ]); + let peer_id = PublicKey::from_slice(&[ + 0x02, 0xab, 0xcd, 0xef, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf1, 0x23, 0x45, + 0x67, 0x89, 0xab, 0xcd, 0xef, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf1, 0x23, + 0x45, 0x67, 0x89, 0xab, 0xcd, + ]) + .unwrap(); + let payment_hash = PaymentHash([ + 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + ]); + + let context = LogContext { + channel_id: Some(&channel_id), + peer_id: Some(&peer_id), + payment_hash: Some(&payment_hash), + }; + + assert_eq!(context.to_string(), " (ch:abcdef p:02abcd h:fedcba)"); + } + + /// Tests that LogContext returns an empty string when no fields are provided. 
+ #[test] + fn test_log_context_no_fields() { + let context = LogContext { channel_id: None, peer_id: None, payment_hash: None }; + assert_eq!(context.to_string(), ""); + } + + /// Tests that LogContext only includes present fields. + #[test] + fn test_log_context_partial_fields() { + let channel_id = ChannelId::from_bytes([ + 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + ]); + + let context = + LogContext { channel_id: Some(&channel_id), peer_id: None, payment_hash: None }; + assert_eq!(context.to_string(), " (ch:123456)"); + } + + /// Tests that LogFacadeWriter appends structured context fields to the log message. + #[test] + fn test_log_facade_writer_includes_structured_context() { + let log = Arc::new(Mutex::new(String::new())); + let test_logger = TestLogger { log: log.clone() }; + + let _ = log::set_boxed_logger(Box::new(test_logger)); + log::set_max_level(log::LevelFilter::Trace); + + let writer = Writer::LogFacadeWriter; + + let channel_id = ChannelId::from_bytes([ + 0xab, 0xcd, 0xef, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + ]); + let peer_id = PublicKey::from_slice(&[ + 0x02, 0xab, 0xcd, 0xef, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf1, 0x23, 0x45, + 0x67, 0x89, 0xab, 0xcd, 0xef, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf1, 0x23, + 0x45, 0x67, 0x89, 0xab, 0xcd, + ]) + .unwrap(); + + #[cfg(not(feature = "uniffi"))] + let record = LogRecord { + level: LogLevel::Info, + args: format_args!("Test message"), + module_path: "test_module", + line: 42, + peer_id: Some(peer_id), + channel_id: Some(channel_id), + payment_hash: None, + }; + + #[cfg(feature = "uniffi")] + let record = LogRecord { + level: LogLevel::Info, + args: "Test message".to_string(), + module_path: "test_module".to_string(), + line: 42, + peer_id: Some(peer_id), + channel_id: Some(channel_id), + payment_hash: None, + }; + + writer.log(record); + + assert_eq!(*log.lock().unwrap(), "Test message (ch:abcdef p:02abcd)"); + } +} diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index 60c313381..41597bfcc 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -98,8 +98,8 @@ impl Bolt11Payment { } let invoice = maybe_deref(invoice); - let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); - let payment_id = PaymentId(invoice.payment_hash().to_byte_array()); + let payment_hash = invoice.payment_hash(); + let payment_id = PaymentId(invoice.payment_hash().0); if let Some(payment) = self.payment_store.get(&payment_id) { if payment.status == PaymentStatus::Pending || payment.status == PaymentStatus::Succeeded @@ -204,8 +204,8 @@ impl Bolt11Payment { } } - let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); - let payment_id = PaymentId(invoice.payment_hash().to_byte_array()); + let payment_hash = invoice.payment_hash(); + let payment_id = PaymentId(invoice.payment_hash().0); if let Some(payment) = self.payment_store.get(&payment_id) { if payment.status == PaymentStatus::Pending || payment.status == PaymentStatus::Succeeded @@ -494,7 +494,7 @@ impl Bolt11Payment { } }; - let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); + let payment_hash = invoice.payment_hash(); let payment_secret = invoice.payment_secret(); let id = PaymentId(payment_hash.0); let preimage = if 
manual_claim_payment_hash.is_none() { @@ -712,7 +712,7 @@ impl Bolt11Payment { })?; // Register payment in payment store. - let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); + let payment_hash = invoice.payment_hash(); let payment_secret = invoice.payment_secret(); let lsp_fee_limits = LSPFeeLimits { max_total_opening_fee_msat: lsp_total_opening_fee, diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 0dd38edca..98f1d21ef 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -15,7 +15,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use lightning::blinded_path::message::BlindedMessagePath; use lightning::ln::channelmanager::{OptionalOfferPaymentParams, PaymentId, Retry}; -use lightning::offers::offer::{Amount, Offer as LdkOffer, Quantity}; +use lightning::offers::offer::{Amount, Offer as LdkOffer, OfferFromHrn, Quantity}; use lightning::offers::parse::Bolt12SemanticError; use lightning::routing::router::RouteParametersConfig; #[cfg(feature = "uniffi")] @@ -45,6 +45,11 @@ type Refund = lightning::offers::refund::Refund; #[cfg(feature = "uniffi")] type Refund = Arc; +#[cfg(not(feature = "uniffi"))] +type HumanReadableName = lightning::onion_message::dns_resolution::HumanReadableName; +#[cfg(feature = "uniffi")] +type HumanReadableName = Arc; + /// A payment handler allowing to create and pay [BOLT 12] offers and refunds. /// /// Should be retrieved by calling [`Node::bolt12_payment`]. @@ -193,6 +198,37 @@ impl Bolt12Payment { pub fn send_using_amount( &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, route_parameters: Option, + ) -> Result { + let payment_id = self.send_using_amount_inner( + offer, + amount_msat, + quantity, + payer_note, + route_parameters, + None, + )?; + Ok(payment_id) + } + + /// Internal helper to send a BOLT12 offer payment given an offer + /// and an amount in millisatoshi. + /// + /// This function contains the core payment logic and is called by + /// [`Self::send_using_amount`] and other internal logic that resolves + /// payment parameters (e.g. [`crate::UnifiedPayment::send`]). + /// + /// It wraps the core LDK `pay_for_offer` logic and handles necessary pre-checks, + /// payment ID generation, and payment details storage. + /// + /// The amount validation logic ensures the provided `amount_msat` is sufficient + /// based on the offer's required amount. + /// + /// If `hrn` is `Some`, the payment is initiated using [`ChannelManager::pay_for_offer_from_hrn`] + /// for offers resolved from a Human-Readable Name ([`HumanReadableName`]). + /// Otherwise, it falls back to the standard offer payment methods. 
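Since BIP 353 support hinges on `HumanReadableName`, here is a quick illustration of screening a candidate string before attempting an HRN-based payment. It relies only on `HumanReadableName::from_encoded`, which the unified `send` path below also calls; which exact strings parse (for example with or without the leading ₿) is left to LDK's implementation, so the sample output is not asserted.

```rust
use lightning::onion_message::dns_resolution::HumanReadableName;

/// Returns whether `candidate` parses as a BIP 353 Human-Readable Name
/// (`user@domain`, optionally prefixed with ₿).
fn is_hrn(candidate: &str) -> bool {
    HumanReadableName::from_encoded(candidate).is_ok()
}

fn main() {
    for candidate in ["₿satoshi@example.com", "satoshi@example.com", "lnbc1notanhrn"] {
        println!("{candidate}: {}", is_hrn(candidate));
    }
}
```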
+ pub(crate) fn send_using_amount_inner( + &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, + route_parameters: Option, hrn: Option, ) -> Result { if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); @@ -228,7 +264,11 @@ impl Bolt12Payment { retry_strategy, route_params_config: route_parameters, }; - let res = if let Some(quantity) = quantity { + let res = if let Some(hrn) = hrn { + let hrn = maybe_deref(&hrn); + let offer = OfferFromHrn { offer: offer.clone(), hrn: *hrn }; + self.channel_manager.pay_for_offer_from_hrn(&offer, amount_msat, payment_id, params) + } else if let Some(quantity) = quantity { self.channel_manager.pay_for_offer_with_quantity( &offer, Some(amount_msat), diff --git a/src/payment/mod.rs b/src/payment/mod.rs index f629960e1..42b5aff3b 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -11,15 +11,17 @@ pub(crate) mod asynchronous; mod bolt11; mod bolt12; mod onchain; +pub(crate) mod pending_payment_store; mod spontaneous; pub(crate) mod store; -mod unified_qr; +mod unified; pub use bolt11::Bolt11Payment; pub use bolt12::Bolt12Payment; pub use onchain::OnchainPayment; +pub use pending_payment_store::PendingPaymentDetails; pub use spontaneous::SpontaneousPayment; pub use store::{ ConfirmationStatus, LSPFeeLimits, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, }; -pub use unified_qr::{QrPaymentResult, UnifiedQrPayment}; +pub use unified::{UnifiedPayment, UnifiedPaymentResult}; diff --git a/src/payment/pending_payment_store.rs b/src/payment/pending_payment_store.rs new file mode 100644 index 000000000..580bdcbcc --- /dev/null +++ b/src/payment/pending_payment_store.rs @@ -0,0 +1,93 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use bitcoin::Txid; +use lightning::{impl_writeable_tlv_based, ln::channelmanager::PaymentId}; + +use crate::{ + data_store::{StorableObject, StorableObjectUpdate}, + payment::{store::PaymentDetailsUpdate, PaymentDetails}, +}; + +/// Represents a pending payment +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct PendingPaymentDetails { + /// The full payment details + pub details: PaymentDetails, + /// Transaction IDs that have replaced or conflict with this payment. 
+ pub conflicting_txids: Vec, +} + +impl PendingPaymentDetails { + pub(crate) fn new(details: PaymentDetails, conflicting_txids: Vec) -> Self { + Self { details, conflicting_txids } + } + + /// Convert to finalized payment for the main payment store + pub fn into_payment_details(self) -> PaymentDetails { + self.details + } +} + +impl_writeable_tlv_based!(PendingPaymentDetails, { + (0, details, required), + (2, conflicting_txids, optional_vec), +}); + +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct PendingPaymentDetailsUpdate { + pub id: PaymentId, + pub payment_update: Option, + pub conflicting_txids: Option>, +} + +impl StorableObject for PendingPaymentDetails { + type Id = PaymentId; + type Update = PendingPaymentDetailsUpdate; + + fn id(&self) -> Self::Id { + self.details.id + } + + fn update(&mut self, update: &Self::Update) -> bool { + let mut updated = false; + + // Update the underlying payment details if present + if let Some(payment_update) = &update.payment_update { + updated |= self.details.update(payment_update); + } + + if let Some(new_conflicting_txids) = &update.conflicting_txids { + if &self.conflicting_txids != new_conflicting_txids { + self.conflicting_txids = new_conflicting_txids.clone(); + updated = true; + } + } + + updated + } + + fn to_update(&self) -> Self::Update { + self.into() + } +} + +impl StorableObjectUpdate for PendingPaymentDetailsUpdate { + fn id(&self) -> ::Id { + self.id + } +} + +impl From<&PendingPaymentDetails> for PendingPaymentDetailsUpdate { + fn from(value: &PendingPaymentDetails) -> Self { + Self { + id: value.id(), + payment_update: Some(value.details.to_update()), + conflicting_txids: Some(value.conflicting_txids.clone()), + } + } +} diff --git a/src/payment/store.rs b/src/payment/store.rs index 184de2ea9..15e94190c 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -605,7 +605,6 @@ impl StorableObjectUpdate for PaymentDetailsUpdate { #[cfg(test)] mod tests { - use bitcoin::io::Cursor; use lightning::util::ser::Readable; use super::*; @@ -657,16 +656,12 @@ mod tests { let old_bolt11_encoded = old_bolt11_payment.encode(); assert_eq!( old_bolt11_payment, - OldPaymentDetails::read(&mut Cursor::new(old_bolt11_encoded.clone())).unwrap() + OldPaymentDetails::read(&mut &*old_bolt11_encoded.clone()).unwrap() ); - let bolt11_decoded = - PaymentDetails::read(&mut Cursor::new(old_bolt11_encoded)).unwrap(); + let bolt11_decoded = PaymentDetails::read(&mut &*old_bolt11_encoded).unwrap(); let bolt11_reencoded = bolt11_decoded.encode(); - assert_eq!( - bolt11_decoded, - PaymentDetails::read(&mut Cursor::new(bolt11_reencoded)).unwrap() - ); + assert_eq!(bolt11_decoded, PaymentDetails::read(&mut &*bolt11_reencoded).unwrap()); match bolt11_decoded.kind { PaymentKind::Bolt11 { hash: h, preimage: p, secret: s } => { @@ -700,15 +695,14 @@ mod tests { let old_bolt11_jit_encoded = old_bolt11_jit_payment.encode(); assert_eq!( old_bolt11_jit_payment, - OldPaymentDetails::read(&mut Cursor::new(old_bolt11_jit_encoded.clone())).unwrap() + OldPaymentDetails::read(&mut &*old_bolt11_jit_encoded.clone()).unwrap() ); - let bolt11_jit_decoded = - PaymentDetails::read(&mut Cursor::new(old_bolt11_jit_encoded)).unwrap(); + let bolt11_jit_decoded = PaymentDetails::read(&mut &*old_bolt11_jit_encoded).unwrap(); let bolt11_jit_reencoded = bolt11_jit_decoded.encode(); assert_eq!( bolt11_jit_decoded, - PaymentDetails::read(&mut Cursor::new(bolt11_jit_reencoded)).unwrap() + PaymentDetails::read(&mut &*bolt11_jit_reencoded).unwrap() ); match 
bolt11_jit_decoded.kind { @@ -746,15 +740,14 @@ mod tests { let old_spontaneous_encoded = old_spontaneous_payment.encode(); assert_eq!( old_spontaneous_payment, - OldPaymentDetails::read(&mut Cursor::new(old_spontaneous_encoded.clone())).unwrap() + OldPaymentDetails::read(&mut &*old_spontaneous_encoded.clone()).unwrap() ); - let spontaneous_decoded = - PaymentDetails::read(&mut Cursor::new(old_spontaneous_encoded)).unwrap(); + let spontaneous_decoded = PaymentDetails::read(&mut &*old_spontaneous_encoded).unwrap(); let spontaneous_reencoded = spontaneous_decoded.encode(); assert_eq!( spontaneous_decoded, - PaymentDetails::read(&mut Cursor::new(spontaneous_reencoded)).unwrap() + PaymentDetails::read(&mut &*spontaneous_reencoded).unwrap() ); match spontaneous_decoded.kind { diff --git a/src/payment/unified_qr.rs b/src/payment/unified.rs similarity index 74% rename from src/payment/unified_qr.rs rename to src/payment/unified.rs index 6ebf25563..671af14ff 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified.rs @@ -5,28 +5,37 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -//! Holds a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], and [BOLT 12] payment -//! options. +//! Holds a payment handler that supports creating and paying to [BIP 21] URIs with on-chain, [BOLT 11], +//! and [BOLT 12] payment options. +//! +//! Also supports sending payments to [BIP 353] Human-Readable Names. //! //! [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +//! [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki //! [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md //! [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md use std::sync::Arc; +use std::time::Duration; use std::vec::IntoIter; use bip21::de::ParamKind; use bip21::{DeserializationError, DeserializeParams, Param, SerializeParams}; -use bitcoin::address::{NetworkChecked, NetworkUnchecked}; +use bitcoin::address::NetworkChecked; use bitcoin::{Amount, Txid}; +use bitcoin_payment_instructions::amount::Amount as BPIAmount; +use bitcoin_payment_instructions::{PaymentInstructions, PaymentMethod}; use lightning::ln::channelmanager::PaymentId; use lightning::offers::offer::Offer; +use lightning::onion_message::dns_resolution::HumanReadableName; use lightning::routing::router::RouteParametersConfig; use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; +use crate::config::HRN_RESOLUTION_TIMEOUT_SECS; use crate::error::Error; use crate::ffi::maybe_wrap; use crate::logger::{log_error, LdkLogger, Logger}; use crate::payment::{Bolt11Payment, Bolt12Payment, OnchainPayment}; +use crate::types::HRNResolver; use crate::Config; type Uri<'a> = bip21::Uri<'a, NetworkChecked, Extras>; @@ -37,29 +46,34 @@ struct Extras { bolt12_offer: Option, } -/// A payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], and [BOLT 12] payment -/// option. +/// A payment handler that supports creating and paying to [BIP 21] URIs with on-chain, [BOLT 11], +/// and [BOLT 12] payment options. +/// +/// Also supports sending payments to [BIP 353] Human-Readable Names. 
/// -/// Should be retrieved by calling [`Node::unified_qr_payment`] +/// Should be retrieved by calling [`Node::unified_payment`] /// /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +/// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md -/// [`Node::unified_qr_payment`]: crate::Node::unified_qr_payment -pub struct UnifiedQrPayment { +/// [`Node::unified_payment`]: crate::Node::unified_payment +pub struct UnifiedPayment { onchain_payment: Arc, bolt11_invoice: Arc, bolt12_payment: Arc, config: Arc, logger: Arc, + hrn_resolver: Arc, } -impl UnifiedQrPayment { +impl UnifiedPayment { pub(crate) fn new( onchain_payment: Arc, bolt11_invoice: Arc, bolt12_payment: Arc, config: Arc, logger: Arc, + hrn_resolver: Arc, ) -> Self { - Self { onchain_payment, bolt11_invoice, bolt12_payment, config, logger } + Self { onchain_payment, bolt11_invoice, bolt12_payment, config, logger, hrn_resolver } } /// Generates a URI with an on-chain address, [BOLT 11] invoice and [BOLT 12] offer. @@ -129,72 +143,160 @@ impl UnifiedQrPayment { Ok(format_uri(uri)) } - /// Sends a payment given a [BIP 21] URI. + /// Sends a payment given a [BIP 21] URI or [BIP 353] Human-Readable Name. /// /// This method parses the provided URI string and attempts to send the payment. If the URI /// has an offer and or invoice, it will try to pay the offer first followed by the invoice. /// If they both fail, the on-chain payment will be paid. /// - /// Returns a `QrPaymentResult` indicating the outcome of the payment. If an error + /// Returns a [`UnifiedPaymentResult`] indicating the outcome of the payment. If an error /// occurs, an `Error` is returned detailing the issue encountered. /// /// If `route_parameters` are provided they will override the default as well as the /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. /// /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki - pub fn send( - &self, uri_str: &str, route_parameters: Option, - ) -> Result { - let uri: bip21::Uri = - uri_str.parse().map_err(|_| Error::InvalidUri)?; - - let uri_network_checked = - uri.clone().require_network(self.config.network).map_err(|_| Error::InvalidNetwork)?; - - if let Some(offer) = uri_network_checked.extras.bolt12_offer { - let offer = maybe_wrap(offer); - match self.bolt12_payment.send(&offer, None, None, route_parameters) { - Ok(payment_id) => return Ok(QrPaymentResult::Bolt12 { payment_id }), - Err(e) => log_error!(self.logger, "Failed to send BOLT12 offer: {:?}. This is part of a unified QR code payment. Falling back to the BOLT11 invoice.", e), - } - } - - if let Some(invoice) = uri_network_checked.extras.bolt11_invoice { - let invoice = maybe_wrap(invoice); - match self.bolt11_invoice.send(&invoice, route_parameters) { - Ok(payment_id) => return Ok(QrPaymentResult::Bolt11 { payment_id }), - Err(e) => log_error!(self.logger, "Failed to send BOLT11 invoice: {:?}. This is part of a unified QR code payment. 
Falling back to the on-chain transaction.", e), - } - } + /// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki + pub async fn send( + &self, uri_str: &str, amount_msat: Option, + route_parameters: Option, + ) -> Result { + let parse_fut = PaymentInstructions::parse( + uri_str, + self.config.network, + self.hrn_resolver.as_ref(), + false, + ); - let amount = match uri_network_checked.amount { - Some(amount) => amount, - None => { - log_error!(self.logger, "No amount specified in the URI. Aborting the payment."); - return Err(Error::InvalidAmount); + let instructions = + tokio::time::timeout(Duration::from_secs(HRN_RESOLUTION_TIMEOUT_SECS), parse_fut) + .await + .map_err(|e| { + log_error!(self.logger, "Payment instructions resolution timed out: {:?}", e); + Error::UriParameterParsingFailed + })? + .map_err(|e| { + log_error!(self.logger, "Failed to parse payment instructions: {:?}", e); + Error::UriParameterParsingFailed + })?; + + let resolved = match instructions { + PaymentInstructions::ConfigurableAmount(instr) => { + let amount_msat = amount_msat.ok_or_else(|| { + log_error!(self.logger, "No amount specified. Aborting the payment."); + Error::InvalidAmount + })?; + + let amt = BPIAmount::from_milli_sats(amount_msat).map_err(|e| { + log_error!(self.logger, "Error while converting amount : {:?}", e); + Error::InvalidAmount + })?; + + let fut = instr.set_amount(amt, self.hrn_resolver.as_ref()); + + tokio::time::timeout(Duration::from_secs(HRN_RESOLUTION_TIMEOUT_SECS), fut) + .await + .map_err(|e| { + log_error!( + self.logger, + "Payment instructions resolution timed out: {:?}", + e + ); + Error::UriParameterParsingFailed + })? + .map_err(|e| { + log_error!(self.logger, "Failed to set amount: {:?}", e); + Error::InvalidAmount + })? + }, + PaymentInstructions::FixedAmount(instr) => { + if let Some(user_amount_msat) = amount_msat { + if instr.max_amount().map_or(false, |amt| user_amount_msat < amt.milli_sats()) { + log_error!(self.logger, "Amount specified is less than the amount in the parsed URI. Aborting the payment."); + return Err(Error::InvalidAmount); + } + } + instr }, }; - let txid = self.onchain_payment.send_to_address( - &uri_network_checked.address, - amount.to_sat(), - None, - )?; + let mut sorted_payment_methods = resolved.methods().to_vec(); + sorted_payment_methods.sort_by_key(|method| match method { + PaymentMethod::LightningBolt12(_) => 0, + PaymentMethod::LightningBolt11(_) => 1, + PaymentMethod::OnChain(_) => 2, + }); + + for method in sorted_payment_methods { + match method { + PaymentMethod::LightningBolt12(offer) => { + let offer = maybe_wrap(offer.clone()); + + let payment_result = if let Ok(hrn) = HumanReadableName::from_encoded(uri_str) { + let hrn = maybe_wrap(hrn.clone()); + self.bolt12_payment.send_using_amount_inner(&offer, amount_msat.unwrap_or(0), None, None, route_parameters, Some(hrn)) + } else if let Some(amount_msat) = amount_msat { + self.bolt12_payment.send_using_amount(&offer, amount_msat, None, None, route_parameters) + } else { + self.bolt12_payment.send(&offer, None, None, route_parameters) + } + .map_err(|e| { + log_error!(self.logger, "Failed to send BOLT12 offer: {:?}. This is part of a unified payment. 
Falling back to the BOLT11 invoice.", e); + e + }); + + if let Ok(payment_id) = payment_result { + return Ok(UnifiedPaymentResult::Bolt12 { payment_id }); + } + }, + PaymentMethod::LightningBolt11(invoice) => { + let invoice = maybe_wrap(invoice.clone()); + let payment_result = self.bolt11_invoice.send(&invoice, route_parameters) + .map_err(|e| { + log_error!(self.logger, "Failed to send BOLT11 invoice: {:?}. This is part of a unified payment. Falling back to the on-chain transaction.", e); + e + }); + + if let Ok(payment_id) = payment_result { + return Ok(UnifiedPaymentResult::Bolt11 { payment_id }); + } + }, + PaymentMethod::OnChain(address) => { + let amount = resolved.onchain_payment_amount().ok_or_else(|| { + log_error!(self.logger, "No amount specified. Aborting the payment."); + Error::InvalidAmount + })?; + + let amt_sats = amount.sats().map_err(|_| { + log_error!( + self.logger, + "Amount in sats returned an error. Aborting the payment." + ); + Error::InvalidAmount + })?; + + let txid = self.onchain_payment.send_to_address(&address, amt_sats, None)?; + return Ok(UnifiedPaymentResult::Onchain { txid }); + }, + } + } - Ok(QrPaymentResult::Onchain { txid }) + log_error!(self.logger, "Payable methods not found in URI"); + Err(Error::PaymentSendingFailed) } } -/// Represents the result of a payment made using a [BIP 21] QR code. +/// Represents the result of a payment made using a [BIP 21] URI or a [BIP 353] Human-Readable Name. /// /// After a successful on-chain transaction, the transaction ID ([`Txid`]) is returned. /// For BOLT11 and BOLT12 payments, the corresponding [`PaymentId`] is returned. /// /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +/// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki /// [`PaymentId`]: lightning::ln::channelmanager::PaymentId /// [`Txid`]: bitcoin::hash_types::Txid #[derive(Debug)] -pub enum QrPaymentResult { +pub enum UnifiedPaymentResult { /// An on-chain payment. Onchain { /// The transaction ID (txid) of the on-chain payment. @@ -310,10 +412,10 @@ impl DeserializationError for Extras { mod tests { use std::str::FromStr; + use bitcoin::address::NetworkUnchecked; use bitcoin::{Address, Network}; - use super::*; - use crate::payment::unified_qr::Extras; + use super::{Amount, Bolt11Invoice, Extras, Offer}; #[test] fn parse_uri() { diff --git a/src/runtime.rs b/src/runtime.rs index 1e9883ae4..39a34ddfe 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -9,6 +9,7 @@ use std::future::Future; use std::sync::{Arc, Mutex}; use std::time::Duration; +use lightning::util::native_async::FutureSpawner; use tokio::task::{JoinHandle, JoinSet}; use crate::config::{ @@ -219,3 +220,29 @@ enum RuntimeMode { Owned(tokio::runtime::Runtime), Handle(tokio::runtime::Handle), } + +pub(crate) struct RuntimeSpawner { + runtime: Arc, +} + +impl RuntimeSpawner { + pub(crate) fn new(runtime: Arc) -> Self { + Self { runtime } + } +} + +impl FutureSpawner for RuntimeSpawner { + type E = tokio::sync::oneshot::error::RecvError; + type SpawnedFutureResult = tokio::sync::oneshot::Receiver; + fn spawn + Send + 'static>( + &self, future: F, + ) -> Self::SpawnedFutureResult { + let (result, output) = tokio::sync::oneshot::channel(); + self.runtime.spawn_cancellable_background_task(async move { + // We don't care if the send works or not, if the receiver is dropped its not our + // problem. 
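The `RuntimeSpawner` added in `src/runtime.rs` here bridges LDK's `FutureSpawner` trait to the node's tokio runtime through a oneshot channel. A generic, standalone sketch of that bridge pattern, with `tokio::spawn` standing in for the node's cancellable background tasks:

```rust
use std::future::Future;

use tokio::sync::oneshot;

/// Spawns `future` on the ambient tokio runtime and returns a receiver for its
/// output: the same oneshot-bridge pattern `RuntimeSpawner::spawn` uses above.
fn spawn_with_result<F, T>(future: F) -> oneshot::Receiver<T>
where
    F: Future<Output = T> + Send + 'static,
    T: Send + 'static,
{
    let (tx, rx) = oneshot::channel();
    tokio::spawn(async move {
        // If the receiver was dropped, nobody is waiting for the result.
        let _ = tx.send(future.await);
    });
    rx
}

#[tokio::main]
async fn main() {
    let rx = spawn_with_result(async { 2 + 2 });
    assert_eq!(rx.await.unwrap(), 4);
}
```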
+ let _ = result.send(future.await); + }); + output + } +} diff --git a/src/scoring.rs b/src/scoring.rs index 6385f2f56..3ed7b9d1e 100644 --- a/src/scoring.rs +++ b/src/scoring.rs @@ -1,13 +1,13 @@ -use std::io::Cursor; use std::sync::{Arc, Mutex, RwLock}; -use std::time::{Duration, SystemTime}; +use std::time::SystemTime; use lightning::routing::scoring::ChannelLiquidities; use lightning::util::ser::Readable; use lightning::{log_error, log_info, log_trace}; use crate::config::{ - EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL, EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS, + EXTERNAL_PATHFINDING_SCORES_MAX_SIZE, EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL, + EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS, }; use crate::io::utils::write_external_pathfinding_scores_to_cache; use crate::logger::LdkLogger; @@ -54,42 +54,30 @@ async fn sync_external_scores( logger: &Logger, scorer: &Mutex, node_metrics: &RwLock, kv_store: Arc, url: &String, ) -> () { - let response = tokio::time::timeout( - Duration::from_secs(EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS), - reqwest::get(url), - ) - .await; + let request = bitreq::get(url) + .with_timeout(EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS) + .with_max_body_size(Some(EXTERNAL_PATHFINDING_SCORES_MAX_SIZE)); - let response = match response { - Ok(resp) => resp, - Err(e) => { - log_error!(logger, "Retrieving external scores timed out: {}", e); - return; - }, - }; - let response = match response { + let response = match request.send_async().await { Ok(resp) => resp, Err(e) => { log_error!(logger, "Failed to retrieve external scores update: {}", e); return; }, }; - let body = match response.bytes().await { - Ok(bytes) => bytes, - Err(e) => { - log_error!(logger, "Failed to read external scores update: {}", e); - return; - }, - }; - let mut reader = Cursor::new(body); + if response.status_code != 200 { + log_error!( + logger, + "Failed to retrieve external scores update: HTTP {}", + response.status_code + ); + return; + } + let mut reader = response.as_bytes(); match ChannelLiquidities::read(&mut reader) { Ok(liquidities) => { - if let Err(e) = write_external_pathfinding_scores_to_cache( - Arc::clone(&kv_store), - &liquidities, - logger, - ) - .await + if let Err(e) = + write_external_pathfinding_scores_to_cache(&*kv_store, &liquidities, logger).await { log_error!(logger, "Failed to persist external scores to cache: {}", e); } @@ -100,10 +88,9 @@ async fn sync_external_scores( let mut locked_node_metrics = node_metrics.write().unwrap(); locked_node_metrics.latest_pathfinding_scores_sync_timestamp = Some(duration_since_epoch.as_secs()); - write_node_metrics(&*locked_node_metrics, Arc::clone(&kv_store), logger) - .unwrap_or_else(|e| { - log_error!(logger, "Persisting node metrics failed: {}", e); - }); + write_node_metrics(&*locked_node_metrics, &*kv_store, logger).unwrap_or_else(|e| { + log_error!(logger, "Persisting node metrics failed: {}", e); + }); log_trace!(logger, "External scores merged successfully"); }, Err(e) => { diff --git a/src/types.rs b/src/types.rs index ea4de2a63..a7ea89e48 100644 --- a/src/types.rs +++ b/src/types.rs @@ -11,7 +11,8 @@ use std::pin::Pin; use std::sync::{Arc, Mutex}; use bitcoin::secp256k1::PublicKey; -use bitcoin::OutPoint; +use bitcoin::{OutPoint, ScriptBuf}; +use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; use lightning::chain::chainmonitor; use lightning::impl_writeable_tlv_based; use lightning::ln::channel_state::ChannelDetails as LdkChannelDetails; @@ -22,7 +23,9 @@ use 
lightning::routing::gossip; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{CombinedScorer, ProbabilisticScoringFeeParameters}; use lightning::sign::InMemorySigner; -use lightning::util::persist::{KVStore, KVStoreSync, MonitorUpdatingPersister}; +use lightning::util::persist::{ + KVStore, KVStoreSync, MonitorUpdatingPersister, MonitorUpdatingPersisterAsync, +}; use lightning::util::ser::{Readable, Writeable, Writer}; use lightning::util::sweep::OutputSweeper; use lightning_block_sync::gossip::GossipVerifier; @@ -34,10 +37,10 @@ use crate::chain::ChainSource; use crate::config::ChannelConfig; use crate::data_store::DataStore; use crate::fee_estimator::OnchainFeeEstimator; -use crate::gossip::RuntimeSpawner; use crate::logger::Logger; use crate::message_handler::NodeCustomMessageHandler; -use crate::payment::PaymentDetails; +use crate::payment::{PaymentDetails, PendingPaymentDetails}; +use crate::runtime::RuntimeSpawner; /// A supertrait that requires that a type implements both [`KVStore`] and [`KVStoreSync`] at the /// same time. @@ -50,7 +53,7 @@ where { } -pub(crate) trait DynStoreTrait: Send + Sync { +pub trait DynStoreTrait: Send + Sync { fn read_async( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> Pin, bitcoin::io::Error>> + Send + 'static>>; @@ -130,9 +133,11 @@ impl<'a> KVStoreSync for dyn DynStoreTrait + 'a { } } -pub(crate) type DynStore = dyn DynStoreTrait; +/// Type alias for any store that implements DynStoreTrait. +pub type DynStore = dyn DynStoreTrait; -pub(crate) struct DynStoreWrapper(pub(crate) T); +/// A wrapper that allows using any [`SyncAndAsyncKVStore`] implementor as a trait object. +pub struct DynStoreWrapper(pub T); impl DynStoreTrait for DynStoreWrapper { fn read_async( @@ -184,6 +189,16 @@ impl DynStoreTrait for DynStoreWrapper } } +pub(crate) type AsyncPersister = MonitorUpdatingPersisterAsync< + Arc, + RuntimeSpawner, + Arc, + Arc, + Arc, + Arc, + Arc, +>; + pub type Persister = MonitorUpdatingPersister< Arc, Arc, @@ -253,7 +268,7 @@ pub(crate) type Scorer = CombinedScorer, Arc>; pub(crate) type Graph = gossip::NetworkGraph>; -pub(crate) type UtxoLookup = GossipVerifier, Arc>; +pub(crate) type UtxoLookup = GossipVerifier>; pub(crate) type P2PGossipSync = lightning::routing::gossip::P2PGossipSync, Arc, Arc>; @@ -276,10 +291,12 @@ pub(crate) type OnionMessenger = lightning::onion_message::messenger::OnionMesse Arc, Arc, Arc, - IgnoringMessageHandler, + Arc, IgnoringMessageHandler, >; +pub(crate) type HRNResolver = LDKOnionMessageDNSSECHrnResolver, Arc>; + pub(crate) type MessageRouter = lightning::onion_message::messenger::DefaultMessageRouter< Arc, Arc, @@ -356,6 +373,15 @@ pub struct ChannelDetails { /// state until the splice transaction reaches sufficient confirmations to be locked (and we /// exchange `splice_locked` messages with our peer). pub funding_txo: Option, + /// The witness script that is used to lock the channel's funding output to commitment transactions. + /// + /// This field will be `None` if we have not negotiated the funding transaction with our + /// counterparty already. + /// + /// When a channel is spliced, this continues to refer to the original pre-splice channel + /// state until the splice transaction reaches sufficient confirmations to be locked (and we + /// exchange `splice_locked` messages with our peer). + pub funding_redeem_script: Option, /// The position of the funding transaction in the chain. 
None if the funding transaction has /// not yet been confirmed and the channel fully opened. /// @@ -512,6 +538,7 @@ impl From for ChannelDetails { channel_id: value.channel_id, counterparty_node_id: value.counterparty.node_id, funding_txo: value.funding_txo.map(|o| o.into_bitcoin_outpoint()), + funding_redeem_script: value.funding_redeem_script, short_channel_id: value.short_channel_id, outbound_scid_alias: value.outbound_scid_alias, inbound_scid_alias: value.inbound_scid_alias, @@ -596,3 +623,5 @@ impl From<&(u64, Vec)> for CustomTlvRecord { CustomTlvRecord { type_num: tlv.0, value: tlv.1.clone() } } } + +pub(crate) type PendingPaymentStore = DataStore>; diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index b2366a45f..05c743bd9 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -12,6 +12,7 @@ use std::sync::{Arc, Mutex}; use bdk_chain::spk_client::{FullScanRequest, SyncRequest}; use bdk_wallet::descriptor::ExtendedDescriptor; +use bdk_wallet::event::WalletEvent; #[allow(deprecated)] use bdk_wallet::SignOptions; use bdk_wallet::{Balance, KeychainKind, PersistedWallet, Update}; @@ -25,7 +26,7 @@ use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; use bitcoin::secp256k1::{All, PublicKey, Scalar, Secp256k1, SecretKey}; use bitcoin::{ - Address, Amount, FeeRate, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, Weight, + Address, Amount, FeeRate, OutPoint, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, Weight, WitnessProgram, WitnessVersion, }; use lightning::chain::chaininterface::BroadcasterInterface; @@ -49,8 +50,10 @@ use crate::config::Config; use crate::fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; use crate::payment::store::ConfirmationStatus; -use crate::payment::{PaymentDetails, PaymentDirection, PaymentStatus}; -use crate::types::{Broadcaster, PaymentStore}; +use crate::payment::{ + PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, PendingPaymentDetails, +}; +use crate::types::{Broadcaster, PaymentStore, PendingPaymentStore}; use crate::Error; pub(crate) enum OnchainSendAmount { @@ -71,6 +74,7 @@ pub(crate) struct Wallet { payment_store: Arc, config: Arc, logger: Arc, + pending_payment_store: Arc, } impl Wallet { @@ -78,11 +82,20 @@ impl Wallet { wallet: bdk_wallet::PersistedWallet, wallet_persister: KVStoreWalletPersister, broadcaster: Arc, fee_estimator: Arc, payment_store: Arc, - config: Arc, logger: Arc, + config: Arc, logger: Arc, pending_payment_store: Arc, ) -> Self { let inner = Mutex::new(wallet); let persister = Mutex::new(wallet_persister); - Self { inner, persister, broadcaster, fee_estimator, payment_store, config, logger } + Self { + inner, + persister, + broadcaster, + fee_estimator, + payment_store, + config, + logger, + pending_payment_store, + } } pub(crate) fn get_full_scan_request(&self) -> FullScanRequest { @@ -114,15 +127,15 @@ impl Wallet { pub(crate) fn apply_update(&self, update: impl Into) -> Result<(), Error> { let mut locked_wallet = self.inner.lock().unwrap(); - match locked_wallet.apply_update(update) { - Ok(()) => { + match locked_wallet.apply_update_events(update) { + Ok(events) => { let mut locked_persister = self.persister.lock().unwrap(); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet: {}", e); Error::PersistenceFailed })?; - self.update_payment_store(&mut *locked_wallet).map_err(|e| { + 
self.update_payment_store(&mut *locked_wallet, events).map_err(|e| { log_error!(self.logger, "Failed to update payment store: {}", e); Error::PersistenceFailed })?; @@ -152,75 +165,174 @@ impl Wallet { Ok(()) } + pub(crate) fn insert_txo(&self, outpoint: OutPoint, txout: TxOut) -> Result<(), Error> { + let mut locked_wallet = self.inner.lock().unwrap(); + locked_wallet.insert_txout(outpoint, txout); + + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + + Ok(()) + } + fn update_payment_store<'a>( &self, locked_wallet: &'a mut PersistedWallet, + mut events: Vec, ) -> Result<(), Error> { - for wtx in locked_wallet.transactions() { - let id = PaymentId(wtx.tx_node.txid.to_byte_array()); - let txid = wtx.tx_node.txid; - let (payment_status, confirmation_status) = match wtx.chain_position { - bdk_chain::ChainPosition::Confirmed { anchor, .. } => { - let confirmation_height = anchor.block_id.height; + if events.is_empty() { + return Ok(()); + } + + // Sort events to ensure proper sequencing for data consistency: + // 1. TXReplaced (0) before TxUnconfirmed (1) - Critical for RBF handling + // When a transaction is replaced via RBF, both events fire. Processing + // TXReplaced first stores the replaced transaction, allowing TxUnconfirmed + // to detect and skip duplicate payment record creation. + // 2. TxConfirmed (2) before ChainTipChanged (3) - Ensures height accuracy + // ChainTipChanged updates block height. Processing TxConfirmed first ensures + // it references the correct height for confirmation depth calculations. + // 3. Other events follow in deterministic order for predictable processing + if events.len() > 1 { + events.sort_by_key(|e| match e { + WalletEvent::TxReplaced { .. } => 0, + WalletEvent::TxUnconfirmed { .. } => 1, + WalletEvent::TxConfirmed { .. } => 2, + WalletEvent::ChainTipChanged { .. } => 3, + WalletEvent::TxDropped { .. } => 4, + _ => 5, + }); + } + + for event in events { + match event { + WalletEvent::TxConfirmed { txid, tx, block_time, .. } => { let cur_height = locked_wallet.latest_checkpoint().height(); + let confirmation_height = block_time.block_id.height; let payment_status = if cur_height >= confirmation_height + ANTI_REORG_DELAY - 1 { PaymentStatus::Succeeded } else { PaymentStatus::Pending }; + let confirmation_status = ConfirmationStatus::Confirmed { - block_hash: anchor.block_id.hash, + block_hash: block_time.block_id.hash, height: confirmation_height, - timestamp: anchor.confirmation_time, + timestamp: block_time.confirmation_time, }; - (payment_status, confirmation_status) - }, - bdk_chain::ChainPosition::Unconfirmed { .. } => { - (PaymentStatus::Pending, ConfirmationStatus::Unconfirmed) - }, - }; - // TODO: It would be great to introduce additional variants for - // `ChannelFunding` and `ChannelClosing`. For the former, we could just - // take a reference to `ChannelManager` here and check against - // `list_channels`. But for the latter the best approach is much less - // clear: for force-closes/HTLC spends we should be good querying - // `OutputSweeper::tracked_spendable_outputs`, but regular channel closes - // (i.e., `SpendableOutputDescriptor::StaticOutput` variants) are directly - // spent to a wallet address. 
The only solution I can come up with is to - // create and persist a list of 'static pending outputs' that we could use - // here to determine the `PaymentKind`, but that's not really satisfactory, so - // we're punting on it until we can come up with a better solution. - let kind = crate::payment::PaymentKind::Onchain { txid, status: confirmation_status }; - let fee = locked_wallet.calculate_fee(&wtx.tx_node.tx).unwrap_or(Amount::ZERO); - let (sent, received) = locked_wallet.sent_and_received(&wtx.tx_node.tx); - let (direction, amount_msat) = if sent > received { - let direction = PaymentDirection::Outbound; - let amount_msat = Some( - sent.to_sat().saturating_sub(fee.to_sat()).saturating_sub(received.to_sat()) - * 1000, - ); - (direction, amount_msat) - } else { - let direction = PaymentDirection::Inbound; - let amount_msat = Some( - received.to_sat().saturating_sub(sent.to_sat().saturating_sub(fee.to_sat())) - * 1000, - ); - (direction, amount_msat) - }; - let fee_paid_msat = Some(fee.to_sat() * 1000); + let payment_id = self + .find_payment_by_txid(txid) + .unwrap_or_else(|| PaymentId(txid.to_byte_array())); + + let payment = self.create_payment_from_tx( + locked_wallet, + txid, + payment_id, + &tx, + payment_status, + confirmation_status, + ); - let payment = PaymentDetails::new( - id, - kind, - amount_msat, - fee_paid_msat, - direction, - payment_status, - ); + let pending_payment = + self.create_pending_payment_from_tx(payment.clone(), Vec::new()); - self.payment_store.insert_or_update(payment)?; + self.payment_store.insert_or_update(payment)?; + self.pending_payment_store.insert_or_update(pending_payment)?; + }, + WalletEvent::ChainTipChanged { new_tip, .. } => { + // Get all payments that are Pending with Confirmed status + let pending_payments: Vec = + self.pending_payment_store.list_filter(|p| { + p.details.status == PaymentStatus::Pending + && matches!( + p.details.kind, + PaymentKind::Onchain { + status: ConfirmationStatus::Confirmed { .. }, + .. + } + ) + }); + + for mut payment in pending_payments { + if let PaymentKind::Onchain { + status: ConfirmationStatus::Confirmed { height, .. }, + .. + } = payment.details.kind + { + let payment_id = payment.details.id; + if new_tip.height >= height + ANTI_REORG_DELAY - 1 { + payment.details.status = PaymentStatus::Succeeded; + self.payment_store.insert_or_update(payment.details)?; + self.pending_payment_store.remove(&payment_id)?; + } + } + } + }, + WalletEvent::TxUnconfirmed { txid, tx, old_block_time: None } => { + let payment_id = self + .find_payment_by_txid(txid) + .unwrap_or_else(|| PaymentId(txid.to_byte_array())); + + let payment = self.create_payment_from_tx( + locked_wallet, + txid, + payment_id, + &tx, + PaymentStatus::Pending, + ConfirmationStatus::Unconfirmed, + ); + let pending_payment = + self.create_pending_payment_from_tx(payment.clone(), Vec::new()); + self.payment_store.insert_or_update(payment)?; + self.pending_payment_store.insert_or_update(pending_payment)?; + }, + WalletEvent::TxReplaced { txid, conflicts, tx, .. 
} => { + let payment_id = self + .find_payment_by_txid(txid) + .unwrap_or_else(|| PaymentId(txid.to_byte_array())); + + // Collect all conflict txids + let conflict_txids: Vec = + conflicts.iter().map(|(_, conflict_txid)| *conflict_txid).collect(); + + let payment = self.create_payment_from_tx( + locked_wallet, + txid, + payment_id, + &tx, + PaymentStatus::Pending, + ConfirmationStatus::Unconfirmed, + ); + let pending_payment_details = self + .create_pending_payment_from_tx(payment.clone(), conflict_txids.clone()); + + self.pending_payment_store.insert_or_update(pending_payment_details)?; + }, + WalletEvent::TxDropped { txid, tx } => { + let payment_id = self + .find_payment_by_txid(txid) + .unwrap_or_else(|| PaymentId(txid.to_byte_array())); + let payment = self.create_payment_from_tx( + locked_wallet, + txid, + payment_id, + &tx, + PaymentStatus::Pending, + ConfirmationStatus::Unconfirmed, + ); + let pending_payment = + self.create_pending_payment_from_tx(payment.clone(), Vec::new()); + self.payment_store.insert_or_update(payment)?; + self.pending_payment_store.insert_or_update(pending_payment)?; + }, + _ => { + continue; + }, + }; } Ok(()) @@ -793,6 +905,79 @@ impl Wallet { Ok(tx) } + + fn create_payment_from_tx( + &self, locked_wallet: &PersistedWallet, txid: Txid, + payment_id: PaymentId, tx: &Transaction, payment_status: PaymentStatus, + confirmation_status: ConfirmationStatus, + ) -> PaymentDetails { + // TODO: It would be great to introduce additional variants for + // `ChannelFunding` and `ChannelClosing`. For the former, we could just + // take a reference to `ChannelManager` here and check against + // `list_channels`. But for the latter the best approach is much less + // clear: for force-closes/HTLC spends we should be good querying + // `OutputSweeper::tracked_spendable_outputs`, but regular channel closes + // (i.e., `SpendableOutputDescriptor::StaticOutput` variants) are directly + // spent to a wallet address. The only solution I can come up with is to + // create and persist a list of 'static pending outputs' that we could use + // here to determine the `PaymentKind`, but that's not really satisfactory, so + // we're punting on it until we can come up with a better solution. 
+ + let kind = PaymentKind::Onchain { txid, status: confirmation_status }; + + let fee = locked_wallet.calculate_fee(tx).unwrap_or(Amount::ZERO); + let (sent, received) = locked_wallet.sent_and_received(tx); + let fee_sat = fee.to_sat(); + + let (direction, amount_msat) = if sent > received { + ( + PaymentDirection::Outbound, + Some( + (sent.to_sat().saturating_sub(fee_sat).saturating_sub(received.to_sat())) + * 1000, + ), + ) + } else { + ( + PaymentDirection::Inbound, + Some( + received.to_sat().saturating_sub(sent.to_sat().saturating_sub(fee_sat)) * 1000, + ), + ) + }; + + PaymentDetails::new( + payment_id, + kind, + amount_msat, + Some(fee_sat * 1000), + direction, + payment_status, + ) + } + + fn create_pending_payment_from_tx( + &self, payment: PaymentDetails, conflicting_txids: Vec, + ) -> PendingPaymentDetails { + PendingPaymentDetails::new(payment, conflicting_txids) + } + + fn find_payment_by_txid(&self, target_txid: Txid) -> Option { + let direct_payment_id = PaymentId(target_txid.to_byte_array()); + if self.pending_payment_store.contains_key(&direct_payment_id) { + return Some(direct_payment_id); + } + + if let Some(replaced_details) = self + .pending_payment_store + .list_filter(|p| p.conflicting_txids.contains(&target_txid)) + .first() + { + return Some(replaced_details.details.id); + } + + None + } } impl Listen for Wallet { @@ -821,9 +1006,9 @@ impl Listen for Wallet { ); } - match locked_wallet.apply_block(block, height) { - Ok(()) => { - if let Err(e) = self.update_payment_store(&mut *locked_wallet) { + match locked_wallet.apply_block_events(block, height) { + Ok(events) => { + if let Err(e) = self.update_payment_store(&mut *locked_wallet, events) { log_error!(self.logger, "Failed to update payment store: {}", e); return; } diff --git a/src/wallet/persist.rs b/src/wallet/persist.rs index 5c8668937..10be1fac0 100644 --- a/src/wallet/persist.rs +++ b/src/wallet/persist.rs @@ -38,10 +38,7 @@ impl WalletPersister for KVStoreWalletPersister { return Ok(latest_change_set.clone()); } - let change_set_opt = read_bdk_wallet_change_set( - Arc::clone(&persister.kv_store), - Arc::clone(&persister.logger), - )?; + let change_set_opt = read_bdk_wallet_change_set(&*persister.kv_store, &*persister.logger)?; let change_set = match change_set_opt { Some(persisted_change_set) => persisted_change_set, @@ -87,11 +84,7 @@ impl WalletPersister for KVStoreWalletPersister { )); } else { latest_change_set.descriptor = Some(descriptor.clone()); - write_bdk_wallet_descriptor( - &descriptor, - Arc::clone(&persister.kv_store), - Arc::clone(&persister.logger), - )?; + write_bdk_wallet_descriptor(&descriptor, &*persister.kv_store, &*persister.logger)?; } } @@ -112,8 +105,8 @@ impl WalletPersister for KVStoreWalletPersister { latest_change_set.change_descriptor = Some(change_descriptor.clone()); write_bdk_wallet_change_descriptor( &change_descriptor, - Arc::clone(&persister.kv_store), - Arc::clone(&persister.logger), + &*persister.kv_store, + &*persister.logger, )?; } } @@ -131,11 +124,7 @@ impl WalletPersister for KVStoreWalletPersister { )); } else { latest_change_set.network = Some(network); - write_bdk_wallet_network( - &network, - Arc::clone(&persister.kv_store), - Arc::clone(&persister.logger), - )?; + write_bdk_wallet_network(&network, &*persister.kv_store, &*persister.logger)?; } } @@ -157,7 +146,7 @@ impl WalletPersister for KVStoreWalletPersister { latest_change_set.indexer.merge(change_set.indexer.clone()); write_bdk_wallet_indexer( &latest_change_set.indexer, - 
Arc::clone(&persister.kv_store), + &*persister.kv_store, Arc::clone(&persister.logger), )?; } @@ -166,7 +155,7 @@ impl WalletPersister for KVStoreWalletPersister { latest_change_set.tx_graph.merge(change_set.tx_graph.clone()); write_bdk_wallet_tx_graph( &latest_change_set.tx_graph, - Arc::clone(&persister.kv_store), + &*persister.kv_store, Arc::clone(&persister.logger), )?; } @@ -175,7 +164,7 @@ impl WalletPersister for KVStoreWalletPersister { latest_change_set.local_chain.merge(change_set.local_chain.clone()); write_bdk_wallet_local_chain( &latest_change_set.local_chain, - Arc::clone(&persister.kv_store), + &*persister.kv_store, Arc::clone(&persister.logger), )?; } diff --git a/tests/common/logging.rs b/tests/common/logging.rs index 3ff24d34d..1e3a8a1c2 100644 --- a/tests/common/logging.rs +++ b/tests/common/logging.rs @@ -1,7 +1,7 @@ use std::sync::{Arc, Mutex}; use chrono::Utc; -use ldk_node::logger::{LogLevel, LogRecord, LogWriter}; +use ldk_node::logger::{LogContext, LogLevel, LogRecord, LogWriter}; #[cfg(not(feature = "uniffi"))] use log::Record as LogFacadeRecord; use log::{Level as LogFacadeLevel, LevelFilter as LogFacadeLevelFilter, Log as LogFacadeLog}; @@ -156,13 +156,18 @@ impl MultiNodeLogger { impl LogWriter for MultiNodeLogger { fn log(&self, record: LogRecord) { let log = format!( - "[{}] {} {:<5} [{}:{}] {}\n", + "[{}] {} {:<5} [{}:{}] {}{}\n", self.node_id, Utc::now().format("%Y-%m-%d %H:%M:%S%.3f"), record.level.to_string(), record.module_path, record.line, - record.args + record.args, + LogContext { + channel_id: record.channel_id.as_ref(), + peer_id: record.peer_id.as_ref(), + payment_hash: record.payment_hash.as_ref(), + }, ); print!("{}", log); diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 96f58297c..f9765dfc2 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -17,6 +17,8 @@ use std::path::PathBuf; use std::sync::{Arc, RwLock}; use std::time::Duration; +#[cfg(feature = "uniffi")] +use async_trait::async_trait; use bitcoin::hashes::hex::FromHex; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; @@ -31,8 +33,11 @@ use ldk_node::entropy::{generate_entropy_mnemonic, NodeEntropy}; use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ - Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError, PendingSweepBalance, + Builder, CustomTlvRecord, DynStore, DynStoreWrapper, Event, LightningBalance, Node, NodeError, + PendingSweepBalance, }; +#[cfg(feature = "uniffi")] +use ldk_node::{FfiDynStore, ForeignDynStoreTrait, IOError}; use lightning::io; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; @@ -274,10 +279,20 @@ pub(crate) enum TestChainSource<'a> { BitcoindRestSync(&'a BitcoinD), } -#[derive(Clone, Copy)] +#[cfg(feature = "uniffi")] +type TestDynStore = Arc; +#[cfg(not(feature = "uniffi"))] +type TestDynStore = Arc; + +#[derive(Clone)] pub(crate) enum TestStoreType { TestSyncStore, Sqlite, + TierStore { + primary: TestDynStore, + backup: Option, + ephemeral: Option, + }, } impl Default for TestStoreType { @@ -317,6 +332,96 @@ macro_rules! 
setup_builder { pub(crate) use setup_builder; +#[cfg(feature = "uniffi")] +struct TestForeignDynStoreAdapter(Arc); + +#[cfg(feature = "uniffi")] +#[async_trait] +impl ForeignDynStoreTrait for TestForeignDynStoreAdapter { + async fn read_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> Result, IOError> { + self.0.read_async(&primary_namespace, &secondary_namespace, &key).await.map_err(Into::into) + } + + async fn write_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> Result<(), IOError> { + self.0 + .write_async(&primary_namespace, &secondary_namespace, &key, buf) + .await + .map_err(Into::into) + } + + async fn remove_async( + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> Result<(), IOError> { + self.0 + .remove_async(&primary_namespace, &secondary_namespace, &key, lazy) + .await + .map_err(Into::into) + } + + async fn list_async( + &self, primary_namespace: String, secondary_namespace: String, + ) -> Result, IOError> { + self.0.list_async(&primary_namespace, &secondary_namespace).await.map_err(Into::into) + } + + fn read( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> Result, IOError> { + KVStoreSync::read(&*self.0, &primary_namespace, &secondary_namespace, &key) + .map_err(Into::into) + } + + fn write( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> Result<(), IOError> { + KVStoreSync::write(&*self.0, &primary_namespace, &secondary_namespace, &key, buf) + .map_err(Into::into) + } + + fn remove( + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> Result<(), IOError> { + KVStoreSync::remove(&*self.0, &primary_namespace, &secondary_namespace, &key, lazy) + .map_err(Into::into) + } + + fn list( + &self, primary_namespace: String, secondary_namespace: String, + ) -> Result, IOError> { + KVStoreSync::list(&*self.0, &primary_namespace, &secondary_namespace).map_err(Into::into) + } +} + +pub(crate) fn create_tier_stores(base_path: PathBuf) -> (TestDynStore, TestDynStore, TestDynStore) { + let primary = Arc::new(DynStoreWrapper( + SqliteStore::new( + base_path.join("primary"), + Some("primary_db".to_string()), + Some("primary_kv".to_string()), + ) + .unwrap(), + )); + let backup = Arc::new(DynStoreWrapper(FilesystemStore::new(base_path.join("backup")))); + let ephemeral = Arc::new(DynStoreWrapper(FilesystemStore::new(base_path.join("ephemeral")))); + + #[cfg(feature = "uniffi")] + { + ( + Arc::new(FfiDynStore::from_store(Arc::new(TestForeignDynStoreAdapter(primary)))), + Arc::new(FfiDynStore::from_store(Arc::new(TestForeignDynStoreAdapter(backup)))), + Arc::new(FfiDynStore::from_store(Arc::new(TestForeignDynStoreAdapter(ephemeral)))), + ) + } + #[cfg(not(feature = "uniffi"))] + { + (primary, backup, ephemeral) + } +} + pub(crate) fn setup_two_nodes( chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, anchors_trusted_no_reserve: bool, @@ -327,21 +432,22 @@ pub(crate) fn setup_two_nodes( anchor_channels, anchors_trusted_no_reserve, TestStoreType::TestSyncStore, + TestStoreType::TestSyncStore, ) } pub(crate) fn setup_two_nodes_with_store( chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, - anchors_trusted_no_reserve: bool, store_type: TestStoreType, + anchors_trusted_no_reserve: bool, store_type_a: TestStoreType, store_type_b: TestStoreType, ) -> (TestNode, TestNode) { println!("== 
Node A =="); let mut config_a = random_config(anchor_channels); - config_a.store_type = store_type; + config_a.store_type = store_type_a; let node_a = setup_node(chain_source, config_a); println!("\n== Node B =="); let mut config_b = random_config(anchor_channels); - config_b.store_type = store_type; + config_b.store_type = store_type_b; if allow_0conf { config_b.node_config.trusted_peers_0conf.push(node_a.node_id()); } @@ -370,12 +476,14 @@ pub(crate) fn setup_node_for_async_payments( match chain_source { TestChainSource::Esplora(electrsd) => { let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); }, TestChainSource::Electrum(electrsd) => { let electrum_url = format!("tcp://{}", electrsd.electrum_url); - let sync_config = ElectrumSyncConfig { background_sync_config: None }; + let mut sync_config = ElectrumSyncConfig::default(); + sync_config.background_sync_config = None; builder.set_chain_source_electrum(electrum_url.clone(), Some(sync_config)); }, TestChainSource::BitcoindRpcSync(bitcoind) => { @@ -425,6 +533,15 @@ pub(crate) fn setup_node_for_async_payments( builder.build_with_store(config.node_entropy.into(), kv_store).unwrap() }, TestStoreType::Sqlite => builder.build(config.node_entropy.into()).unwrap(), + TestStoreType::TierStore { primary, backup, ephemeral } => { + if let Some(backup) = backup { + builder.set_tier_store_backup(backup); + } + if let Some(ephemeral) = ephemeral { + builder.set_tier_store_ephemeral(ephemeral); + } + builder.build_with_tier_store(config.node_entropy.into(), primary).unwrap() + }, }; node.start().unwrap(); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 892afedcc..f609219cd 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -23,22 +23,31 @@ use common::{ expect_splice_pending_event, generate_blocks_and_wait, open_channel, open_channel_push_amt, premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config, random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, setup_node, - setup_node_for_async_payments, setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, + setup_node_for_async_payments, setup_two_nodes, wait_for_tx, TestChainSource, TestStoreType, + TestSyncStore, }; use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig}; +use ldk_node::entropy::NodeEntropy; use ldk_node::liquidity::LSPS2ServiceConfig; use ldk_node::payment::{ ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, - QrPaymentResult, + UnifiedPaymentResult, }; use ldk_node::{Builder, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; use lightning::routing::gossip::{NodeAlias, NodeId}; use lightning::routing::router::RouteParametersConfig; +use lightning::util::persist::{ + KVStoreSync, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, +}; use lightning_invoice::{Bolt11InvoiceDescription, Description}; use lightning_types::payment::{PaymentHash, PaymentPreimage}; use log::LevelFilter; +use crate::common::{create_tier_stores, random_storage_path, 
setup_two_nodes_with_store}; + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); @@ -48,6 +57,85 @@ async fn channel_full_cycle() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_tier_store() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + let (primary_a, backup_a, ephemeral_a) = create_tier_stores(random_storage_path()); + let (primary_b, backup_b, ephemeral_b) = create_tier_stores(random_storage_path()); + + let (node_a, node_b) = setup_two_nodes_with_store( + &chain_source, + false, + true, + false, + TestStoreType::TierStore { + primary: Arc::clone(&primary_a), + backup: Some(Arc::clone(&backup_a)), + ephemeral: Some(Arc::clone(&ephemeral_a)), + }, + TestStoreType::TierStore { + primary: Arc::clone(&primary_b), + backup: Some(Arc::clone(&backup_b)), + ephemeral: Some(Arc::clone(&ephemeral_b)), + }, + ); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) + .await; + + // Verify Primary store contains channel manager data + let primary_channel_manager = KVStoreSync::read( + primary_a.as_ref(), + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert!(primary_channel_manager.is_ok(), "Primary should have channel manager data"); + + // Verify Primary store contains payment info + let primary_payments = KVStoreSync::list(primary_a.as_ref(), "payments", ""); + assert!(primary_payments.is_ok(), "Primary should have payment data"); + assert!(!primary_payments.unwrap().is_empty(), "Primary should have payment entries"); + + // Verify Backup store synced critical data + let backup_channel_manager = KVStoreSync::read( + backup_a.as_ref(), + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert!(backup_channel_manager.is_ok(), "Backup should have synced channel manager"); + + // Verify backup is not empty + let backup_all_keys = KVStoreSync::list(backup_a.as_ref(), "", "").unwrap(); + assert!(!backup_all_keys.is_empty(), "Backup store should not be empty"); + + // Verify Ephemeral does NOT have channel manager + let ephemeral_channel_manager = KVStoreSync::read( + ephemeral_a.as_ref(), + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + assert!(ephemeral_channel_manager.is_err(), "Ephemeral should NOT have channel manager"); + + // Verify Ephemeral does NOT have payment info + let ephemeral_payments = KVStoreSync::list(ephemeral_a.as_ref(), "payments", ""); + assert!( + ephemeral_payments.is_err() || ephemeral_payments.unwrap().is_empty(), + "Ephemeral should NOT have payment data" + ); + + //Verify Ephemeral does have network graph + let ephemeral_network_graph = KVStoreSync::read( + ephemeral_a.as_ref(), + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + ); + assert!(ephemeral_network_graph.is_ok(), "Ephemeral should have network graph"); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn channel_full_cycle_electrum() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); @@ -156,7 +244,8 @@ async fn multi_hop_sending() { let mut nodes = Vec::new(); for 
_ in 0..5 { let config = random_config(true); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; setup_builder!(builder, config.node_config); builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); let node = builder.build(config.node_entropy.into()).unwrap(); @@ -254,7 +343,8 @@ async fn start_stop_reinit() { let test_sync_store = TestSyncStore::new(config.node_config.storage_dir_path.clone().into()); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; setup_builder!(builder, config.node_config); builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); @@ -926,10 +1016,13 @@ async fn concurrent_connections_succeed() { } } -#[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn splice_channel() { +async fn run_splice_channel_test(bitcoind_chain_source: bool) { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::Esplora(&electrsd); + let chain_source = if bitcoind_chain_source { + TestChainSource::BitcoindRpcSync(&bitcoind) + } else { + TestChainSource::Esplora(&electrsd) + }; let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); @@ -994,7 +1087,7 @@ async fn splice_channel() { // Splice-in funds for Node B so that it has outbound liquidity to make a payment node_b.splice_in(&user_channel_id_b, node_a.node_id(), 4_000_000).unwrap(); - expect_splice_pending_event!(node_a, node_b.node_id()); + let txo = expect_splice_pending_event!(node_a, node_b.node_id()); expect_splice_pending_event!(node_b, node_a.node_id()); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; @@ -1005,11 +1098,16 @@ async fn splice_channel() { expect_channel_ready_event!(node_a, node_b.node_id()); expect_channel_ready_event!(node_b, node_a.node_id()); - let splice_in_fee_sat = 252; + let expected_splice_in_fee_sat = 252; + + let payments = node_b.list_payments(); + let payment = + payments.into_iter().find(|p| p.id == PaymentId(txo.txid.to_byte_array())).unwrap(); + assert_eq!(payment.fee_paid_msat, Some(expected_splice_in_fee_sat * 1_000)); assert_eq!( node_b.list_balances().total_onchain_balance_sats, - premine_amount_sat - 4_000_000 - splice_in_fee_sat + premine_amount_sat - 4_000_000 - expected_splice_in_fee_sat ); assert_eq!(node_b.list_balances().total_lightning_balance_sats, 4_000_000); @@ -1032,7 +1130,7 @@ async fn splice_channel() { let address = node_a.onchain_payment().new_address().unwrap(); node_a.splice_out(&user_channel_id_a, node_b.node_id(), &address, amount_msat / 1000).unwrap(); - expect_splice_pending_event!(node_a, node_b.node_id()); + let txo = expect_splice_pending_event!(node_a, node_b.node_id()); expect_splice_pending_event!(node_b, node_a.node_id()); generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; @@ -1043,7 +1141,12 @@ async fn splice_channel() { expect_channel_ready_event!(node_a, node_b.node_id()); expect_channel_ready_event!(node_b, node_a.node_id()); - let splice_out_fee_sat = 183; + let expected_splice_out_fee_sat = 183; + + let payments = node_a.list_payments(); + let payment = + payments.into_iter().find(|p| p.id == PaymentId(txo.txid.to_byte_array())).unwrap(); + assert_eq!(payment.fee_paid_msat, Some(expected_splice_out_fee_sat * 
1_000)); assert_eq!( node_a.list_balances().total_onchain_balance_sats, @@ -1051,10 +1154,16 @@ async fn splice_channel() { ); assert_eq!( node_a.list_balances().total_lightning_balance_sats, - 4_000_000 - closing_transaction_fee_sat - anchor_output_sat - splice_out_fee_sat + 4_000_000 - closing_transaction_fee_sat - anchor_output_sat - expected_splice_out_fee_sat ); } +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn splice_channel() { + run_splice_channel_test(false).await; + run_splice_channel_test(true).await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn simple_bolt12_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); @@ -1526,15 +1635,15 @@ async fn generate_bip21_uri() { // Test 1: Verify URI generation (on-chain + BOLT11) works // even before any channels are opened. This checks the graceful fallback behavior. - let initial_uqr_payment = node_b - .unified_qr_payment() + let initial_uni_payment = node_b + .unified_payment() .receive(expected_amount_sats, "asdf", expiry_sec) .expect("Failed to generate URI"); - println!("Initial URI (no channels): {}", initial_uqr_payment); + println!("Initial URI (no channels): {}", initial_uni_payment); - assert!(initial_uqr_payment.contains("bitcoin:")); - assert!(initial_uqr_payment.contains("lightning=")); - assert!(!initial_uqr_payment.contains("lno=")); // BOLT12 requires channels + assert!(initial_uni_payment.contains("bitcoin:")); + assert!(initial_uni_payment.contains("lightning=")); + assert!(!initial_uni_payment.contains("lno=")); // BOLT12 requires channels premine_and_distribute_funds( &bitcoind.client, @@ -1555,19 +1664,19 @@ async fn generate_bip21_uri() { expect_channel_ready_event!(node_b, node_a.node_id()); // Test 2: Verify URI generation (on-chain + BOLT11 + BOLT12) works after channels are established. 
- let uqr_payment = node_b - .unified_qr_payment() + let uni_payment = node_b + .unified_payment() .receive(expected_amount_sats, "asdf", expiry_sec) .expect("Failed to generate URI"); - println!("Generated URI: {}", uqr_payment); - assert!(uqr_payment.contains("bitcoin:")); - assert!(uqr_payment.contains("lightning=")); - assert!(uqr_payment.contains("lno=")); + println!("Generated URI: {}", uni_payment); + assert!(uni_payment.contains("bitcoin:")); + assert!(uni_payment.contains("lightning=")); + assert!(uni_payment.contains("lno=")); } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn unified_qr_send_receive() { +async fn unified_send_receive_bip21_uri() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); @@ -1605,38 +1714,39 @@ async fn unified_qr_send_receive() { let expected_amount_sats = 100_000; let expiry_sec = 4_000; - let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); - let uri_str = uqr_payment.clone().unwrap(); - let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str, None) { - Ok(QrPaymentResult::Bolt12 { payment_id }) => { - println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); - payment_id - }, - Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { - panic!("Expected Bolt12 payment but got Bolt11"); - }, - Ok(QrPaymentResult::Onchain { txid: _ }) => { - panic!("Expected Bolt12 payment but get On-chain transaction"); - }, - Err(e) => { - panic!("Expected Bolt12 payment but got error: {:?}", e); - }, - }; + let uni_payment = node_b.unified_payment().receive(expected_amount_sats, "asdf", expiry_sec); + let uri_str = uni_payment.clone().unwrap(); + let offer_payment_id: PaymentId = + match node_a.unified_payment().send(&uri_str, None, None).await { + Ok(UnifiedPaymentResult::Bolt12 { payment_id }) => { + println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); + payment_id + }, + Ok(UnifiedPaymentResult::Bolt11 { payment_id: _ }) => { + panic!("Expected Bolt12 payment but got Bolt11"); + }, + Ok(UnifiedPaymentResult::Onchain { txid: _ }) => { + panic!("Expected Bolt12 payment but got On-chain transaction"); + }, + Err(e) => { + panic!("Expected Bolt12 payment but got error: {:?}", e); + }, + }; expect_payment_successful_event!(node_a, Some(offer_payment_id), None); // Cut off the BOLT12 part to fallback to BOLT11. 
let uri_str_without_offer = uri_str.split("&lno=").next().unwrap(); let invoice_payment_id: PaymentId = - match node_a.unified_qr_payment().send(uri_str_without_offer, None) { - Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { + match node_a.unified_payment().send(uri_str_without_offer, None, None).await { + Ok(UnifiedPaymentResult::Bolt12 { payment_id: _ }) => { panic!("Expected Bolt11 payment but got Bolt12"); }, - Ok(QrPaymentResult::Bolt11 { payment_id }) => { + Ok(UnifiedPaymentResult::Bolt11 { payment_id }) => { println!("\nBolt11 payment sent successfully with PaymentID: {:?}", payment_id); payment_id }, - Ok(QrPaymentResult::Onchain { txid: _ }) => { + Ok(UnifiedPaymentResult::Onchain { txid: _ }) => { panic!("Expected Bolt11 payment but got on-chain transaction"); }, Err(e) => { @@ -1646,19 +1756,19 @@ async fn unified_qr_send_receive() { expect_payment_successful_event!(node_a, Some(invoice_payment_id), None); let expect_onchain_amount_sats = 800_000; - let onchain_uqr_payment = - node_b.unified_qr_payment().receive(expect_onchain_amount_sats, "asdf", 4_000).unwrap(); + let onchain_uni_payment = + node_b.unified_payment().receive(expect_onchain_amount_sats, "asdf", 4_000).unwrap(); // Cut off any lightning part to fallback to on-chain only. - let uri_str_without_lightning = onchain_uqr_payment.split("&lightning=").next().unwrap(); - let txid = match node_a.unified_qr_payment().send(&uri_str_without_lightning, None) { - Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { + let uri_str_without_lightning = onchain_uni_payment.split("&lightning=").next().unwrap(); + let txid = match node_a.unified_payment().send(&uri_str_without_lightning, None, None).await { + Ok(UnifiedPaymentResult::Bolt12 { payment_id: _ }) => { panic!("Expected on-chain payment but got Bolt12") }, - Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { + Ok(UnifiedPaymentResult::Bolt11 { payment_id: _ }) => { panic!("Expected on-chain payment but got Bolt11"); }, - Ok(QrPaymentResult::Onchain { txid }) => { + Ok(UnifiedPaymentResult::Onchain { txid }) => { println!("\nOn-chain transaction successful with Txid: {}", txid); txid }, @@ -1687,7 +1797,8 @@ async fn do_lsps2_client_service_integration(client_trusts_lsp: bool) { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; // Setup three nodes: service, client, and payer let channel_opening_fee_ppm = 10_000; @@ -2004,7 +2115,8 @@ async fn lsps2_client_trusts_lsp() { let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; // Setup three nodes: service, client, and payer let channel_opening_fee_ppm = 10_000; @@ -2177,7 +2289,8 @@ async fn lsps2_lsp_trusts_client_but_client_does_not_claim() { let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let sync_config = EsploraSyncConfig { background_sync_config: None }; + let mut sync_config = EsploraSyncConfig::default(); + sync_config.background_sync_config = None; // Setup three nodes: service, client, and payer let channel_opening_fee_ppm = 10_000; @@ -2297,3 +2410,185 @@ async fn lsps2_lsp_trusts_client_but_client_does_not_claim() { Some(6) ); } + 
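Before the new persistence tests below, a minimal sketch of the confirmation-depth rule that the reworked `update_payment_store` applies when deciding whether an on-chain payment has settled. It assumes LDK's `ANTI_REORG_DELAY` of 6 blocks; the `Status` enum and `onchain_status` helper are illustrative stand-ins, not part of this patch:

```rust
// Illustrative sketch (not part of the patch): the confirmation-depth rule
// used when processing `TxConfirmed` / `ChainTipChanged` wallet events.
// Assumes LDK's ANTI_REORG_DELAY of 6 blocks; `Status` stands in for the
// real `PaymentStatus`.

const ANTI_REORG_DELAY: u32 = 6;

#[derive(Debug, PartialEq)]
enum Status {
    Pending,
    Succeeded,
}

/// A transaction confirmed at `confirmation_height` is treated as succeeded
/// once the chain tip is at least `ANTI_REORG_DELAY - 1` blocks past it,
/// i.e. after six confirmations in total.
fn onchain_status(tip_height: u32, confirmation_height: u32) -> Status {
    if tip_height >= confirmation_height + ANTI_REORG_DELAY - 1 {
        Status::Succeeded
    } else {
        Status::Pending
    }
}

fn main() {
    // Confirmed at height 100: still pending at tip 104 (5 confs), settled at 105 (6 confs).
    assert_eq!(onchain_status(104, 100), Status::Pending);
    assert_eq!(onchain_status(105, 100), Status::Succeeded);
    println!("confirmation-depth rule behaves as expected");
}
```

The same threshold is what `ChainTipChanged` handling uses to promote entries in the pending payment store to `PaymentStatus::Succeeded` once enough blocks have been built on top of the confirmation.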
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn payment_persistence_after_restart() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + + // Setup nodes manually so we can restart node_a with the same config + println!("== Node A =="); + let mut config_a = random_config(true); + config_a.store_type = TestStoreType::Sqlite; + + let num_payments = 200; + let payment_amount_msat = 1_000_000; // 1000 sats per payment + + { + let node_a = setup_node(&chain_source, config_a.clone()); + + println!("\n== Node B =="); + let config_b = random_config(true); + let node_b = setup_node(&chain_source, config_b); + + let addr_a = node_a.onchain_payment().new_address().unwrap(); + let addr_b = node_b.onchain_payment().new_address().unwrap(); + + // Premine sufficient funds for a large channel and many payments + let premine_amount_sat = 10_000_000; + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr_a, addr_b], + Amount::from_sat(premine_amount_sat), + ) + .await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + assert_eq!(node_a.list_balances().spendable_onchain_balance_sats, premine_amount_sat); + assert_eq!(node_b.list_balances().spendable_onchain_balance_sats, premine_amount_sat); + + // Open a large channel from node_a to node_b + let channel_amount_sat = 5_000_000; + open_channel(&node_a, &node_b, channel_amount_sat, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + // Send 200 payments from node_a to node_b + println!("\nSending {} payments from A to B...", num_payments); + let invoice_description = + Bolt11InvoiceDescription::Direct(Description::new(String::from("test")).unwrap()); + + for i in 0..num_payments { + let invoice = node_b + .bolt11_payment() + .receive(payment_amount_msat, &invoice_description.clone().into(), 3600) + .unwrap(); + let payment_id = node_a.bolt11_payment().send(&invoice, None).unwrap(); + expect_event!(node_a, PaymentSuccessful); + expect_event!(node_b, PaymentReceived); + + if (i + 1) % 50 == 0 { + println!("Completed {} payments", i + 1); + } + + // Verify payment succeeded + assert_eq!(node_a.payment(&payment_id).unwrap().status, PaymentStatus::Succeeded); + } + println!("All {} payments completed successfully", num_payments); + + // Verify node_a has 200 outbound Bolt11 payments before shutdown + let outbound_payments_before = node_a.list_payments_with_filter(|p| { + p.direction == PaymentDirection::Outbound + && matches!(p.kind, PaymentKind::Bolt11 { .. }) + }); + assert_eq!(outbound_payments_before.len(), num_payments); + + // Shut down both nodes + println!("\nShutting down nodes..."); + node_a.stop().unwrap(); + node_b.stop().unwrap(); + } + + // Restart node_a with the same config + println!("\nRestarting node A..."); + let restarted_node_a = setup_node(&chain_source, config_a); + + // Assert all 200 payments are still in the store + let outbound_payments_after = restarted_node_a.list_payments_with_filter(|p| { + p.direction == PaymentDirection::Outbound && matches!(p.kind, PaymentKind::Bolt11 { .. 
}) + }); + assert_eq!( + outbound_payments_after.len(), + num_payments, + "Expected {} payments after restart, found {}", + num_payments, + outbound_payments_after.len() + ); + + // Verify all payments have the correct status + for payment in &outbound_payments_after { + assert_eq!( + payment.status, + PaymentStatus::Succeeded, + "Payment {:?} has unexpected status {:?}", + payment.id, + payment.status + ); + assert_eq!(payment.amount_msat, Some(payment_amount_msat)); + } + + println!( + "Successfully verified {} payments persisted after restart", + outbound_payments_after.len() + ); + + restarted_node_a.stop().unwrap(); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn persistence_backwards_compatibility() { + let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd(); + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + + let storage_path = common::random_storage_path().to_str().unwrap().to_owned(); + let seed_bytes = [42u8; 64]; + + // Setup a v0.6.2 `Node` + let (old_balance, old_node_id) = { + let mut builder_old = ldk_node_062::Builder::new(); + builder_old.set_network(bitcoin::Network::Regtest); + builder_old.set_storage_dir_path(storage_path.clone()); + builder_old.set_entropy_seed_bytes(seed_bytes); + builder_old.set_chain_source_esplora(esplora_url.clone(), None); + let node_old = builder_old.build().unwrap(); + + node_old.start().unwrap(); + let addr_old = node_old.onchain_payment().new_address().unwrap(); + common::premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![addr_old], + bitcoin::Amount::from_sat(100_000), + ) + .await; + node_old.sync_wallets().unwrap(); + + let balance = node_old.list_balances().spendable_onchain_balance_sats; + assert!(balance > 0); + let node_id = node_old.node_id(); + + node_old.stop().unwrap(); + + (balance, node_id) + }; + + // Now ensure we can still reinit from the same backend. + #[cfg(feature = "uniffi")] + let builder_new = Builder::new(); + #[cfg(not(feature = "uniffi"))] + let mut builder_new = Builder::new(); + builder_new.set_network(bitcoin::Network::Regtest); + builder_new.set_storage_dir_path(storage_path); + builder_new.set_chain_source_esplora(esplora_url, None); + + #[cfg(feature = "uniffi")] + let node_entropy = NodeEntropy::from_seed_bytes(seed_bytes.to_vec()).unwrap(); + #[cfg(not(feature = "uniffi"))] + let node_entropy = NodeEntropy::from_seed_bytes(seed_bytes); + let node_new = builder_new.build(node_entropy.into()).unwrap(); + + node_new.start().unwrap(); + node_new.sync_wallets().unwrap(); + + let new_balance = node_new.list_balances().spendable_onchain_balance_sats; + let new_node_id = node_new.node_id(); + + assert_eq!(old_node_id, new_node_id); + assert_eq!(old_balance, new_balance); + + node_new.stop().unwrap(); +}
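As a closing illustration of the wallet changes earlier in this patch, a minimal sketch of the event-ordering rule that `update_payment_store` relies on for RBF handling: replacements are processed before unconfirmations, and confirmations before chain-tip updates. The `Event` enum and `sort_events` helper below are simplified stand-ins for the events returned by `apply_block_events`, not the actual BDK types; only the sort key mirrors the patch:

```rust
// Illustrative sketch (not part of the patch): the sort key that sequences
// wallet events before they are applied to the payment store.

#[derive(Debug)]
enum Event {
    TxReplaced,      // processed first so an RBF replacement is recorded...
    TxUnconfirmed,   // ...letting the unconfirm step skip duplicate payment records
    TxConfirmed,     // processed before the tip moves...
    ChainTipChanged, // ...so confirmation depth is computed against the right height
    TxDropped,
}

fn sort_events(events: &mut Vec<Event>) {
    // Sorting is only needed when more than one event arrived in a batch.
    if events.len() > 1 {
        events.sort_by_key(|e| match e {
            Event::TxReplaced => 0,
            Event::TxUnconfirmed => 1,
            Event::TxConfirmed => 2,
            Event::ChainTipChanged => 3,
            Event::TxDropped => 4,
        });
    }
}

fn main() {
    let mut events =
        vec![Event::ChainTipChanged, Event::TxUnconfirmed, Event::TxConfirmed, Event::TxReplaced];
    sort_events(&mut events);
    // Prints: [TxReplaced, TxUnconfirmed, TxConfirmed, ChainTipChanged]
    println!("{:?}", events);
}
```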