From a7f0034198ef21b88269837a3fa53916312df069 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Fri, 14 Mar 2025 13:44:38 -0400 Subject: [PATCH 1/7] ln/fix: remove undefined PERM | 1 code from reason method This failure code isn't used anywhere in the codebase and is not defined in BOLT 04. --- lightning/src/ln/onion_utils.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 68d7e4d7d9d..82eec80e386 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -1418,8 +1418,7 @@ impl HTLCFailReason { const NODE: u16 = 0x2000; const UPDATE: u16 = 0x1000; - if failure_code == 1 | PERM { debug_assert!(data.is_empty()) } - else if failure_code == 2 | NODE { debug_assert!(data.is_empty()) } + if failure_code == 2 | NODE { debug_assert!(data.is_empty()) } else if failure_code == 2 | PERM | NODE { debug_assert!(data.is_empty()) } else if failure_code == 3 | PERM | NODE { debug_assert!(data.is_empty()) } else if failure_code == 4 | BADONION | PERM { debug_assert_eq!(data.len(), 32) } From f58c320a4bfd3072a856bf16d8732cd646351280 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Fri, 14 Mar 2025 16:01:09 -0400 Subject: [PATCH 2/7] ln/fix: invalid_onion_version code for DecodeError::UnknownVersion Realm is no longer specified in BOLT04, use the specified version error instead. 
--- lightning/src/ln/onion_utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 82eec80e386..d818a3af53a 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -1988,7 +1988,7 @@ fn decode_next_hop, N: NextPacketBytes>( Err(err) => { let error_code = match err { // Unknown realm byte - msgs::DecodeError::UnknownVersion => 0x4000 | 1, + msgs::DecodeError::UnknownVersion => 0x8000 | 0x4000 | 1, // invalid_onion_payload msgs::DecodeError::UnknownRequiredFeature | msgs::DecodeError::InvalidValue From 99615059b6b464d57d9e6166605255e7915671af Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 20 Mar 2025 13:30:07 -0400 Subject: [PATCH 3/7] ln/refactor: introduce enum for bolt 04 failure codes --- lightning/src/ln/async_payments_tests.rs | 10 +- lightning/src/ln/blinded_payment_tests.rs | 40 +- lightning/src/ln/channel.rs | 35 +- lightning/src/ln/channelmanager.rs | 219 +++++------ lightning/src/ln/functional_test_utils.rs | 14 +- lightning/src/ln/functional_tests.rs | 7 +- lightning/src/ln/mod.rs | 2 +- lightning/src/ln/onion_payment.rs | 106 +++--- lightning/src/ln/onion_route_tests.rs | 132 ++++--- lightning/src/ln/onion_utils.rs | 431 ++++++++++++++++++---- lightning/src/ln/payment_tests.rs | 8 +- lightning/src/ln/priv_short_conf_tests.rs | 7 +- lightning/src/ln/shutdown_tests.rs | 4 +- 13 files changed, 650 insertions(+), 365 deletions(-) diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index b888b9ceb5c..1d9c6fb84c7 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -20,7 +20,7 @@ use crate::ln::msgs::{ BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, OnionMessageHandler, }; use crate::ln::offers_tests; -use crate::ln::onion_utils::INVALID_ONION_BLINDING; +use crate::ln::onion_utils::LocalHTLCFailureReason; use 
crate::ln::outbound_payment::PendingOutboundPayment; use crate::ln::outbound_payment::Retry; use crate::offers::invoice_request::InvoiceRequest; @@ -179,7 +179,10 @@ fn invalid_keysend_payment_secret() { assert_eq!(updates_2_1.update_fail_malformed_htlcs.len(), 1); let update_malformed = &updates_2_1.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!( + update_malformed.failure_code, + LocalHTLCFailureReason::InvalidOnionBlinding.failure_code() + ); nodes[1] .node .handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); @@ -196,7 +199,8 @@ fn invalid_keysend_payment_secret() { &nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]), + PaymentFailedConditions::new() + .expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32]), ); } diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 89b83ede3eb..f3148abadbc 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -26,8 +26,7 @@ use crate::ln::inbound_payment::ExpandedKey; use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, UnsignedGossipMessage, MessageSendEvent}; use crate::ln::onion_payment; -use crate::ln::onion_utils; -use crate::ln::onion_utils::INVALID_ONION_BLINDING; +use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::{Retry, IDEMPOTENCY_TIMEOUT_TICKS}; use crate::offers::invoice::UnsignedBolt12Invoice; use crate::offers::nonce::Nonce; @@ -117,7 +116,7 @@ pub fn fail_blinded_htlc_backwards( match i { 0 => { let mut payment_failed_conditions = PaymentFailedConditions::new() - .expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]); + 
.expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32]); if retry_expected { payment_failed_conditions = payment_failed_conditions.retry_expected(); } @@ -136,7 +135,7 @@ pub fn fail_blinded_htlc_backwards( assert_eq!(blinded_node_updates.update_fail_malformed_htlcs.len(), 1); let update_malformed = &blinded_node_updates.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); nodes[i-1].node.handle_update_fail_malformed_htlc(nodes[i].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[i-1], &nodes[i], &blinded_node_updates.commitment_signed, true, false); } @@ -436,11 +435,11 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { match check { ForwardCheckFail::ForwardPayloadEncodedAsReceive => { expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(0x4000 | 22, &[0; 0])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionPayload, &[0; 0])); } _ => { expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } }; return @@ -468,12 +467,12 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); 
assert_eq!(update_malformed.sha256_of_onion, [0; 32]); // Ensure the intro node will properly blind the error if its downstream node failed to do so. update_malformed.sha256_of_onion = [1; 32]; - update_malformed.failure_code = INVALID_ONION_BLINDING ^ 1; + update_malformed.failure_code = LocalHTLCFailureReason::InvalidOnionBlinding.failure_code() ^ 1; nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false); @@ -481,7 +480,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } #[test] @@ -533,7 +532,7 @@ fn failed_backwards_to_intro_node() { let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); let mut update_malformed = &mut updates.update_fail_malformed_htlcs[0]; // Check that the final node encodes its failure correctly. 
- assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); assert_eq!(update_malformed.sha256_of_onion, [0; 32]); // Modify such the final hop does not correctly blind their error so we can ensure the intro node @@ -546,7 +545,7 @@ fn failed_backwards_to_intro_node() { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } enum ProcessPendingHTLCsCheck { @@ -654,12 +653,12 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); let update_malformed = &mut updates.update_fail_malformed_htlcs[0]; - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); assert_eq!(update_malformed.sha256_of_onion, [0; 32]); // Ensure the intro node will properly blind the error if its downstream node failed to do so. 
update_malformed.sha256_of_onion = [1; 32]; - update_malformed.failure_code = INVALID_ONION_BLINDING ^ 1; + update_malformed.failure_code = LocalHTLCFailureReason::InvalidOnionBlinding.failure_code() ^ 1; nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false); @@ -667,7 +666,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } #[test] @@ -1041,7 +1040,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { assert_eq!(updates_2_1.update_fail_malformed_htlcs.len(), 1); let update_malformed = &updates_2_1.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, true, false); @@ -1063,7 +1062,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates_1_0.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, - 
PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } #[test] @@ -1130,7 +1129,7 @@ fn blinded_path_retries() { assert_eq!(updates.update_fail_malformed_htlcs.len(), 1); let update_malformed = &updates.update_fail_malformed_htlcs[0]; assert_eq!(update_malformed.sha256_of_onion, [0; 32]); - assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING); + assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); $intro_node.node.handle_update_fail_malformed_htlc(nodes[3].node.get_our_node_id(), update_malformed); do_commitment_signed_dance(&$intro_node, &nodes[3], &updates.commitment_signed, true, false); @@ -1250,7 +1249,7 @@ fn min_htlc() { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } #[test] @@ -1445,7 +1444,7 @@ fn fails_receive_tlvs_authentication() { commitment_signed_dance!(nodes[0], nodes[1], update_fail.commitment_signed, false); expect_payment_failed_conditions( &nodes[0], payment_hash, true, - PaymentFailedConditions::new().expected_htlc_error_data(0x4000 | 22, &[]), + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionPayload, &[]), ); } @@ -1727,7 +1726,8 @@ fn route_blinding_spec_test_vector() { match onion_payment::decode_incoming_update_add_htlc_onion( &eve_update_add, &eve_node_signer, &logger, &secp_ctx ) { - Err(HTLCFailureMsg::Malformed(msg)) => assert_eq!(msg.failure_code, 
INVALID_ONION_BLINDING), + Err((HTLCFailureMsg::Malformed(msg), _)) => assert_eq!(msg.failure_code, + LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()), _ => panic!("Unexpected error") } } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 9268d07449d..0c47930a214 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -50,7 +50,7 @@ use crate::ln::chan_utils::{ #[cfg(splicing)] use crate::ln::chan_utils::FUNDING_TRANSACTION_WITNESS_WEIGHT; use crate::ln::chan_utils; -use crate::ln::onion_utils::{HTLCFailReason}; +use crate::ln::onion_utils::{HTLCFailReason, LocalHTLCFailureReason}; use crate::chain::BestBlock; use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator, fee_for_weight}; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS}; @@ -7581,21 +7581,17 @@ impl FundedChannel where fn internal_htlc_satisfies_config( &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig, - ) -> Result<(), (&'static str, u16)> { + ) -> Result<(), (&'static str, LocalHTLCFailureReason)> { let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64) .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64)); if fee.is_none() || htlc.amount_msat < fee.unwrap() || (htlc.amount_msat - fee.unwrap()) < amt_to_forward { - return Err(( - "Prior hop has deviated from specified fees parameters or origin node has obsolete ones", - 0x1000 | 12, // fee_insufficient - )); + return Err(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", + LocalHTLCFailureReason::FeeInsufficient)); } if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 { - return Err(( - "Forwarding node has tampered with the intended HTLC values or origin node has an 
obsolete cltv_expiry_delta", - 0x1000 | 13, // incorrect_cltv_expiry - )); + return Err(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", + LocalHTLCFailureReason::IncorrectCLTVExpiry)); } Ok(()) } @@ -7605,7 +7601,7 @@ impl FundedChannel where /// unsuccessful, falls back to the previous one if one exists. pub fn htlc_satisfies_config( &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, - ) -> Result<(), (&'static str, u16)> { + ) -> Result<(), (&'static str, LocalHTLCFailureReason)> { self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config()) .or_else(|err| { if let Some(prev_config) = self.context.prev_config() { @@ -7620,13 +7616,13 @@ impl FundedChannel where /// this function determines whether to fail the HTLC, or forward / claim it. pub fn can_accept_incoming_htlc( &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator, logger: L - ) -> Result<(), (&'static str, u16)> + ) -> Result<(), (&'static str, LocalHTLCFailureReason)> where F::Target: FeeEstimator, L::Target: Logger { if self.context.channel_state.is_local_shutdown_sent() { - return Err(("Shutdown was already sent", 0x4000|8)) + return Err(("Shutdown was already sent", LocalHTLCFailureReason::DroppedPending)) } let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator); @@ -7637,7 +7633,8 @@ impl FundedChannel where // Note that the total dust exposure includes both the dust HTLCs and the excess mining fees of the counterparty commitment transaction log_info!(logger, "Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); - return Err(("Exceeded our total dust exposure limit on counterparty commitment tx", 0x1000|7)) + return Err(("Exceeded our total dust exposure limit on 
counterparty commitment tx", + LocalHTLCFailureReason::DustLimitCounterparty)) } let htlc_success_dust_limit = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { 0 @@ -7651,7 +7648,8 @@ impl FundedChannel where if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat { log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat); - return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7)) + return Err(("Exceeded our dust exposure limit on holder commitment tx", + LocalHTLCFailureReason::DustLimitHolder)) } } @@ -7689,7 +7687,7 @@ impl FundedChannel where } if pending_remote_value_msat.saturating_sub(self.funding.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. 
Rebalancing is required.", &self.context.channel_id()); - return Err(("Fee spike buffer violation", 0x1000|7)); + return Err(("Fee spike buffer violation", LocalHTLCFailureReason::FeeSpikeBuffer)); } } @@ -11150,7 +11148,7 @@ mod tests { use bitcoin::network::Network; #[cfg(splicing)] use bitcoin::Weight; - use crate::ln::onion_utils::INVALID_ONION_BLINDING; + use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint}; use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; @@ -11790,7 +11788,8 @@ mod tests { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] } }; let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC { - htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32], + htlc_id, failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), + sha256_of_onion: [0; 32], }; let mut holding_cell_htlc_updates = Vec::with_capacity(12); for i in 0..12 { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index fab15bfea28..61129cd91af 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -60,7 +60,7 @@ use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentPar use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, HopConnector, InboundHTLCErr, NextPacketDetails}; use crate::ln::msgs; use crate::ln::onion_utils; -use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING}; +use crate::ln::onion_utils::{HTLCFailReason, LocalHTLCFailureReason}; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError, MessageSendEvent}; #[cfg(test)] use crate::ln::outbound_payment; @@ -402,10 +402,11 @@ pub(super) enum HTLCForwardInfo 
{ #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub enum BlindedFailure { /// This HTLC is being failed backwards by the introduction node, and thus should be failed with - /// [`msgs::UpdateFailHTLC`] and error code `0x8000|0x4000|24`. + /// [`msgs::UpdateFailHTLC`] and error code [`LocalHTLCFailureReason::InvalidOnionBlinding`]. FromIntroductionNode, /// This HTLC is being failed backwards by a blinded node within the path, and thus should be - /// failed with [`msgs::UpdateFailMalformedHTLC`] and error code `0x8000|0x4000|24`. + /// failed with [`msgs::UpdateFailMalformedHTLC`] and error code + /// [`LocalHTLCFailureReason::InvalidOnionBlinding`]. FromBlindedNode, } @@ -766,13 +767,13 @@ pub enum FailureCode { InvalidOnionPayload(Option<(u64, u16)>), } -impl Into for FailureCode { - fn into(self) -> u16 { +impl Into for FailureCode { + fn into(self) -> LocalHTLCFailureReason { match self { - FailureCode::TemporaryNodeFailure => 0x2000 | 2, - FailureCode::RequiredNodeFeatureMissing => 0x4000 | 0x2000 | 3, - FailureCode::IncorrectOrUnknownPaymentDetails => 0x4000 | 15, - FailureCode::InvalidOnionPayload(_) => 0x4000 | 22, + FailureCode::TemporaryNodeFailure => LocalHTLCFailureReason::TemporaryNodeFailure, + FailureCode::RequiredNodeFeatureMissing => LocalHTLCFailureReason::RequiredNodeFeature, + FailureCode::IncorrectOrUnknownPaymentDetails => LocalHTLCFailureReason::IncorrectPaymentDetails, + FailureCode::InvalidOnionPayload(_) => LocalHTLCFailureReason::InvalidOnionPayload, } } } @@ -3910,7 +3911,8 @@ where } for htlc_source in failed_htlcs.drain(..) 
{ - let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + let failure_reason = LocalHTLCFailureReason::DroppedPending; + let reason = HTLCFailReason::from_failure_code(failure_reason); let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } @@ -4033,7 +4035,8 @@ where shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len()); for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; - let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + let failure_reason = LocalHTLCFailureReason::DroppedPending; + let reason = HTLCFailReason::from_failure_code(failure_reason); let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } @@ -4329,22 +4332,26 @@ where fn can_forward_htlc_to_outgoing_channel( &self, chan: &mut FundedChannel, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails - ) -> Result<(), (&'static str, u16)> { + ) -> Result<(), (&'static str, LocalHTLCFailureReason)> { if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { // Note that the behavior here should be identical to the above block - we // should NOT reveal the existence or non-existence of a private channel if // we don't allow forwards outbound over them. 
- return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10)); + return Err(("Refusing to forward to a private channel based on our config.", + LocalHTLCFailureReason::PrivateChannelForward)); } if let HopConnector::ShortChannelId(outgoing_scid) = next_packet.outgoing_connector { if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() { // `option_scid_alias` (referred to in LDK as `scid_privacy`) means // "refuse to forward unless the SCID alias was used", so we pretend // we don't have the channel here. - return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10)); + return Err(( + "Refusing to forward over real channel SCID as our counterparty requested.", + LocalHTLCFailureReason::RealSCIDForward)); } } else { - return Err(("Cannot forward by Node ID without SCID.", 0x4000 | 10)); + return Err(("Cannot forward by Node ID without SCID.", + LocalHTLCFailureReason::InvalidTrampolineForward)); } // Note that we could technically not return an error yet here and just hope @@ -4354,19 +4361,18 @@ where // on a small/per-node/per-channel scale. 
if !chan.context.is_live() { if !chan.context.is_enabled() { - // channel_disabled - return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20)); + return Err(("Forwarding channel has been disconnected for some time.", + LocalHTLCFailureReason::ChannelDisabled)); } else { - // temporary_channel_failure - return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7)); + return Err(("Forwarding channel is not in a ready state.", + LocalHTLCFailureReason::ChannelNotReady)); } } - if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum - return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11)); - } - if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) { - return Err((err, code)); + if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { + return Err(("HTLC amount was below the htlc_minimum_msat", + LocalHTLCFailureReason::AmountBelowMinimum)); } + chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value)?; Ok(()) } @@ -4395,11 +4401,12 @@ where fn can_forward_htlc( &self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails - ) -> Result<(), (&'static str, u16)> { + ) -> Result<(), (&'static str, LocalHTLCFailureReason)> { let outgoing_scid = match next_packet_details.outgoing_connector { HopConnector::ShortChannelId(scid) => scid, HopConnector::Trampoline(_) => { - return Err(("Cannot forward by Node ID without SCID.", 0x4000 | 10)); + return Err(("Cannot forward by Node ID without SCID.", + LocalHTLCFailureReason::InvalidTrampolineForward)); } }; match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel| { @@ -4414,36 +4421,34 @@ where fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) || fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, 
outgoing_scid, &self.chain_hash) {} else { - return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10)); + return Err(("Don't have available channel for forwarding as requested.", + LocalHTLCFailureReason::UnknownNextPeer)); } } } let cur_height = self.best_block.read().unwrap().height + 1; - if let Err((err_msg, err_code)) = check_incoming_htlc_cltv( - cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry - ) { - return Err((err_msg, err_code)); - } + check_incoming_htlc_cltv(cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry)?; Ok(()) } fn htlc_failure_from_update_add_err( &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str, - err_code: u16, is_intro_node_blinded_forward: bool, + reason: LocalHTLCFailureReason, is_intro_node_blinded_forward: bool, shared_secret: &[u8; 32] ) -> HTLCFailureMsg { // at capacity, we write fields `htlc_msat` and `len` let mut res = VecWriter(Vec::with_capacity(8 + 2)); - if err_code & 0x1000 == 0x1000 { - if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 { + if reason.is_temporary() { + if reason == LocalHTLCFailureReason::AmountBelowMinimum || + reason == LocalHTLCFailureReason::FeeInsufficient { msg.amount_msat.write(&mut res).expect("Writes cannot fail"); } - else if err_code == 0x1000 | 13 { + else if reason == LocalHTLCFailureReason::IncorrectCLTVExpiry { msg.cltv_expiry.write(&mut res).expect("Writes cannot fail"); } - else if err_code == 0x1000 | 20 { + else if reason == LocalHTLCFailureReason::ChannelDisabled { // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791 0u16.write(&mut res).expect("Writes cannot fail"); } @@ -4461,16 +4466,16 @@ where channel_id: msg.channel_id, htlc_id: msg.htlc_id, sha256_of_onion: [0; 32], - failure_code: INVALID_ONION_BLINDING, + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), }); } - let (err_code, err_data) = if is_intro_node_blinded_forward 
{ - (INVALID_ONION_BLINDING, &[0; 32][..]) + let (reason, err_data) = if is_intro_node_blinded_forward { + (LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32][..]) } else { - (err_code, &res.0[..]) + (reason, &res.0[..]) }; - let failure = HTLCFailReason::reason(err_code, err_data.to_vec()) + let failure = HTLCFailReason::reason(reason, err_data.to_vec()) .get_encrypted_failure_packet(shared_secret, &None); HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, @@ -4485,7 +4490,7 @@ where next_packet_pubkey_opt: Option>, ) -> PendingHTLCStatus { macro_rules! return_err { - ($msg: expr, $err_code: expr, $data: expr) => { + ($msg: expr, $reason: expr, $data: expr) => { { let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)); log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg); @@ -4495,11 +4500,11 @@ where channel_id: msg.channel_id, htlc_id: msg.htlc_id, sha256_of_onion: [0; 32], - failure_code: INVALID_ONION_BLINDING, + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), } )) } - let failure = HTLCFailReason::reason($err_code, $data.to_vec()) + let failure = HTLCFailReason::reason($reason, $data.to_vec()) .get_encrypted_failure_packet(&shared_secret, &None); return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, @@ -4524,7 +4529,7 @@ where // delay) once they've sent us a commitment_signed! PendingHTLCStatus::Forward(info) }, - Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason, &err_data) } }, #[cfg(trampoline)] @@ -4542,20 +4547,20 @@ where // delay) once they've sent us a commitment_signed! 
PendingHTLCStatus::Forward(info) }, - Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason , &err_data) } }, onion_utils::Hop::Forward { .. } | onion_utils::Hop::BlindedForward { .. } => { match create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) { Ok(info) => PendingHTLCStatus::Forward(info), - Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason, &err_data) } }, #[cfg(trampoline)] onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. } => { match create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) { Ok(info) => PendingHTLCStatus::Forward(info), - Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data) + Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason, &err_data) } } } @@ -5752,9 +5757,9 @@ where cltv_expiry: incoming_cltv_expiry, }); - let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10); + let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer); let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id }; - self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &failure_reason, destination); + self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &reason, destination); } else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted Ok(()) @@ -5807,7 +5812,7 @@ where &update_add_htlc, &*self.node_signer, &*self.logger, &self.secp_ctx ) { Ok(decoded_onion) => decoded_onion, - Err(htlc_fail) => { + Err((htlc_fail, _)) => { htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion)); continue; }, @@ -5830,9 +5835,9 @@ where ) }) { 
Some(Ok(_)) => {}, - Some(Err((err, code))) => { + Some(Err((err, reason))) => { let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, &incoming_counterparty_node_id, err, code, + &update_add_htlc, &incoming_counterparty_node_id, err, reason, is_intro_node_blinded_forward, &shared_secret, ); let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); @@ -5845,11 +5850,11 @@ where // Now process the HTLC on the outgoing channel if it's a forward. if let Some(next_packet_details) = next_packet_details_opt.as_ref() { - if let Err((err, code)) = self.can_forward_htlc( + if let Err((err, reason)) = self.can_forward_htlc( &update_add_htlc, next_packet_details ) { let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, &incoming_counterparty_node_id, err, code, + &update_add_htlc, &incoming_counterparty_node_id, err, reason, is_intro_node_blinded_forward, &shared_secret, ); let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); @@ -5932,7 +5937,7 @@ where }) => { let cltv_expiry = routing.incoming_cltv_expiry(); macro_rules! failure_handler { - ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { + ($msg: expr, $reason: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash)); log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg); @@ -5956,23 +5961,23 @@ where }; failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::reason($err_code, $err_data), + HTLCFailReason::reason($reason, $err_data), reason )); continue; } } macro_rules! 
fail_forward { - ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { + ($msg: expr, $reason: expr, $err_data: expr, $phantom_ss: expr) => { { - failure_handler!($msg, $err_code, $err_data, $phantom_ss, true); + failure_handler!($msg, $reason, $err_data, $phantom_ss, true); } } } macro_rules! failed_payment { - ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { + ($msg: expr, $reason: expr, $err_data: expr, $phantom_ss: expr) => { { - failure_handler!($msg, $err_code, $err_data, $phantom_ss, false); + failure_handler!($msg, $reason, $err_data, $phantom_ss, false); } } } @@ -5984,17 +5989,17 @@ where onion_packet.hmac, payment_hash, None, &*self.node_signer ) { Ok(res) => res, - Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { + Err(onion_utils::OnionDecodeErr::Malformed { err_msg, reason }) => { let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).to_byte_array(); // In this scenario, the phantom would have sent us an // `update_fail_malformed_htlc`, meaning here we encrypt the error as // if it came from us (the second-to-last hop) but contains the sha256 // of the onion. - failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None); + failed_payment!(err_msg, reason, sha256_of_onion.to_vec(), None); }, - Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code, shared_secret, .. }) => { + Err(onion_utils::OnionDecodeErr::Relay { err_msg, reason, shared_secret, .. 
}) => { let phantom_shared_secret = shared_secret.secret_bytes(); - failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret)); + failed_payment!(err_msg, reason, Vec::new(), Some(phantom_shared_secret)); }, }; let phantom_shared_secret = next_hop.shared_secret().secret_bytes(); @@ -6008,13 +6013,15 @@ where prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)] )), - Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) + Err(InboundHTLCErr { reason, err_data, msg }) => failed_payment!(msg, reason, err_data, Some(phantom_shared_secret)) } } else { - fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None); + fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), + LocalHTLCFailureReason::UnknownNextPeer, Vec::new(), None); } } else { - fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None); + fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), + LocalHTLCFailureReason::UnknownNextPeer, Vec::new(), None); } }, HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. 
} => { @@ -6136,10 +6143,10 @@ where .get_mut(&forward_chan_id) .and_then(Channel::as_funded_mut) { - let failure_code = 0x1000|7; - let data = self.get_htlc_inbound_temp_fail_data(failure_code); + let reason = LocalHTLCFailureReason::TemporaryChannelFailure; + let data = self.get_htlc_inbound_temp_fail_data(reason); failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::reason(failure_code, data), + HTLCFailReason::reason(reason, data), HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } )); } else { @@ -6293,7 +6300,7 @@ where blinded_failure, cltv_expiry: Some(cltv_expiry), }), payment_hash, - HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data), + HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, htlc_msat_height_data), HTLCDestination::FailedPayment { payment_hash: $payment_hash }, )); continue 'next_forwardable_htlc; @@ -6850,7 +6857,8 @@ where for htlc_source in timed_out_mpp_htlcs.drain(..) 
{ let source = HTLCSource::PreviousHopData(htlc_source.0.clone()); - let reason = HTLCFailReason::from_failure_code(23); + let failure_reason = LocalHTLCFailureReason::MPPTimeout; + let reason = HTLCFailReason::from_failure_code(failure_reason); let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 }; self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver); } @@ -6947,14 +6955,14 @@ where /// /// This is for failures on the channel on which the HTLC was *received*, not failures /// forwarding - fn get_htlc_inbound_temp_fail_data(&self, err_code: u16) -> Vec { - debug_assert_eq!(err_code & 0x1000, 0x1000); - debug_assert_ne!(err_code, 0x1000|11); - debug_assert_ne!(err_code, 0x1000|12); - debug_assert_ne!(err_code, 0x1000|13); + fn get_htlc_inbound_temp_fail_data(&self, reason: LocalHTLCFailureReason) -> Vec { + debug_assert!(reason.is_temporary()); + debug_assert!(reason != LocalHTLCFailureReason::AmountBelowMinimum); + debug_assert!(reason != LocalHTLCFailureReason::FeeInsufficient); + debug_assert!(reason != LocalHTLCFailureReason::IncorrectCLTVExpiry); // at capacity, we write fields `disabled_flags` and `len` let mut enc = VecWriter(Vec::with_capacity(4)); - if err_code == 0x1000 | 20 { + if reason == LocalHTLCFailureReason::ChannelDisabled { // No flags for `disabled_flags` are currently defined so they're always two zero bytes. 
// See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008 0u16.write(&mut enc).expect("Writes cannot fail"); @@ -6971,7 +6979,7 @@ where &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId, counterparty_node_id: &PublicKey ) { - let (failure_code, onion_failure_data) = { + let (failure_reason, onion_failure_data) = { let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -6979,22 +6987,22 @@ where match peer_state.channel_by_id.entry(channel_id) { hash_map::Entry::Occupied(chan_entry) => { if let Some(_chan) = chan_entry.get().as_funded() { - let failure_code = 0x1000|7; - let data = self.get_htlc_inbound_temp_fail_data(failure_code); - (failure_code, data) + let reason = LocalHTLCFailureReason::TemporaryChannelFailure; + let data = self.get_htlc_inbound_temp_fail_data(reason); + (reason, data) } else { // We shouldn't be trying to fail holding cell HTLCs on an unfunded channel. debug_assert!(false); - (0x4000|10, Vec::new()) + (LocalHTLCFailureReason::UnknownNextPeer, Vec::new()) } }, - hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) + hash_map::Entry::Vacant(_) => (LocalHTLCFailureReason::UnknownNextPeer, Vec::new()) } - } else { (0x4000|10, Vec::new()) } + } else { (LocalHTLCFailureReason::UnknownNextPeer, Vec::new()) } }; for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) 
{ - let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone()); + let reason = HTLCFailReason::reason(failure_reason, onion_failure_data.clone()); let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver); } @@ -7043,7 +7051,7 @@ where ); let failure = match blinded_failure { Some(BlindedFailure::FromIntroductionNode) => { - let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]); + let blinded_onion_error = HTLCFailReason::reason(LocalHTLCFailureReason::InvalidOnionBlinding, vec![0; 32]); let err_packet = blinded_onion_error.get_encrypted_failure_packet( incoming_packet_shared_secret, phantom_shared_secret ); @@ -7052,7 +7060,7 @@ where Some(BlindedFailure::FromBlindedNode) => { HTLCForwardInfo::FailMalformedHTLC { htlc_id: *htlc_id, - failure_code: INVALID_ONION_BLINDING, + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), sha256_of_onion: [0; 32] } }, @@ -7242,7 +7250,7 @@ where let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes()); let source = HTLCSource::PreviousHopData(htlc.prev_hop); - let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data); + let reason = HTLCFailReason::reason(LocalHTLCFailureReason::IncorrectPaymentDetails, htlc_msat_height_data); let receiver = HTLCDestination::FailedPayment { payment_hash }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } @@ -8781,7 +8789,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } for htlc_source in dropped_htlcs.drain(..) 
{ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; - let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::DroppedPending); self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } if let Some(shutdown_res) = finish_shutdown { @@ -8982,7 +8990,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ try_channel_entry!(self, peer_state, Err(chan_err), chan_entry); } if let Some(chan) = chan_entry.get_mut().as_funded_mut() { - try_channel_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_entry); + try_channel_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code.into(), msg.sha256_of_onion.to_vec())), chan_entry); } else { return try_channel_entry!(self, peer_state, Err(ChannelError::close( "Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_entry); @@ -9122,7 +9130,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }); failed_intercept_forwards.push((htlc_source, forward_info.payment_hash, - HTLCFailReason::from_failure_code(0x4000 | 10), + HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer), HTLCDestination::InvalidForward { requested_forward_scid: scid }, )); } @@ -9606,8 +9614,9 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ ); } else { log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); + let failure_reason = LocalHTLCFailureReason::ChannelClosed; let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; - let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + let reason = HTLCFailReason::from_failure_code(failure_reason); self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); } }, @@ -11697,9 +11706,9 @@ where let res = f(funded_channel); if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res { for (source, payment_hash) in timed_out_pending_htlcs.drain(..) { - let failure_code = 0x1000|14; /* expiry_too_soon */ - let data = self.get_htlc_inbound_temp_fail_data(failure_code); - timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data), + let reason = LocalHTLCFailureReason::CLTVExpiryTooSoon; + let data = self.get_htlc_inbound_temp_fail_data(reason); + timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(reason, data), HTLCDestination::NextHopChannel { node_id: Some(funded_channel.context.get_counterparty_node_id()), channel_id: funded_channel.context.channel_id() })); } let logger = WithChannelContext::from(&self.logger, &funded_channel.context, None); @@ -11822,8 +11831,9 @@ where let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); htlc_msat_height_data.extend_from_slice(&height.to_be_bytes()); + let reason = LocalHTLCFailureReason::PaymentClaimBuffer; timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), - HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data), + HTLCFailReason::reason(reason, htlc_msat_height_data), HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); false } else { true } @@ -11852,7 +11862,7 @@ where _ => unreachable!(), }; 
timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash, - HTLCFailReason::from_failure_code(0x2000 | 2), + HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ForwardExpiryBuffer), HTLCDestination::InvalidForward { requested_forward_scid })); let logger = WithContext::from( &self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash) @@ -14926,8 +14936,9 @@ where for htlc_source in failed_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; + let failure_reason = LocalHTLCFailureReason::DroppedPending; let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; - let reason = HTLCFailReason::from_failure_code(0x4000 | 8); + let reason = HTLCFailReason::from_failure_code(failure_reason); channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } @@ -14959,7 +14970,7 @@ mod tests { use crate::ln::channelmanager::{create_recv_pending_htlc_info, inbound_payment, ChannelConfigOverrides, HTLCForwardInfo, InterceptId, PaymentId, RecipientOnionFields}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, AcceptChannel, ErrorAction, MessageSendEvent}; - use crate::ln::onion_utils; + use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::Retry; use crate::prelude::*; use crate::routing::router::{PaymentParameters, RouteParameters, find_route}; @@ -15948,12 +15959,12 @@ mod tests { // Check that if the amount we received + the penultimate hop extra fee is less than the sender // intended amount, we fail the payment. let current_height: u32 = node[0].node.best_block.read().unwrap().height; - if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) = + if let Err(crate::ln::channelmanager::InboundHTLCErr { reason, .. 
}) = create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]), sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat), current_height) { - assert_eq!(err_code, 19); + assert_eq!(reason, LocalHTLCFailureReason::FinalIncorrectHTLCAmount); } else { panic!(); } // If amt_received + extra_fee is equal to the sender intended amount, we're fine. diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 64775d9e0f7..0d3ce0432c7 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -24,6 +24,7 @@ use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEven use crate::ln::outbound_payment::Retry; use crate::ln::peer_handler::IgnoringMessageHandler; use crate::onion_message::messenger::OnionMessenger; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate}; use crate::routing::router::{self, PaymentParameters, Route, RouteParameters}; use crate::sign::{EntropySource, RandomBytes}; @@ -2488,7 +2489,7 @@ pub fn expect_probe_successful_events(node: &Node, mut probe_results: Vec<(Payme } pub struct PaymentFailedConditions<'a> { - pub(crate) expected_htlc_error_data: Option<(u16, &'a [u8])>, + pub(crate) expected_htlc_error_data: Option<(LocalHTLCFailureReason, &'a [u8])>, pub(crate) expected_blamed_scid: Option, pub(crate) expected_blamed_chan_closed: Option, pub(crate) expected_mpp_parts_remain: bool, @@ -2517,8 +2518,8 @@ impl<'a> PaymentFailedConditions<'a> { self.expected_blamed_chan_closed = Some(closed); self } - pub fn expected_htlc_error_data(mut self, code: u16, data: &'a [u8]) -> Self { - self.expected_htlc_error_data = Some((code, data)); + pub fn expected_htlc_error_data(mut self, reason: LocalHTLCFailureReason, data: &'a [u8]) -> Self { + self.expected_htlc_error_data = Some((reason, data)); self } pub fn retry_expected(mut self) -> Self { 
@@ -2539,11 +2540,11 @@ macro_rules! expect_payment_failed_with_update { #[cfg(any(test, feature = "_externalize_tests"))] macro_rules! expect_payment_failed { - ($node: expr, $expected_payment_hash: expr, $payment_failed_permanently: expr $(, $expected_error_code: expr, $expected_error_data: expr)*) => { + ($node: expr, $expected_payment_hash: expr, $payment_failed_permanently: expr $(, $expected_error_reason: expr, $expected_error_data: expr)*) => { #[allow(unused_mut)] let mut conditions = $crate::ln::functional_test_utils::PaymentFailedConditions::new(); $( - conditions = conditions.expected_htlc_error_data($expected_error_code, &$expected_error_data); + conditions = conditions.expected_htlc_error_data($expected_error_reason, &$expected_error_data); )* $crate::ln::functional_test_utils::expect_payment_failed_conditions(&$node, $expected_payment_hash, $payment_failed_permanently, conditions); }; @@ -2564,8 +2565,9 @@ pub fn expect_payment_failed_conditions_event<'a, 'b, 'c, 'd, 'e>( { assert!(error_code.is_some(), "expected error_code.is_some() = true"); assert!(error_data.is_some(), "expected error_data.is_some() = true"); + let reason: LocalHTLCFailureReason = error_code.unwrap().into(); if let Some((code, data)) = conditions.expected_htlc_error_data { - assert_eq!(error_code.unwrap(), code, "unexpected error code"); + assert_eq!(reason, code, "unexpected error code"); assert_eq!(&error_data.as_ref().unwrap()[..], data, "unexpected error data"); } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 3a365919a46..adc218c9dd4 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -17,6 +17,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor; use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE}; use 
crate::chain::transaction::OutPoint; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; use crate::events::bump_transaction::WalletSource; use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; @@ -4760,7 +4761,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { // 100_000 msat as u64, followed by the height at which we failed back above let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec(); expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes()); - expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]); + expect_payment_failed!(nodes[0], our_payment_hash, true, LocalHTLCFailureReason::IncorrectPaymentDetails, &expected_failure_data[..]); } #[xtest(feature = "_externalize_tests")] @@ -7725,7 +7726,7 @@ pub fn test_check_htlc_underpaying() { // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32 let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec(); expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes()); - expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]); + expect_payment_failed!(nodes[0], our_payment_hash, true, LocalHTLCFailureReason::IncorrectPaymentDetails, &expected_failure_data[..]); } #[xtest(feature = "_externalize_tests")] @@ -8918,7 +8919,7 @@ pub fn test_bad_secret_hash() { } } - let expected_error_code = 0x4000|15; // incorrect_or_unknown_payment_details + let expected_error_code = LocalHTLCFailureReason::IncorrectPaymentDetails; // Error data is the HTLC value (100,000) and current block height let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8]; diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index dc87200d300..037aefe0da4 100644 --- 
a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -46,7 +46,7 @@ pub mod wire; #[allow(dead_code)] // TODO(dual_funding): Remove once contribution to V2 channels is enabled. pub(crate) mod interactivetxs; -pub use onion_utils::create_payment_onion; +pub use onion_utils::{create_payment_onion, LocalHTLCFailureReason}; // Older rustc (which we support) refuses to let us call the get_payment_preimage_hash!() macro // without the node parameter being mut. This is incorrect, and thus newer rustcs will complain // about an unnecessary mut. Thus, we silence the unused_mut warning in two test modules below. diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 46661df6807..5637a2bdc9e 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -18,7 +18,7 @@ use crate::ln::channelmanager::{BlindedFailure, BlindedForward, CLTV_FAR_FAR_AWA use crate::types::features::BlindedHopFeatures; use crate::ln::msgs; use crate::ln::onion_utils; -use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING, ONION_DATA_LEN}; +use crate::ln::onion_utils::{HTLCFailReason, ONION_DATA_LEN, LocalHTLCFailureReason}; use crate::sign::{NodeSigner, Recipient}; use crate::util::logger::Logger; @@ -31,7 +31,7 @@ use core::ops::Deref; #[derive(Clone, Debug, Hash, PartialEq, Eq)] pub struct InboundHTLCErr { /// BOLT 4 error code. - pub err_code: u16, + pub reason: LocalHTLCFailureReason, /// Data attached to this error. pub err_data: Vec, /// Error message text. @@ -105,7 +105,7 @@ pub(super) fn create_fwd_pending_htlc_info( // unreachable right now since we checked it in `decode_update_add_htlc_onion`. InboundHTLCErr { msg: "Underflow calculating outbound amount or cltv value for blinded forward", - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, err_data: vec![0; 32], } })?; @@ -115,14 +115,14 @@ pub(super) fn create_fwd_pending_htlc_info( onion_utils::Hop::Receive { .. 
} | onion_utils::Hop::BlindedReceive { .. } => return Err(InboundHTLCErr { msg: "Final Node OnionHopData provided for us as an intermediary node", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, err_data: Vec::new(), }), #[cfg(trampoline)] onion_utils::Hop::TrampolineReceive { .. } | onion_utils::Hop::TrampolineBlindedReceive { .. } => return Err(InboundHTLCErr { msg: "Final Node OnionHopData provided for us as an intermediary node", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, err_data: Vec::new(), }), #[cfg(trampoline)] @@ -150,7 +150,7 @@ pub(super) fn create_fwd_pending_htlc_info( // unreachable right now since we checked it in `decode_update_add_htlc_onion`. InboundHTLCErr { msg: "Underflow calculating outbound amount or cltv value for blinded forward", - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, err_data: vec![0; 32], } })?; @@ -198,7 +198,7 @@ pub(super) fn create_fwd_pending_htlc_info( Some(Ok(pubkey)) => pubkey, _ => return Err(InboundHTLCErr { msg: "Missing next Trampoline hop pubkey from intermediate Trampoline forwarding data", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidTrampolineHop, err_data: Vec::new(), }), }; @@ -262,7 +262,7 @@ pub(super) fn create_recv_pending_htlc_info( ) .map_err(|()| { InboundHTLCErr { - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, err_data: vec![0; 32], msg: "Amount or cltv_expiry violated blinded payment constraints", } @@ -276,14 +276,14 @@ pub(super) fn create_recv_pending_htlc_info( onion_utils::Hop::TrampolineReceive { .. } | onion_utils::Hop::TrampolineBlindedReceive { .. } => todo!(), onion_utils::Hop::Forward { .. } => { return Err(InboundHTLCErr { - err_code: 0x4000|22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, err_data: Vec::new(), msg: "Got non final data with an HMAC of 0", }) }, onion_utils::Hop::BlindedForward { .. 
} => { return Err(InboundHTLCErr { - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, err_data: vec![0; 32], msg: "Got blinded non final data with an HMAC of 0", }) @@ -291,7 +291,7 @@ pub(super) fn create_recv_pending_htlc_info( #[cfg(trampoline)] onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. } => { return Err(InboundHTLCErr { - err_code: 0x4000|22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, err_data: Vec::new(), msg: "Got Trampoline non final data with an HMAC of 0", }) @@ -301,7 +301,7 @@ pub(super) fn create_recv_pending_htlc_info( if onion_cltv_expiry > cltv_expiry { return Err(InboundHTLCErr { msg: "Upstream node set CLTV to less than the CLTV set by the sender", - err_code: 18, + reason: LocalHTLCFailureReason::FinalIncorrectCLTVExpiry, err_data: cltv_expiry.to_be_bytes().to_vec() }) } @@ -317,7 +317,7 @@ pub(super) fn create_recv_pending_htlc_info( err_data.extend_from_slice(&amt_msat.to_be_bytes()); err_data.extend_from_slice(¤t_height.to_be_bytes()); return Err(InboundHTLCErr { - err_code: 0x4000 | 15, err_data, + reason: LocalHTLCFailureReason::PaymentClaimBuffer, err_data, msg: "The final CLTV expiry is too soon to handle", }); } @@ -326,7 +326,7 @@ pub(super) fn create_recv_pending_htlc_info( amt_msat.saturating_add(counterparty_skimmed_fee_msat.unwrap_or(0))) { return Err(InboundHTLCErr { - err_code: 19, + reason: LocalHTLCFailureReason::FinalIncorrectHTLCAmount, err_data: amt_msat.to_be_bytes().to_vec(), msg: "Upstream node sent less than we were supposed to receive in payment", }); @@ -341,7 +341,7 @@ pub(super) fn create_recv_pending_htlc_info( let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array()); if hashed_preimage != payment_hash { return Err(InboundHTLCErr { - err_code: 0x4000|22, + reason: LocalHTLCFailureReason::InvalidKeysendPreimage, err_data: Vec::new(), msg: "Payment preimage didn't match payment hash", }); @@ 
-369,7 +369,7 @@ pub(super) fn create_recv_pending_htlc_info( } } else { return Err(InboundHTLCErr { - err_code: 0x4000|0x2000|3, + reason: LocalHTLCFailureReason::PaymentSecretRequired, err_data: Vec::new(), msg: "We require payment_secrets", }); @@ -404,13 +404,13 @@ where { let (hop, next_packet_details_opt) = decode_incoming_update_add_htlc_onion(msg, node_signer, logger, secp_ctx - ).map_err(|e| { - let (err_code, err_data) = match e { - HTLCFailureMsg::Malformed(m) => (m.failure_code, Vec::new()), - HTLCFailureMsg::Relay(r) => (0x4000 | 22, r.reason), + ).map_err(|(msg, failure_reason)| { + let (reason, err_data) = match msg { + HTLCFailureMsg::Malformed(_) => (failure_reason, Vec::new()), + HTLCFailureMsg::Relay(r) => (LocalHTLCFailureReason::InvalidOnionPayload, r.reason), }; let msg = "Failed to decode update add htlc onion"; - InboundHTLCErr { msg, err_code, err_data } + InboundHTLCErr { msg, reason, err_data } })?; Ok(match hop { onion_utils::Hop::Forward { shared_secret, .. } | @@ -422,17 +422,17 @@ where // Forward should always include the next hop details None => return Err(InboundHTLCErr { msg: "Failed to decode update add htlc onion", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, err_data: Vec::new(), }), }; - if let Err((err_msg, code)) = check_incoming_htlc_cltv( + if let Err((err_msg, reason)) = check_incoming_htlc_cltv( cur_height, outgoing_cltv_value, msg.cltv_expiry, ) { return Err(InboundHTLCErr { msg: err_msg, - err_code: code, + reason, err_data: Vec::new(), }); } @@ -468,32 +468,32 @@ pub(super) struct NextPacketDetails { pub(super) fn decode_incoming_update_add_htlc_onion( msg: &msgs::UpdateAddHTLC, node_signer: NS, logger: L, secp_ctx: &Secp256k1, -) -> Result<(onion_utils::Hop, Option), HTLCFailureMsg> +) -> Result<(onion_utils::Hop, Option), (HTLCFailureMsg, LocalHTLCFailureReason)> where NS::Target: NodeSigner, L::Target: Logger, { macro_rules! 
return_malformed_err { - ($msg: expr, $err_code: expr) => { + ($msg: expr, $reason: expr) => { { log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg); - let (sha256_of_onion, failure_code) = if msg.blinding_point.is_some() { - ([0; 32], INVALID_ONION_BLINDING) + let (sha256_of_onion, failure_reason) = if msg.blinding_point.is_some() { + ([0; 32], LocalHTLCFailureReason::InvalidOnionBlinding) } else { - (Sha256::hash(&msg.onion_routing_packet.hop_data).to_byte_array(), $err_code) + (Sha256::hash(&msg.onion_routing_packet.hop_data).to_byte_array(), $reason) }; - return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { + return Err((HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, sha256_of_onion, - failure_code, - })); + failure_code: failure_reason.failure_code(), + }), failure_reason)); } } } if let Err(_) = msg.onion_routing_packet.public_key { - return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6); + return_malformed_err!("invalid ephemeral pubkey", LocalHTLCFailureReason::InvalidOnionKey); } if msg.onion_routing_packet.version != 0 { @@ -503,22 +503,22 @@ where //receiving node would have to brute force to figure out which version was put in the //packet by the node that send us the message, in the case of hashing the hop_data, the //node knows the HMAC matched, so they already know what is there... 
- return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4); + return_malformed_err!("Unknown onion packet version", LocalHTLCFailureReason::InvalidOnionVersion) } - let encode_relay_error = |message: &str, err_code: u16, shared_secret: [u8; 32], trampoline_shared_secret: Option<[u8; 32]>, data: &[u8]| { + let encode_relay_error = |message: &str, reason: LocalHTLCFailureReason, shared_secret: [u8; 32], trampoline_shared_secret: Option<[u8; 32]>, data: &[u8]| { if msg.blinding_point.is_some() { - return_malformed_err!(message, INVALID_ONION_BLINDING) + return_malformed_err!(message, LocalHTLCFailureReason::InvalidOnionBlinding) } log_info!(logger, "Failed to accept/forward incoming HTLC: {}", message); - let failure = HTLCFailReason::reason(err_code, data.to_vec()) + let failure = HTLCFailReason::reason(reason, data.to_vec()) .get_encrypted_failure_packet(&shared_secret, &trampoline_shared_secret); - return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { + return Err((HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, reason: failure.data, - })); + }), reason)); }; let next_hop = match onion_utils::decode_next_payment_hop( @@ -526,11 +526,11 @@ where msg.payment_hash, msg.blinding_point, node_signer ) { Ok(res) => res, - Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { - return_malformed_err!(err_msg, err_code); + Err(onion_utils::OnionDecodeErr::Malformed { err_msg, reason }) => { + return_malformed_err!(err_msg, reason); }, - Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code, shared_secret, trampoline_shared_secret }) => { - return encode_relay_error(err_msg, err_code, shared_secret.secret_bytes(), trampoline_shared_secret.map(|tss| tss.secret_bytes()), &[0; 0]); + Err(onion_utils::OnionDecodeErr::Relay { err_msg, reason, shared_secret, trampoline_shared_secret }) => { + return encode_relay_error(err_msg, reason, shared_secret.secret_bytes(), 
trampoline_shared_secret.map(|tss| tss.secret_bytes()), &[0; 0]); }, }; @@ -550,7 +550,7 @@ where Ok((amt, cltv)) => (amt, cltv), Err(()) => { return encode_relay_error("Underflow calculating outbound amount or cltv value for blinded forward", - INVALID_ONION_BLINDING, shared_secret.secret_bytes(), None, &[0; 32]); + LocalHTLCFailureReason::InvalidOnionBlinding, shared_secret.secret_bytes(), None, &[0; 32]); } }; let next_packet_pubkey = onion_utils::next_hop_pubkey(&secp_ctx, @@ -579,21 +579,19 @@ where pub(super) fn check_incoming_htlc_cltv( cur_height: u32, outgoing_cltv_value: u32, cltv_expiry: u32 -) -> Result<(), (&'static str, u16)> { +) -> Result<(), (&'static str, LocalHTLCFailureReason)> { if (cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { - return Err(( - "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", - 0x1000 | 13, // incorrect_cltv_expiry - )); + return Err(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", + LocalHTLCFailureReason::IncorrectCLTVExpiry)); } // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, // but we want to be robust wrt to counterparty packet sanitization (see // HTLC_FAIL_BACK_BUFFER rationale). 
- if cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon - return Err(("CLTV expiry is too close", 0x1000 | 14)); + if cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { + return Err(("CLTV expiry is too close", LocalHTLCFailureReason::CLTVExpiryTooSoon)); } - if cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far - return Err(("CLTV expiry is too far in the future", 21)); + if cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { + return Err(("CLTV expiry is too far in the future", LocalHTLCFailureReason::CLTVExpiryTooFar)); } // If the HTLC expires ~now, don't bother trying to forward it to our // counterparty. They should fail it anyway, but we don't want to bother with @@ -604,7 +602,7 @@ pub(super) fn check_incoming_htlc_cltv( // but there is no need to do that, and since we're a bit conservative with our // risk threshold it just results in failing to forward payments. if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 { - return Err(("Outgoing CLTV value is too soon", 0x1000 | 14)); + return Err(("Outgoing CLTV value is too soon", LocalHTLCFailureReason::OutgoingCLTVTooSoon)); } Ok(()) diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 127d4a45588..274b08a7007 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -17,7 +17,7 @@ use crate::events::{Event, HTLCDestination, PathFailure, PaymentFailureReason}; use crate::types::payment::{PaymentHash, PaymentSecret}; use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS; use crate::ln::channelmanager::{HTLCForwardInfo, FailureCode, CLTV_FAR_FAR_AWAY, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA, PendingAddHTLCInfo, PendingHTLCInfo, PendingHTLCRouting, PaymentId, RecipientOnionFields}; -use crate::ln::onion_utils; +use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::routing::gossip::{NetworkUpdate, RoutingFees}; use 
crate::routing::router::{get_route, PaymentParameters, Route, RouteParameters, RouteHint, RouteHintHop, Path, TrampolineHop, BlindedTail, RouteHop}; use crate::types::features::{InitFeatures, Bolt11InvoiceFeatures}; @@ -51,7 +51,7 @@ use crate::ln::onion_utils::{construct_trampoline_onion_keys, construct_trampoli use super::msgs::OnionErrorPacket; -fn run_onion_failure_test(_name: &str, test_case: u8, nodes: &Vec, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option, expected_channel_update: Option, expected_short_channel_id: Option, expected_htlc_destination: Option) +fn run_onion_failure_test(_name: &str, test_case: u8, nodes: &Vec, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option, expected_channel_update: Option, expected_short_channel_id: Option, expected_htlc_destination: Option) where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC), F2: FnMut(), { @@ -68,7 +68,7 @@ fn run_onion_failure_test(_name: &str, test_case: u8, nodes: &Vec, fn run_onion_failure_test_with_fail_intercept( _name: &str, test_case: u8, nodes: &Vec, route: &Route, payment_hash: &PaymentHash, payment_secret: &PaymentSecret, mut callback_msg: F1, mut callback_fail: F2, - mut callback_node: F3, expected_retryable: bool, expected_error_code: Option, + mut callback_node: F3, expected_retryable: bool, expected_error_reason: Option, expected_channel_update: Option, expected_short_channel_id: Option, expected_htlc_destination: Option, ) @@ -188,7 +188,10 @@ fn run_onion_failure_test_with_fail_intercept( assert_eq!(events.len(), 2); if let &Event::PaymentPathFailed { ref payment_failed_permanently, ref short_channel_id, ref error_code, failure: PathFailure::OnPath { ref network_update }, .. 
} = &events[0] { assert_eq!(*payment_failed_permanently, !expected_retryable); - assert_eq!(*error_code, expected_error_code); + assert_eq!(error_code.is_none(), expected_error_reason.is_none()); + if let Some(expected_reason) = expected_error_reason { + assert_eq!(expected_reason, error_code.unwrap().into()) + } if expected_channel_update.is_some() { match network_update { Some(update) => match update { @@ -277,11 +280,6 @@ impl Writeable for BogusOnionHopData { } } -const BADONION: u16 = 0x8000; -const PERM: u16 = 0x4000; -const NODE: u16 = 0x2000; -const UPDATE: u16 = 0x1000; - #[test] fn test_fee_failures() { // Tests that the fee required when forwarding remains consistent over time. This was @@ -314,7 +312,7 @@ fn test_fee_failures() { let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; - }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), + }, || {}, true, Some(LocalHTLCFailureReason::FeeInsufficient), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 })); // In an earlier version, we spuriously failed to forward payments if the expected feerate @@ -380,7 +378,7 @@ fn test_onion_failure() { // describing a length-1 TLV payload, which is obviously bogus. 
new_payloads[0].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, true, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + }, ||{}, true, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); // final node failure let short_channel_id = channels[1].0.contents.short_channel_id; @@ -399,7 +397,7 @@ fn test_onion_failure() { // length-1 TLV payload, which is obviously bogus. new_payloads[1].data[0] = 1; msg.onion_routing_packet = onion_utils::construct_onion_packet_with_writable_hopdata(new_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - }, ||{}, false, Some(PERM|22), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + }, ||{}, false, Some(LocalHTLCFailureReason::InvalidOnionPayload), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); // the following three with run_onion_failure_test_with_fail_intercept() test only the origin node // receiving simulated fail messages @@ -411,20 +409,20 @@ fn test_onion_failure() { // and tamper returning error message let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), NODE|2, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryNodeFailure, &[0;0]); msg.reason = failure.data; - }, ||{}, true, Some(NODE|2), 
Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(LocalHTLCFailureReason::TemporaryNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("temporary_node_failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { // and tamper returning error message let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), NODE|2, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryNodeFailure, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: false}), Some(route.paths[0].hops[1].short_channel_id), None); + }, true, Some(LocalHTLCFailureReason::TemporaryNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: false}), Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // intermediate node failure @@ -433,19 +431,19 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|2, &[0;0]); + let failure = 
onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::PermanentNodeFailure, &[0;0]); msg.reason = failure.data; - }, ||{}, true, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(LocalHTLCFailureReason::PermanentNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("permanent_node_failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), PERM|NODE|2, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::PermanentNodeFailure, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, false, Some(PERM|NODE|2), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); + }, false, Some(LocalHTLCFailureReason::PermanentNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // intermediate node failure @@ -454,34 +452,34 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], 
&session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|NODE|3, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::RequiredNodeFeature, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, true, Some(LocalHTLCFailureReason::RequiredNodeFeature), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); // final node failure run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), PERM|NODE|3, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::RequiredNodeFeature, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, false, Some(PERM|NODE|3), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); + }, false, Some(LocalHTLCFailureReason::RequiredNodeFeature), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // Our immediate peer sent UpdateFailMalformedHTLC 
because it couldn't understand the onion in // the UpdateAddHTLC that we sent. let short_channel_id = channels[0].0.contents.short_channel_id; run_onion_failure_test("invalid_onion_version", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.version = 1; }, ||{}, true, - Some(BADONION|PERM|4), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionVersion), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); run_onion_failure_test("invalid_onion_hmac", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.hmac = [3; 32]; }, ||{}, true, - Some(BADONION|PERM|5), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionHMAC), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); run_onion_failure_test("invalid_onion_key", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.onion_routing_packet.public_key = Err(secp256k1::Error::InvalidPublicKey);}, ||{}, true, - Some(BADONION|PERM|6), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); + Some(LocalHTLCFailureReason::InvalidOnionKey), None, Some(short_channel_id), Some(HTLCDestination::InvalidOnion)); let short_channel_id = channels[1].0.contents.short_channel_id; let chan_update = ChannelUpdate::dummy(short_channel_id); @@ -495,9 +493,9 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryChannelFailure, &err_data); msg.reason = failure.data; - }, ||{}, true, Some(UPDATE|7), + }, ||{}, true, 
Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); @@ -508,9 +506,9 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), UPDATE|7, &err_data_without_type); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryChannelFailure, &err_data_without_type); msg.reason = failure.data; - }, ||{}, true, Some(UPDATE|7), + }, ||{}, true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); @@ -520,10 +518,10 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|8, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::PermanentChannelFailure, &[0;0]); msg.reason = failure.data; // short_channel_id from the processing node - }, ||{}, true, Some(PERM|8), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(LocalHTLCFailureReason::PermanentChannelFailure), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; 
run_onion_failure_test_with_fail_intercept("required_channel_feature_missing", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -531,15 +529,15 @@ fn test_onion_failure() { }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), PERM|9, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::RequiredChannelFeature, &[0;0]); msg.reason = failure.data; // short_channel_id from the processing node - }, ||{}, true, Some(PERM|9), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(LocalHTLCFailureReason::RequiredChannelFeature), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); let mut bogus_route = route.clone(); bogus_route.paths[0].hops[1].short_channel_id -= 1; let short_channel_id = bogus_route.paths[0].hops[1].short_channel_id; - run_onion_failure_test("unknown_next_peer", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(PERM|10), + run_onion_failure_test("unknown_next_peer", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(LocalHTLCFailureReason::UnknownNextPeer), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent:true}), Some(short_channel_id), Some(HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id })); let short_channel_id = channels[1].0.contents.short_channel_id; @@ -549,7 +547,7 @@ fn test_onion_failure() { let mut bogus_route = route.clone(); let route_len = bogus_route.paths[0].hops.len(); bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward; - 
run_onion_failure_test("amount_below_minimum", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(UPDATE|11), + run_onion_failure_test("amount_below_minimum", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(LocalHTLCFailureReason::AmountBelowMinimum), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); @@ -568,13 +566,13 @@ fn test_onion_failure() { let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; - }, || {}, true, Some(UPDATE|12), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, || {}, true, Some(LocalHTLCFailureReason::FeeInsufficient), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("incorrect_cltv_expiry", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { // need to violate: cltv_expiry - cltv_expiry_delta >= outgoing_cltv_value msg.cltv_expiry -= 1; - }, || {}, true, Some(UPDATE|13), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, || {}, true, Some(LocalHTLCFailureReason::IncorrectCLTVExpiry), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("expiry_too_soon", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -582,13 +580,13 @@ fn test_onion_failure() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); 
connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); - }, ||{}, true, Some(UPDATE|14), + }, ||{}, true, Some(LocalHTLCFailureReason::CLTVExpiryTooSoon), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test("unknown_payment_hash", 2, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[2].node.fail_htlc_backwards(&payment_hash); - }, false, Some(PERM|15), None, None, None); + }, false, Some(LocalHTLCFailureReason::IncorrectPaymentDetails), None, None, None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); run_onion_failure_test("final_expiry_too_soon", 1, &nodes, &route, &payment_hash, &payment_secret, |msg| { @@ -596,7 +594,7 @@ fn test_onion_failure() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); - }, || {}, false, Some(0x4000 | 15), None, None, Some(HTLCDestination::FailedPayment { payment_hash })); + }, || {}, false, Some(LocalHTLCFailureReason::IncorrectPaymentDetails), None, None, Some(HTLCDestination::FailedPayment { payment_hash })); run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[1].node.process_pending_update_add_htlcs(); @@ -609,7 +607,7 @@ fn test_onion_failure() { } } } - }, true, Some(18), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); + }, true, Some(LocalHTLCFailureReason::FinalIncorrectCLTVExpiry), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { 
nodes[1].node.process_pending_update_add_htlcs(); @@ -623,14 +621,14 @@ fn test_onion_failure() { } } } - }, true, Some(19), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); + }, true, Some(LocalHTLCFailureReason::FinalIncorrectHTLCAmount), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { // disconnect event to the channel between nodes[1] ~ nodes[2] nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); - }, true, Some(UPDATE|7), + }, true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { @@ -641,7 +639,7 @@ fn test_onion_failure() { } nodes[1].node.get_and_clear_pending_msg_events(); nodes[2].node.get_and_clear_pending_msg_events(); - }, true, Some(UPDATE|20), + }, true, Some(LocalHTLCFailureReason::ChannelDisabled), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), Some(short_channel_id), Some(next_hop_failure.clone())); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); @@ -658,17 +656,17 @@ fn test_onion_failure() { let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); msg.cltv_expiry = htlc_cltv; msg.onion_routing_packet = onion_packet; - }, ||{}, true, Some(21), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, 
Some(LocalHTLCFailureReason::CLTVExpiryTooFar), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); run_onion_failure_test_with_fail_intercept("mpp_timeout", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { // Tamper returning error message let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), 23, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::MPPTimeout, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(23), None, None, None); + }, true, Some(LocalHTLCFailureReason::MPPTimeout), None, None, None); run_onion_failure_test_with_fail_intercept("bogus err packet with valid hmac", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -716,7 +714,7 @@ fn test_onion_failure() { onion_utils::test_crypt_failure_packet( &onion_keys[0].shared_secret.as_ref(), &mut onion_error); msg.reason = onion_error.data; - }, || {}, true, Some(0x1000|7), + }, || {}, true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: false, @@ -744,7 +742,7 @@ fn test_onion_failure() { onion_utils::test_crypt_failure_packet( &onion_keys[1].shared_secret.as_ref(), &mut onion_error); msg.reason = onion_error.data; - }, || nodes[2].node.fail_htlc_backwards(&payment_hash), true, Some(0x1000|7), + }, || nodes[2].node.fail_htlc_backwards(&payment_hash), true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id: 
channels[1].0.contents.short_channel_id, is_permanent: false, @@ -895,12 +893,12 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { // We'll be attempting to route payments using the default ChannelUpdate for channels. This will // lead to onion failures at the first hop once we update the ChannelConfig for the // second hop. - let expect_onion_failure = |name: &str, error_code: u16| { + let expect_onion_failure = |name: &str, error_reason: LocalHTLCFailureReason| { let short_channel_id = channel_to_update.1; let network_update = NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }; run_onion_failure_test( name, 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {}, true, - Some(error_code), Some(network_update), Some(short_channel_id), + Some(error_reason), Some(network_update), Some(short_channel_id), Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_to_update.0 }), ); }; @@ -930,7 +928,7 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { // Connect a block, which should expire the previous config, leading to a failure when // forwarding the HTLC. expire_prev_config(); - expect_onion_failure("fee_insufficient", UPDATE|12); + expect_onion_failure("fee_insufficient", LocalHTLCFailureReason::FeeInsufficient); // Redundant updates should not trigger a new ChannelUpdate. 
assert!(update_and_get_channel_update(&config, false, None, false).is_none()); @@ -944,14 +942,14 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { config.forwarding_fee_base_msat = default_config.forwarding_fee_base_msat; config.cltv_expiry_delta = u16::max_value(); assert!(update_and_get_channel_update(&config, true, Some(&msg), true).is_some()); - expect_onion_failure("incorrect_cltv_expiry", UPDATE|13); + expect_onion_failure("incorrect_cltv_expiry", LocalHTLCFailureReason::IncorrectCLTVExpiry); // Reset the proportional fee and increase the CLTV expiry delta which should trigger a new // ChannelUpdate. config.cltv_expiry_delta = default_config.cltv_expiry_delta; config.forwarding_fee_proportional_millionths = u32::max_value(); assert!(update_and_get_channel_update(&config, true, Some(&msg), true).is_some()); - expect_onion_failure("fee_insufficient", UPDATE|12); + expect_onion_failure("fee_insufficient", LocalHTLCFailureReason::FeeInsufficient); // To test persistence of the updated config, we'll re-initialize the ChannelManager. 
let config_after_restart = { @@ -1394,9 +1392,7 @@ fn do_test_fail_htlc_backwards_with_reason(failure_code: FailureCode) { }; let failure_code = failure_code.into(); - let permanent_flag = 0x4000; - let permanent_fail = (failure_code & permanent_flag) != 0; - expect_payment_failed!(nodes[0], payment_hash, permanent_fail, failure_code, failure_data); + expect_payment_failed!(nodes[0], payment_hash, failure_code.is_permanent(), failure_code, failure_data); } @@ -1507,7 +1503,7 @@ fn test_phantom_onion_hmac_failure() { let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) .blamed_chan_closed(true) - .expected_htlc_error_data(0x8000 | 0x4000 | 5, &sha256_of_onion); + .expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionHMAC, &sha256_of_onion); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } @@ -1585,7 +1581,7 @@ fn test_phantom_invalid_onion_payload() { let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) .blamed_chan_closed(true) - .expected_htlc_error_data(0x4000 | 22, &error_data); + .expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionPayload, &error_data); expect_payment_failed_conditions(&nodes[0], payment_hash, true, fail_conditions); } @@ -1643,7 +1639,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { let error_data = expected_cltv.to_be_bytes().to_vec(); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(18, &error_data); + .expected_htlc_error_data(LocalHTLCFailureReason::FinalIncorrectCLTVExpiry, &error_data); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } @@ -1692,7 +1688,7 @@ fn test_phantom_failure_too_low_cltv() { ); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x4000 | 15, &error_data); + .expected_htlc_error_data(LocalHTLCFailureReason::IncorrectPaymentDetails, 
&error_data); expect_payment_failed_conditions(&nodes[0], payment_hash, true, fail_conditions); } @@ -1743,7 +1739,7 @@ fn test_phantom_failure_modified_cltv() { err_data.extend_from_slice(&0u16.to_be_bytes()); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x1000 | 13, &err_data); + .expected_htlc_error_data(LocalHTLCFailureReason::IncorrectCLTVExpiry, &err_data); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } @@ -1790,7 +1786,7 @@ fn test_phantom_failure_expires_too_soon() { let err_data = 0u16.to_be_bytes(); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x1000 | 14, &err_data); + .expected_htlc_error_data(LocalHTLCFailureReason::CLTVExpiryTooSoon, &err_data); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } @@ -1837,7 +1833,7 @@ fn test_phantom_failure_too_low_recv_amt() { error_data.extend_from_slice(&nodes[1].node.best_block.read().unwrap().height.to_be_bytes()); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x4000 | 15, &error_data); + .expected_htlc_error_data(LocalHTLCFailureReason::IncorrectPaymentDetails, &error_data); expect_payment_failed_conditions(&nodes[0], payment_hash, true, fail_conditions); } @@ -1895,7 +1891,7 @@ fn do_test_phantom_dust_exposure_failure(multiplier_dust_limit: bool) { let err_data = 0u16.to_be_bytes(); let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x1000 | 7, &err_data); + .expected_htlc_error_data(LocalHTLCFailureReason::TemporaryChannelFailure, &err_data); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } @@ -1945,6 +1941,6 @@ fn test_phantom_failure_reject_payment() { error_data.extend_from_slice(&nodes[1].node.best_block.read().unwrap().height.to_be_bytes()); 
let mut fail_conditions = PaymentFailedConditions::new() .blamed_scid(phantom_scid) - .expected_htlc_error_data(0x4000 | 15, &error_data); + .expected_htlc_error_data(LocalHTLCFailureReason::IncorrectPaymentDetails, &error_data); expect_payment_failed_conditions(&nodes[0], payment_hash, true, fail_conditions); } diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index d818a3af53a..a336f2a55cb 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -684,8 +684,6 @@ pub(crate) fn set_max_path_length( /// the hops can be of variable length. pub(crate) const ONION_DATA_LEN: usize = 20 * 65; -pub(super) const INVALID_ONION_BLINDING: u16 = 0x8000 | 0x4000 | 24; - #[inline] fn shift_slice_right(arr: &mut [u8], amt: usize) { for i in (amt..arr.len()).rev() { @@ -887,17 +885,18 @@ fn process_chacha(key: &[u8; 32], packet: &mut [u8]) { } fn build_unencrypted_failure_packet( - shared_secret: &[u8], failure_type: u16, failure_data: &[u8], + shared_secret: &[u8], failure_reason: LocalHTLCFailureReason, failure_data: &[u8], ) -> OnionErrorPacket { assert_eq!(shared_secret.len(), 32); assert!(failure_data.len() <= 256 - 2); let um = gen_um_from_shared_secret(&shared_secret); + let failure_code = failure_reason.failure_code(); let failuremsg = { let mut res = Vec::with_capacity(2 + failure_data.len()); - res.push(((failure_type >> 8) & 0xff) as u8); - res.push(((failure_type >> 0) & 0xff) as u8); + res.push(((failure_code >> 8) & 0xff) as u8); + res.push(((failure_code >> 0) & 0xff) as u8); res.extend_from_slice(&failure_data[..]); res }; @@ -916,10 +915,10 @@ fn build_unencrypted_failure_packet( } pub(super) fn build_failure_packet( - shared_secret: &[u8], failure_type: u16, failure_data: &[u8], + shared_secret: &[u8], failure_reason: LocalHTLCFailureReason, failure_data: &[u8], ) -> OnionErrorPacket { let mut onion_error_packet = - build_unencrypted_failure_packet(shared_secret, failure_type, failure_data); + 
build_unencrypted_failure_packet(shared_secret, failure_reason, failure_data); crypt_failure_packet(shared_secret, &mut onion_error_packet); @@ -1004,11 +1003,6 @@ where let mut _error_packet_ret = None; let mut is_from_final_non_blinded_node = false; - const BADONION: u16 = 0x8000; - const PERM: u16 = 0x4000; - const NODE: u16 = 0x2000; - const UPDATE: u16 = 0x1000; - enum ErrorHop<'a> { RouteHop(&'a RouteHop), TrampolineHop(&'a TrampolineHop), @@ -1358,6 +1352,249 @@ where } } +const BADONION: u16 = 0x8000; +const PERM: u16 = 0x4000; +const NODE: u16 = 0x2000; +const UPDATE: u16 = 0x1000; + +/// The reason that a HTLC was failed by the local node. These errors either represent direct, +/// human-readable mappings of BOLT04 error codes or provide additional information that would +/// otherwise be erased by the BOLT04 error code. +/// +/// For example: +/// [`Self::FeeInsufficient`] is a direct representation of its underlying BOLT04 error code. +/// [`Self::PrivateChannelForward`] provides additional information that is not provided by its +/// BOLT04 error code. +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub enum LocalHTLCFailureReason { + /// There has been a temporary processing failure on the node which may resolve on retry. + TemporaryNodeFailure, + /// There has been a permanent processing failure on the node which will not resolve on retry. + PermanentNodeFailure, + /// The HTLC does not implement a feature that is required by our node. + /// + /// The sender may have outdated gossip, or a bug in its implementation. + RequiredNodeFeature, + /// The onion version specified by the HTLC packet is unknown to our node. + InvalidOnionVersion, + /// The integrity of the HTLC packet cannot be verified because it has an invalid HMAC. + InvalidOnionHMAC, + /// The onion packet has an invalid ephemeral key, so the HTLC cannot be processed. + InvalidOnionKey, + /// A temporary forwarding error has occurred which may resolve on retry.
+ TemporaryChannelFailure, + /// A permanent forwarding error has occurred which will not resolve on retry. + PermanentChannelFailure, + /// The HTLC does not implement a feature that is required by our channel for processing. + RequiredChannelFeature, + /// The HTLC's target outgoing channel is not known to our node. + UnknownNextPeer, + /// The HTLC amount is below our advertised htlc_minimum_msat. + /// + /// The sender may have outdated gossip, or a bug in its implementation. + AmountBelowMinimum, + /// The HTLC does not pay sufficient fees. + /// + /// The sender may have outdated gossip, or a bug in its implementation. + FeeInsufficient, + /// The HTLC does not meet the cltv_expiry_delta advertised by our node, set by + /// [`ChannelConfig::cltv_expiry_delta`]. + /// + /// The sender may have outdated gossip, or a bug in its implementation. + /// + /// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta + IncorrectCLTVExpiry, + /// The HTLC expires too close to the current block height to be safely processed. + CLTVExpiryTooSoon, + /// A payment was made to our node that either had incorrect payment information, or was + /// unknown to us. + IncorrectPaymentDetails, + /// The HTLC's expiry is less than the expiry height specified by the sender. + /// + /// The forwarding node has either tampered with this value, or the sending node has an + /// old best block height. + FinalIncorrectCLTVExpiry, + /// The HTLC's amount is less than the amount specified by the sender. + /// + /// The forwarding node has tampered with this value, or has a bug in its implementation. + FinalIncorrectHTLCAmount, + /// The channel has been marked as disabled because the channel peer is offline. + ChannelDisabled, + /// The HTLC expires too far in the future, so it is rejected to avoid the worst-case outcome + /// of funds being held for extended periods of time. + /// + /// Limit set by [`crate::ln::channelmanager::CLTV_FAR_FAR_AWAY`].
+ CLTVExpiryTooFar, + /// The HTLC payload contained in the onion packet could not be understood by our node. + InvalidOnionPayload, + /// The total amount for a multi-part payment did not arrive in time, so the HTLCs partially + /// paying the amount were canceled. + MPPTimeout, + /// Our node was selected as part of a blinded path, but the packet we received was not + /// properly constructed, or had incorrect values for the blinded path. + /// + /// This may happen if the forwarding node tampered with the HTLC or the sender or recipient + /// implementations have a bug. + InvalidOnionBlinding, + /// A HTLC forward was failed back rather than forwarded on the proposed outgoing channel + /// because its expiry is too close to the current block height to leave time to safely claim + /// it on chain if the channel force closes. + ForwardExpiryBuffer, + /// The HTLC was failed because it has invalid trampoline forwarding information. + InvalidTrampolineForward, + /// A HTLC receive was failed back rather than claimed because its expiry is too close to + /// the current block height to leave time to safely claim it on chain if the channel force + /// closes. + PaymentClaimBuffer, + /// The HTLC was failed because accepting it would push our commitment's total amount of dust + /// HTLCs over the limit that we allow to be burned to miner fees if the channel closed while + /// they are unresolved. + DustLimitHolder, + /// The HTLC was failed because accepting it would push our counterparty's total amount of + /// dust (small) HTLCs over the limit that we allow to be burned to miner fees if the channel + /// closes while they are unresolved. + DustLimitCounterparty, + /// The HTLC was failed because it would drop the remote party's channel balance such that it + /// cannot cover the fees it is required to pay at various fee rates. This buffer is maintained + /// so that channels can always maintain reasonable fee rates.
+ FeeSpikeBuffer, + /// The HTLC that requested to be forwarded over a private channel was rejected to prevent + /// revealing the existence of the channel. + PrivateChannelForward, + /// The HTLC was failed because it made a request to forward over the real channel ID of a + /// channel that implements `option_scid_alias` which is a privacy feature to prevent the + /// real channel ID from being revealed. + RealSCIDForward, + /// The HTLC was rejected because our channel has not yet reached sufficient depth to be used. + ChannelNotReady, + /// A keysend payment with a preimage that did not match the HTLC hash was rejected. + InvalidKeysendPreimage, + /// The HTLC was failed because it had an invalid trampoline payload. + InvalidTrampolineHop, + /// A payment was rejected because it did not include the correct payment secret from an + /// invoice. + PaymentSecretRequired, + /// The HTLC was failed because its expiry is too close to the current block height, and we + /// expect that it will immediately be failed back by our downstream peer. + OutgoingCLTVTooSoon, + /// The HTLC was pending on a channel which is now in the process of being closed. + /// It was not fully committed to, so can just be immediately failed back. + DroppedPending, + /// The HTLC was failed back because its channel is closed and it has timed out on chain. + ChannelClosed, + /// UnknownFailureCode represents BOLT04 failure codes that we are not familiar with. We will + /// encounter this if: + /// - A peer sends us a new failure code that LDK has not yet been upgraded to understand. + /// - We read a deprecated failure code from disk that LDK no longer uses. + /// + /// See + /// for latest defined error codes. + UnknownFailureCode { + /// The bolt 04 failure code.
+ code: u16, + }, +} + +impl LocalHTLCFailureReason { + pub(super) fn failure_code(&self) -> u16 { + match self { + Self::TemporaryNodeFailure | Self::ForwardExpiryBuffer => NODE | 2, + Self::PermanentNodeFailure => PERM | NODE | 2, + Self::RequiredNodeFeature | Self::PaymentSecretRequired => PERM | NODE | 3, + Self::InvalidOnionVersion => BADONION | PERM | 4, + Self::InvalidOnionHMAC => BADONION | PERM | 5, + Self::InvalidOnionKey => BADONION | PERM | 6, + Self::TemporaryChannelFailure + | Self::DustLimitHolder + | Self::DustLimitCounterparty + | Self::FeeSpikeBuffer + | Self::ChannelNotReady => UPDATE | 7, + Self::PermanentChannelFailure | Self::ChannelClosed | Self::DroppedPending => PERM | 8, + Self::RequiredChannelFeature => PERM | 9, + Self::UnknownNextPeer + | Self::PrivateChannelForward + | Self::RealSCIDForward + | Self::InvalidTrampolineForward => PERM | 10, + Self::AmountBelowMinimum => UPDATE | 11, + Self::FeeInsufficient => UPDATE | 12, + Self::IncorrectCLTVExpiry => UPDATE | 13, + Self::CLTVExpiryTooSoon | Self::OutgoingCLTVTooSoon => UPDATE | 14, + Self::IncorrectPaymentDetails | Self::PaymentClaimBuffer => PERM | 15, + Self::FinalIncorrectCLTVExpiry => 18, + Self::FinalIncorrectHTLCAmount => 19, + Self::ChannelDisabled => UPDATE | 20, + Self::CLTVExpiryTooFar => 21, + Self::InvalidOnionPayload + | Self::InvalidTrampolineHop + | Self::InvalidKeysendPreimage => PERM | 22, + Self::MPPTimeout => 23, + Self::InvalidOnionBlinding => BADONION | PERM | 24, + Self::UnknownFailureCode { code } => *code, + } + } + + pub(super) fn is_temporary(&self) -> bool { + self.failure_code() & UPDATE == UPDATE + } + + #[cfg(test)] + pub(super) fn is_permanent(&self) -> bool { + self.failure_code() & PERM == PERM + } +} + +impl Into for u16 { + fn into(self) -> LocalHTLCFailureReason { + if self == (NODE | 2) { + LocalHTLCFailureReason::TemporaryNodeFailure + } else if self == (PERM | NODE | 2) { + LocalHTLCFailureReason::PermanentNodeFailure + } else if self == (PERM | 
NODE | 3) { + LocalHTLCFailureReason::RequiredNodeFeature + } else if self == (BADONION | PERM | 4) { + LocalHTLCFailureReason::InvalidOnionVersion + } else if self == (BADONION | PERM | 5) { + LocalHTLCFailureReason::InvalidOnionHMAC + } else if self == (BADONION | PERM | 6) { + LocalHTLCFailureReason::InvalidOnionKey + } else if self == (UPDATE | 7) { + LocalHTLCFailureReason::TemporaryChannelFailure + } else if self == (PERM | 8) { + LocalHTLCFailureReason::PermanentChannelFailure + } else if self == (PERM | 9) { + LocalHTLCFailureReason::RequiredChannelFeature + } else if self == (PERM | 10) { + LocalHTLCFailureReason::UnknownNextPeer + } else if self == (UPDATE | 11) { + LocalHTLCFailureReason::AmountBelowMinimum + } else if self == (UPDATE | 12) { + LocalHTLCFailureReason::FeeInsufficient + } else if self == (UPDATE | 13) { + LocalHTLCFailureReason::IncorrectCLTVExpiry + } else if self == (UPDATE | 14) { + LocalHTLCFailureReason::CLTVExpiryTooSoon + } else if self == (PERM | 15) { + LocalHTLCFailureReason::IncorrectPaymentDetails + } else if self == 18 { + LocalHTLCFailureReason::FinalIncorrectCLTVExpiry + } else if self == 19 { + LocalHTLCFailureReason::FinalIncorrectHTLCAmount + } else if self == (UPDATE | 20) { + LocalHTLCFailureReason::ChannelDisabled + } else if self == 21 { + LocalHTLCFailureReason::CLTVExpiryTooFar + } else if self == (PERM | 22) { + LocalHTLCFailureReason::InvalidOnionPayload + } else if self == 23 { + LocalHTLCFailureReason::MPPTimeout + } else if self == (BADONION | PERM | 24) { + LocalHTLCFailureReason::InvalidOnionBlinding + } else { + LocalHTLCFailureReason::UnknownFailureCode { code: self } + } + } +} + #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug #[cfg_attr(test, derive(PartialEq))] pub(super) struct HTLCFailReason(HTLCFailReasonRepr); @@ -1411,50 +1648,78 @@ impl_writeable_tlv_based_enum!(HTLCFailReasonRepr, ); impl HTLCFailReason { - #[rustfmt::skip] - pub(super) fn reason(failure_code: u16, data: 
Vec) -> Self { - const BADONION: u16 = 0x8000; - const PERM: u16 = 0x4000; - const NODE: u16 = 0x2000; - const UPDATE: u16 = 0x1000; - - if failure_code == 2 | NODE { debug_assert!(data.is_empty()) } - else if failure_code == 2 | PERM | NODE { debug_assert!(data.is_empty()) } - else if failure_code == 3 | PERM | NODE { debug_assert!(data.is_empty()) } - else if failure_code == 4 | BADONION | PERM { debug_assert_eq!(data.len(), 32) } - else if failure_code == 5 | BADONION | PERM { debug_assert_eq!(data.len(), 32) } - else if failure_code == 6 | BADONION | PERM { debug_assert_eq!(data.len(), 32) } - else if failure_code == 7 | UPDATE { - debug_assert_eq!(data.len() - 2, u16::from_be_bytes(data[0..2].try_into().unwrap()) as usize) } - else if failure_code == 8 | PERM { debug_assert!(data.is_empty()) } - else if failure_code == 9 | PERM { debug_assert!(data.is_empty()) } - else if failure_code == 10 | PERM { debug_assert!(data.is_empty()) } - else if failure_code == 11 | UPDATE { - debug_assert_eq!(data.len() - 2 - 8, u16::from_be_bytes(data[8..10].try_into().unwrap()) as usize) } - else if failure_code == 12 | UPDATE { - debug_assert_eq!(data.len() - 2 - 8, u16::from_be_bytes(data[8..10].try_into().unwrap()) as usize) } - else if failure_code == 13 | UPDATE { - debug_assert_eq!(data.len() - 2 - 4, u16::from_be_bytes(data[4..6].try_into().unwrap()) as usize) } - else if failure_code == 14 | UPDATE { - debug_assert_eq!(data.len() - 2, u16::from_be_bytes(data[0..2].try_into().unwrap()) as usize) } - else if failure_code == 15 | PERM { debug_assert_eq!(data.len(), 12) } - else if failure_code == 18 { debug_assert_eq!(data.len(), 4) } - else if failure_code == 19 { debug_assert_eq!(data.len(), 8) } - else if failure_code == 20 | UPDATE { - debug_assert_eq!(data.len() - 2 - 2, u16::from_be_bytes(data[2..4].try_into().unwrap()) as usize) } - else if failure_code == 21 { debug_assert!(data.is_empty()) } - else if failure_code == 22 | PERM { debug_assert!(data.len() <= 11) } - 
else if failure_code == 23 { debug_assert!(data.is_empty()) } - else if failure_code & BADONION != 0 { - // We set some bogus BADONION failure codes in test, so ignore unknown ones. + pub(super) fn reason(failure_reason: LocalHTLCFailureReason, data: Vec) -> Self { + match failure_reason { + LocalHTLCFailureReason::TemporaryNodeFailure + | LocalHTLCFailureReason::ForwardExpiryBuffer => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::PermanentNodeFailure => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::RequiredNodeFeature + | LocalHTLCFailureReason::PaymentSecretRequired => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::InvalidOnionVersion => debug_assert_eq!(data.len(), 32), + LocalHTLCFailureReason::InvalidOnionHMAC => debug_assert_eq!(data.len(), 32), + LocalHTLCFailureReason::InvalidOnionKey => debug_assert_eq!(data.len(), 32), + LocalHTLCFailureReason::TemporaryChannelFailure + | LocalHTLCFailureReason::DustLimitHolder + | LocalHTLCFailureReason::DustLimitCounterparty + | LocalHTLCFailureReason::FeeSpikeBuffer + | LocalHTLCFailureReason::ChannelNotReady => { + debug_assert_eq!( + data.len() - 2, + u16::from_be_bytes(data[0..2].try_into().unwrap()) as usize + ) + }, + LocalHTLCFailureReason::PermanentChannelFailure + | LocalHTLCFailureReason::ChannelClosed + | LocalHTLCFailureReason::DroppedPending => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::RequiredChannelFeature => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::UnknownNextPeer + | LocalHTLCFailureReason::PrivateChannelForward + | LocalHTLCFailureReason::RealSCIDForward + | LocalHTLCFailureReason::InvalidTrampolineForward => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::AmountBelowMinimum => debug_assert_eq!( + data.len() - 2 - 8, + u16::from_be_bytes(data[8..10].try_into().unwrap()) as usize + ), + LocalHTLCFailureReason::FeeInsufficient => debug_assert_eq!( + data.len() - 2 - 8, + u16::from_be_bytes(data[8..10].try_into().unwrap()) as 
usize + ), + LocalHTLCFailureReason::IncorrectCLTVExpiry => debug_assert_eq!( + data.len() - 2 - 4, + u16::from_be_bytes(data[4..6].try_into().unwrap()) as usize + ), + LocalHTLCFailureReason::CLTVExpiryTooSoon + | LocalHTLCFailureReason::OutgoingCLTVTooSoon => debug_assert_eq!( + data.len() - 2, + u16::from_be_bytes(data[0..2].try_into().unwrap()) as usize + ), + LocalHTLCFailureReason::IncorrectPaymentDetails + | LocalHTLCFailureReason::PaymentClaimBuffer => debug_assert_eq!(data.len(), 12), + LocalHTLCFailureReason::FinalIncorrectCLTVExpiry => debug_assert_eq!(data.len(), 4), + LocalHTLCFailureReason::FinalIncorrectHTLCAmount => debug_assert_eq!(data.len(), 8), + LocalHTLCFailureReason::ChannelDisabled => debug_assert_eq!( + data.len() - 2 - 2, + u16::from_be_bytes(data[2..4].try_into().unwrap()) as usize + ), + LocalHTLCFailureReason::CLTVExpiryTooFar => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::InvalidOnionPayload + | LocalHTLCFailureReason::InvalidTrampolineHop + | LocalHTLCFailureReason::InvalidKeysendPreimage => debug_assert!(data.len() <= 11), + LocalHTLCFailureReason::MPPTimeout => debug_assert!(data.is_empty()), + LocalHTLCFailureReason::InvalidOnionBlinding => debug_assert_eq!(data.len(), 32), + LocalHTLCFailureReason::UnknownFailureCode { code } => { + // We set some bogus BADONION failure codes in tests, so allow unknown BADONION. 
+ if code & BADONION == 0 { + debug_assert!(false, "Unknown failure code: {}", code) + } + }, } - else { debug_assert!(false, "Unknown failure code: {}", failure_code) } - Self(HTLCFailReasonRepr::Reason { failure_code, data }) + Self(HTLCFailReasonRepr::Reason { failure_code: failure_reason.failure_code(), data }) } - pub(super) fn from_failure_code(failure_code: u16) -> Self { - Self::reason(failure_code, Vec::new()) + pub(super) fn from_failure_code(failure_reason: LocalHTLCFailureReason) -> Self { + Self::reason(failure_reason, Vec::new()) } pub(super) fn from_msg(msg: &msgs::UpdateFailHTLC) -> Self { @@ -1473,15 +1738,23 @@ impl HTLCFailReason { ) -> msgs::OnionErrorPacket { match self.0 { HTLCFailReasonRepr::Reason { ref failure_code, ref data } => { + let failure_code = *failure_code; if let Some(secondary_shared_secret) = secondary_shared_secret { - let mut packet = - build_failure_packet(secondary_shared_secret, *failure_code, &data[..]); + let mut packet = build_failure_packet( + secondary_shared_secret, + failure_code.into(), + &data[..], + ); crypt_failure_packet(incoming_packet_shared_secret, &mut packet); packet } else { - build_failure_packet(incoming_packet_shared_secret, *failure_code, &data[..]) + build_failure_packet( + incoming_packet_shared_secret, + failure_code.into(), + &data[..], + ) } }, HTLCFailReasonRepr::LightningError { ref err } => { @@ -1672,14 +1945,14 @@ impl Hop { #[derive(Debug)] pub(crate) enum OnionDecodeErr { /// The HMAC of the onion packet did not match the hop data. - Malformed { err_msg: &'static str, err_code: u16 }, + Malformed { err_msg: &'static str, reason: LocalHTLCFailureReason }, /// We failed to decode the onion payload. /// /// If the payload we failed to decode belonged to a Trampoline onion, following the successful /// decoding of the outer onion, the trampoline_shared_secret field should be set. 
Relay { err_msg: &'static str, - err_code: u16, + reason: LocalHTLCFailureReason, shared_secret: SharedSecret, trampoline_shared_secret: Option, }, @@ -1730,12 +2003,12 @@ where return Err(OnionDecodeErr::Malformed { err_msg: "Final Node OnionHopData provided for us as an intermediary node", - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, }); } Err(OnionDecodeErr::Relay { err_msg: "Final Node OnionHopData provided for us as an intermediary node", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, shared_secret, trampoline_shared_secret: None, }) @@ -1830,12 +2103,12 @@ where Ok((_, None)) => Err(OnionDecodeErr::Malformed { err_msg: "Non-final Trampoline onion data provided to us as last hop", // todo: find more suitable error code - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidTrampolineHop, }), Ok((_, Some(_))) => Err(OnionDecodeErr::Malformed { err_msg: "Final Trampoline onion data provided to us as intermediate hop", // todo: find more suitable error code - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidTrampolineHop, }), Err(e) => Err(e), } @@ -1844,12 +2117,12 @@ where if blinding_point.is_some() { return Err(OnionDecodeErr::Malformed { err_msg: "Intermediate Node OnionHopData provided for us as a final node", - err_code: INVALID_ONION_BLINDING, + reason: LocalHTLCFailureReason::InvalidOnionBlinding, }); } Err(OnionDecodeErr::Relay { err_msg: "Intermediate Node OnionHopData provided for us as a final node", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, shared_secret, trampoline_shared_secret: None, }) @@ -1978,7 +2251,7 @@ fn decode_next_hop, N: NextPacketBytes>( if !fixed_time_eq(&Hmac::from_engine(hmac).to_byte_array(), &hmac_bytes) { return Err(OnionDecodeErr::Malformed { err_msg: "HMAC Check failed", - err_code: 0x8000 | 0x4000 | 5, + reason: LocalHTLCFailureReason::InvalidOnionHMAC, }); } @@ -1986,19 +2259,19 @@ fn 
decode_next_hop, N: NextPacketBytes>( let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&hop_data[..]) }; match R::read(&mut chacha_stream, read_args) { Err(err) => { - let error_code = match err { + let reason = match err { // Unknown realm byte - msgs::DecodeError::UnknownVersion => 0x8000 | 0x4000 | 1, + msgs::DecodeError::UnknownVersion => LocalHTLCFailureReason::InvalidOnionVersion, // invalid_onion_payload msgs::DecodeError::UnknownRequiredFeature | msgs::DecodeError::InvalidValue - | msgs::DecodeError::ShortRead => 0x4000 | 22, + | msgs::DecodeError::ShortRead => LocalHTLCFailureReason::InvalidOnionPayload, // Should never happen - _ => 0x2000 | 2, + _ => LocalHTLCFailureReason::TemporaryNodeFailure, }; return Err(OnionDecodeErr::Relay { err_msg: "Unable to decode our hop data", - err_code: error_code, + reason, shared_secret: SharedSecret::from_bytes(shared_secret), trampoline_shared_secret: None, }); @@ -2008,7 +2281,7 @@ fn decode_next_hop, N: NextPacketBytes>( if let Err(_) = chacha_stream.read_exact(&mut hmac[..]) { return Err(OnionDecodeErr::Relay { err_msg: "Unable to decode our hop data", - err_code: 0x4000 | 22, + reason: LocalHTLCFailureReason::InvalidOnionPayload, shared_secret: SharedSecret::from_bytes(shared_secret), trampoline_shared_secret: None, }); @@ -2392,7 +2665,7 @@ mod tests { let onion_keys = build_test_onion_keys(); let mut onion_error = super::build_unencrypted_failure_packet( onion_keys[4].shared_secret.as_ref(), - 0x2002, + LocalHTLCFailureReason::TemporaryNodeFailure, &[0; 0], ); let hex = 
"4c2fc8bc08510334b6833ad9c3e79cd1b52ae59dfe5c2a4b23ead50f09f7ee0b0002200200fe0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; @@ -2563,7 +2836,7 @@ mod tests { { // Ensure error decryption works without the Trampoline hops having been hit. - let error_code = 0x2002; + let error_code = LocalHTLCFailureReason::TemporaryNodeFailure; let mut first_hop_error_packet = build_unencrypted_failure_packet( outer_onion_keys[0].shared_secret.as_ref(), error_code, @@ -2577,12 +2850,12 @@ mod tests { let decrypted_failure = process_onion_failure(&secp_ctx, &logger, &htlc_source, first_hop_error_packet); - assert_eq!(decrypted_failure.onion_error_code, Some(error_code)); + assert_eq!(decrypted_failure.onion_error_code, Some(error_code.failure_code())); }; { // Ensure error decryption works from the first Trampoline hop, but at the outer onion. - let error_code = 0x2003; + let error_code = 0x2003.into(); let mut trampoline_outer_hop_error_packet = build_unencrypted_failure_packet( outer_onion_keys[1].shared_secret.as_ref(), error_code, @@ -2605,12 +2878,12 @@ mod tests { &htlc_source, trampoline_outer_hop_error_packet, ); - assert_eq!(decrypted_failure.onion_error_code, Some(error_code)); + assert_eq!(decrypted_failure.onion_error_code, Some(error_code.failure_code())); }; { // Ensure error decryption works from the Trampoline inner onion. 
- let error_code = 0x2004; + let error_code = 0x2004.into(); let mut trampoline_inner_hop_error_packet = build_unencrypted_failure_packet( trampoline_onion_keys[0].shared_secret.as_ref(), error_code, @@ -2638,12 +2911,12 @@ mod tests { &htlc_source, trampoline_inner_hop_error_packet, ); - assert_eq!(decrypted_failure.onion_error_code, Some(error_code)); + assert_eq!(decrypted_failure.onion_error_code, Some(error_code.failure_code())); } { // Ensure error decryption works from a later hop in the Trampoline inner onion. - let error_code = 0x2005; + let error_code = 0x2005.into(); let mut trampoline_second_hop_error_packet = build_unencrypted_failure_packet( trampoline_onion_keys[1].shared_secret.as_ref(), error_code, @@ -2676,7 +2949,7 @@ mod tests { &htlc_source, trampoline_second_hop_error_packet, ); - assert_eq!(decrypted_failure.onion_error_code, Some(error_code)); + assert_eq!(decrypted_failure.onion_error_code, Some(error_code.failure_code())); } } } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 5adc2d66b11..27693bb5cac 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -23,7 +23,7 @@ use crate::ln::types::ChannelId; use crate::types::payment::{PaymentHash, PaymentSecret, PaymentPreimage}; use crate::ln::chan_utils; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; -use crate::ln::onion_utils; +use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, ProbeSendFailure, Retry, RetryableSendFailure}; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters}; @@ -344,7 +344,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { check_added_monitors!(nodes[1], 1); commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, 
false); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..])); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(LocalHTLCFailureReason::MPPTimeout, &[][..])); } else { // Pass half of the payment along the second path. let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); @@ -1952,7 +1952,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { let fail_conditions = PaymentFailedConditions::new() .blamed_scid(intercept_scid) .blamed_chan_closed(true) - .expected_htlc_error_data(0x4000 | 10, &[]); + .expected_htlc_error_data(LocalHTLCFailureReason::UnknownNextPeer, &[]); expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } else if test == InterceptTest::Forward { // Check that we'll fail as expected when sending to a channel that isn't in `ChannelReady` yet. @@ -2025,7 +2025,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false); - expect_payment_failed!(nodes[0], payment_hash, false, 0x2000 | 2, []); + expect_payment_failed!(nodes[0], payment_hash, false, LocalHTLCFailureReason::TemporaryNodeFailure, []); // Check for unknown intercept id error. 
let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index c482d97ea8b..dfa0e8817ed 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -14,6 +14,7 @@ use crate::chain::ChannelMonitorUpdateStatus; use crate::events::{ClosureReason, Event, HTLCDestination}; use crate::ln::channelmanager::{MIN_CLTV_EXPIRY_DELTA, PaymentId, RecipientOnionFields}; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::routing::gossip::RoutingFees; use crate::routing::router::{PaymentParameters, RouteHint, RouteHintHop}; use crate::types::features::ChannelTypeFeatures; @@ -456,7 +457,7 @@ fn test_inbound_scid_privacy() { expect_payment_failed_conditions(&nodes[0], payment_hash_2, false, PaymentFailedConditions::new().blamed_scid(last_hop[0].short_channel_id.unwrap()) - .blamed_chan_closed(true).expected_htlc_error_data(0x4000|10, &[0; 0])); + .blamed_chan_closed(true).expected_htlc_error_data(LocalHTLCFailureReason::UnknownNextPeer, &[0; 0])); } #[test] @@ -513,7 +514,7 @@ fn test_scid_alias_returned() { let err_data = 0u16.to_be_bytes(); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().blamed_scid(last_hop[0].inbound_scid_alias.unwrap()) - .blamed_chan_closed(false).expected_htlc_error_data(0x1000|7, &err_data)); + .blamed_chan_closed(false).expected_htlc_error_data(LocalHTLCFailureReason::TemporaryChannelFailure, &err_data)); route.paths[0].hops[1].fee_msat = 10_000; // Reset to the correct payment amount route.paths[0].hops[0].fee_msat = 0; // But set fee paid to the middle hop to 0 @@ -542,7 +543,7 @@ fn test_scid_alias_returned() { err_data.extend_from_slice(&0u16.to_be_bytes()); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().blamed_scid(last_hop[0].inbound_scid_alias.unwrap()) - 
.blamed_chan_closed(false).expected_htlc_error_data(0x1000|12, &err_data)); + .blamed_chan_closed(false).expected_htlc_error_data(LocalHTLCFailureReason::FeeInsufficient, &err_data)); } #[test] diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index e49bc4e83be..b14e2bf06a8 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -20,7 +20,7 @@ use crate::routing::router::{PaymentParameters, get_route, RouteParameters}; use crate::ln::msgs; use crate::ln::types::ChannelId; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent}; -use crate::ln::onion_utils::INVALID_ONION_BLINDING; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::ln::script::ShutdownScript; use crate::util::test_utils; use crate::util::test_utils::OnGetShutdownScriptpubkey; @@ -484,7 +484,7 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { if blinded_recipient { expect_payment_failed_conditions(&nodes[0], our_payment_hash, false, - PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32])); + PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } else { expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2.0.contents.short_channel_id, true); } From 7bbb177b5982ee1ee8164c378d21ee647f881aa1 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 20 Mar 2025 13:46:45 -0400 Subject: [PATCH 4/7] ln: persist failure_reason with HTLCFailureReason ln: persist failure_reason with HTLCFailureReason --- lightning/src/ln/onion_utils.rs | 88 +++++++++++++++++++++++++-------- 1 file changed, 68 insertions(+), 20 deletions(-) diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index a336f2a55cb..5ac32b28034 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -22,7 +22,9 @@ use 
crate::types::features::{ChannelFeatures, NodeFeatures}; use crate::types::payment::{PaymentHash, PaymentPreimage}; use crate::util::errors::{self, APIError}; use crate::util::logger::Logger; -use crate::util::ser::{LengthCalculatingWriter, Readable, ReadableArgs, Writeable, Writer}; +use crate::util::ser::{ + LengthCalculatingWriter, Readable, ReadableArgs, RequiredWrapper, Writeable, Writer, +}; use bitcoin::hashes::cmp::fixed_time_eq; use bitcoin::hashes::hmac::{Hmac, HmacEngine}; @@ -1595,6 +1597,49 @@ impl Into for u16 { } } +impl_writeable_tlv_based_enum!(LocalHTLCFailureReason, + (0, TemporaryNodeFailure) => {}, + (1, PermanentNodeFailure) => {}, + (2, RequiredNodeFeature) => {}, + (3, InvalidOnionVersion) => {}, + (4, InvalidOnionHMAC) => {}, + (5, InvalidOnionKey) => {}, + (6, TemporaryChannelFailure) => {}, + (7, PermanentChannelFailure) => {}, + (8, RequiredChannelFeature) => {}, + (9, UnknownNextPeer) => {}, + (10, AmountBelowMinimum) => {}, + (11, FeeInsufficient) => {}, + (12, IncorrectCLTVExpiry) => {}, + (13, CLTVExpiryTooSoon) => {}, + (14, IncorrectPaymentDetails) => {}, + (15, FinalIncorrectCLTVExpiry) => {}, + (16, FinalIncorrectHTLCAmount) => {}, + (17, ChannelDisabled) => {}, + (18, CLTVExpiryTooFar) => {}, + (19, InvalidOnionPayload) => {}, + (20, MPPTimeout) => {}, + (21, InvalidOnionBlinding) => {}, + (22, InvalidTrampolineForward) => {}, + (23, PaymentClaimBuffer) => {}, + (24, DustLimitHolder) => {}, + (25, DustLimitCounterparty) => {}, + (26, FeeSpikeBuffer) => {}, + (27, DroppedPending) => {}, + (28, PrivateChannelForward) => {}, + (29, RealSCIDForward) => {}, + (30, ChannelNotReady) => {}, + (31, InvalidKeysendPreimage) => {}, + (32, InvalidTrampolineHop) => {}, + (33, PaymentSecretRequired) => {}, + (34, ForwardExpiryBuffer) => {}, + (35, OutgoingCLTVTooSoon) => {}, + (36, ChannelClosed) => {}, + (37, UnknownFailureCode) => { + (0, code, required), + } +); + #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug 
#[cfg_attr(test, derive(PartialEq))] pub(super) struct HTLCFailReason(HTLCFailReasonRepr); @@ -1603,14 +1648,14 @@ pub(super) struct HTLCFailReason(HTLCFailReasonRepr); #[cfg_attr(test, derive(PartialEq))] enum HTLCFailReasonRepr { LightningError { err: msgs::OnionErrorPacket }, - Reason { failure_code: u16, data: Vec }, + Reason { data: Vec, reason: LocalHTLCFailureReason }, } impl core::fmt::Debug for HTLCFailReason { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { match self.0 { - HTLCFailReasonRepr::Reason { ref failure_code, .. } => { - write!(f, "HTLC error code {}", failure_code) + HTLCFailReasonRepr::Reason { ref reason, .. } => { + write!(f, "HTLC error code {}", reason.failure_code()) }, HTLCFailReasonRepr::LightningError { .. } => { write!(f, "pre-built LightningError") @@ -1642,8 +1687,19 @@ impl_writeable_tlv_based_enum!(HTLCFailReasonRepr, (_unused, err, (static_value, msgs::OnionErrorPacket { data: data.ok_or(DecodeError::InvalidValue)? })), }, (1, Reason) => { - (0, failure_code, required), + (0, _failure_code, (legacy, u16, + |r: &HTLCFailReasonRepr| Some(r.clone()) )), (2, data, required_vec), + // failure_code was required, and is replaced by reason so any time we do not have a + // reason available failure_code will be Some so we can require reason. 
+ (4, reason, (default_value, + if let Some(code) = _failure_code { + let failure_reason: LocalHTLCFailureReason = code.into(); + RequiredWrapper::from(failure_reason) + } else { + reason + } + )), }, ); @@ -1715,7 +1771,7 @@ impl HTLCFailReason { }, } - Self(HTLCFailReasonRepr::Reason { failure_code: failure_reason.failure_code(), data }) + Self(HTLCFailReasonRepr::Reason { data, reason: failure_reason }) } pub(super) fn from_failure_code(failure_reason: LocalHTLCFailureReason) -> Self { @@ -1737,24 +1793,16 @@ impl HTLCFailReason { &self, incoming_packet_shared_secret: &[u8; 32], secondary_shared_secret: &Option<[u8; 32]>, ) -> msgs::OnionErrorPacket { match self.0 { - HTLCFailReasonRepr::Reason { ref failure_code, ref data } => { - let failure_code = *failure_code; + HTLCFailReasonRepr::Reason { ref data, ref reason } => { if let Some(secondary_shared_secret) = secondary_shared_secret { - let mut packet = build_failure_packet( - secondary_shared_secret, - failure_code.into(), - &data[..], - ); + let mut packet = + build_failure_packet(secondary_shared_secret, *reason, &data[..]); crypt_failure_packet(incoming_packet_shared_secret, &mut packet); packet } else { - build_failure_packet( - incoming_packet_shared_secret, - failure_code.into(), - &data[..], - ) + build_failure_packet(incoming_packet_shared_secret, *reason, &data[..]) } }, HTLCFailReasonRepr::LightningError { ref err } => { @@ -1778,7 +1826,7 @@ impl HTLCFailReason { process_onion_failure(secp_ctx, logger, &htlc_source, err.clone()) }, #[allow(unused)] - HTLCFailReasonRepr::Reason { ref failure_code, ref data, .. 
} => { + HTLCFailReasonRepr::Reason { ref data, ref reason } => { // we get a fail_malformed_htlc from the first hop // TODO: We'd like to generate a NetworkUpdate for temporary // failures here, but that would be insufficient as find_route @@ -1791,7 +1839,7 @@ impl HTLCFailReason { short_channel_id: Some(path.hops[0].short_channel_id), failed_within_blinded_path: false, #[cfg(any(test, feature = "_test_utils"))] - onion_error_code: Some(*failure_code), + onion_error_code: Some(reason.failure_code()), #[cfg(any(test, feature = "_test_utils"))] onion_error_data: Some(data.clone()), } From ae178c9417a6ad934602e549a6ff3f2fdd05a0e8 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 20 Mar 2025 13:52:48 -0400 Subject: [PATCH 5/7] ln/refactor: split up construct_pending_htlc_status to get error To be able to obtain the underlying error reason for the pending HTLC, break up the helper method into two parts. This also removes some unnecessary wrapping/unwrapping of messages in PendingHTLCStatus types. 
--- lightning/src/ln/async_payments_tests.rs | 2 +- lightning/src/ln/channelmanager.rs | 115 +++++++++-------------- 2 files changed, 48 insertions(+), 69 deletions(-) diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 1d9c6fb84c7..cf663ee1989 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -194,7 +194,7 @@ fn invalid_keysend_payment_secret() { nodes[1].node.get_our_node_id(), &updates_1_0.update_fail_htlcs[0], ); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false); + commitment_signed_dance!(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false); expect_payment_failed_conditions( &nodes[0], payment_hash, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 61129cd91af..7d3749b7dd7 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -4484,85 +4484,67 @@ where }) } - fn construct_pending_htlc_status<'a>( - &self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32], - decoded_hop: onion_utils::Hop, allow_underpay: bool, - next_packet_pubkey_opt: Option>, - ) -> PendingHTLCStatus { - macro_rules! 
return_err { - ($msg: expr, $reason: expr, $data: expr) => { - { - let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)); - log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg); - if msg.blinding_point.is_some() { - return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed( - msgs::UpdateFailMalformedHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - sha256_of_onion: [0; 32], - failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), - } - )) + fn construct_pending_htlc_fail_msg<'a>(&self, msg: &msgs::UpdateAddHTLC, + counterparty_node_id: &PublicKey, shared_secret: [u8; 32], inbound_err: InboundHTLCErr) -> HTLCFailureMsg { + let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)); + log_info!(logger, "Failed to accept/forward incoming HTLC: {}", inbound_err.msg); + + if msg.blinding_point.is_some() { + return HTLCFailureMsg::Malformed( + msgs::UpdateFailMalformedHTLC { + channel_id: msg.channel_id, + htlc_id: msg.htlc_id, + sha256_of_onion: [0; 32], + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), } - let failure = HTLCFailReason::reason($reason, $data.to_vec()) - .get_encrypted_failure_packet(&shared_secret, &None); - return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { - channel_id: msg.channel_id, - htlc_id: msg.htlc_id, - reason: failure.data, - })); - } - } + ) } + + let failure = HTLCFailReason::reason(inbound_err.reason, inbound_err.err_data.to_vec()) + .get_encrypted_failure_packet(&shared_secret, &None); + return HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { + channel_id: msg.channel_id, + htlc_id: msg.htlc_id, + reason: failure.data, + }); + } + + fn get_pending_htlc_info<'a>( + &self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], + decoded_hop: onion_utils::Hop, allow_underpay: bool, + next_packet_pubkey_opt: Option>, + ) -> 
Result { match decoded_hop { onion_utils::Hop::Receive { .. } | onion_utils::Hop::BlindedReceive { .. } => { // OUR PAYMENT! + // Note that we could obviously respond immediately with an update_fulfill_htlc + // message, however that would leak that we are the recipient of this payment, so + // instead we stay symmetric with the forwarding case, only responding (after a + // delay) once they've sent us a commitment_signed! let current_height: u32 = self.best_block.read().unwrap().height; - match create_recv_pending_htlc_info(decoded_hop, shared_secret, msg.payment_hash, + create_recv_pending_htlc_info(decoded_hop, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat, current_height) - { - Ok(info) => { - // Note that we could obviously respond immediately with an update_fulfill_htlc - // message, however that would leak that we are the recipient of this payment, so - // instead we stay symmetric with the forwarding case, only responding (after a - // delay) once they've sent us a commitment_signed! - PendingHTLCStatus::Forward(info) - }, - Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason, &err_data) - } }, #[cfg(trampoline)] onion_utils::Hop::TrampolineReceive { .. } | onion_utils::Hop::TrampolineBlindedReceive { .. } => { // OUR PAYMENT! + // Note that we could obviously respond immediately with an update_fulfill_htlc + // message, however that would leak that we are the recipient of this payment, so + // instead we stay symmetric with the forwarding case, only responding (after a + // delay) once they've sent us a commitment_signed!
let current_height: u32 = self.best_block.read().unwrap().height; - match create_recv_pending_htlc_info(decoded_hop, shared_secret, msg.payment_hash, + create_recv_pending_htlc_info(decoded_hop, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat, current_height) - { - Ok(info) => { - // Note that we could obviously respond immediately with an update_fulfill_htlc - // message, however that would leak that we are the recipient of this payment, so - // instead we stay symmetric with the forwarding case, only responding (after a - // delay) once they've sent us a commitment_signed! - PendingHTLCStatus::Forward(info) - }, - Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason , &err_data) - } }, onion_utils::Hop::Forward { .. } | onion_utils::Hop::BlindedForward { .. } => { - match create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) { - Ok(info) => PendingHTLCStatus::Forward(info), - Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason, &err_data) - } + create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) }, #[cfg(trampoline)] onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. 
} => { - match create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) { - Ok(info) => PendingHTLCStatus::Forward(info), - Err(InboundHTLCErr { reason, err_data, msg }) => return_err!(msg, reason, &err_data) - } - } + create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) + }, } } @@ -5863,16 +5845,14 @@ where } } - match self.construct_pending_htlc_status( - &update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop, - incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey), + match self.get_pending_htlc_info( + &update_add_htlc, shared_secret, next_hop, incoming_accept_underpaying_htlcs, + next_packet_details_opt.map(|d| d.next_packet_pubkey), ) { - PendingHTLCStatus::Forward(htlc_forward) => { - htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id)); - }, - PendingHTLCStatus::Fail(htlc_fail) => { + Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)), + Err(inbound_err) => { let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); - htlc_fails.push((htlc_fail, htlc_destination)); + htlc_fails.push((self.construct_pending_htlc_fail_msg(&update_add_htlc, &incoming_counterparty_node_id, shared_secret, inbound_err), htlc_destination)); }, } } @@ -11825,7 +11805,6 @@ where payment.htlcs.retain(|htlc| { // If height is approaching the number of blocks we think it takes us to get // our commitment transaction confirmed before the HTLC expires, plus the - // number of blocks we generally consider it to take to do a commitment update, // just give up on it and fail the HTLC. 
if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec(); From e7ce43ec170af0d5bc8731945dcd92642f851f45 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 20 Mar 2025 09:50:36 -0400 Subject: [PATCH 6/7] ln/refactor: pass failure reason into do_commitment_signed_dance In upcoming commits, we'll make stronger assertions about the type of failure that we're getting when we set fail_backwards. This will be different depending on the type of route we're using - blinded routes fail malformed onion blinded and regular routes just see a downstream failure. This refactor prepares for adding those assertions by threading a failure reason through. The commitment_signed_dance macro is not updated because it already has an invocation with 5 parameters. --- lightning/src/ln/async_payments_tests.rs | 10 +++- lightning/src/ln/blinded_payment_tests.rs | 56 +++++++++---------- lightning/src/ln/chanmon_update_fail_tests.rs | 6 +- lightning/src/ln/functional_test_utils.rs | 27 +++++++-- lightning/src/ln/offers_tests.rs | 2 +- lightning/src/ln/payment_tests.rs | 6 +- 6 files changed, 64 insertions(+), 43 deletions(-) diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index cf663ee1989..f586b7aafe5 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -186,7 +186,13 @@ fn invalid_keysend_payment_secret() { nodes[1] .node .handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); - do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, true, false); + do_commitment_signed_dance( + &nodes[1], + &nodes[2], + &updates_2_1.commitment_signed, + Some(FailureType::Blinded), + false, + ); let updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(updates_1_0.update_fail_htlcs.len(), 1); @@ -194,7 +200,7 @@ fn invalid_keysend_payment_secret() 
{ nodes[1].node.get_our_node_id(), &updates_1_0.update_fail_htlcs[0], ); - commitment_signed_dance!(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, None, false); expect_payment_failed_conditions( &nodes[0], payment_hash, diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index f3148abadbc..82fc4e49e62 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -128,7 +128,7 @@ pub fn fail_blinded_htlc_backwards( nodes[i-1].node.handle_update_fail_htlc( nodes[i].node.get_our_node_id(), &unblinded_node_updates.update_fail_htlcs[i-1] ); - do_commitment_signed_dance(&nodes[i-1], &nodes[i], &unblinded_node_updates.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[i-1], &nodes[i], &unblinded_node_updates.commitment_signed, None, false); }, _ => { let blinded_node_updates = get_htlc_update_msgs!(nodes[i], nodes[i-1].node.get_our_node_id()); @@ -137,7 +137,7 @@ pub fn fail_blinded_htlc_backwards( assert_eq!(update_malformed.sha256_of_onion, [0; 32]); assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); nodes[i-1].node.handle_update_fail_malformed_htlc(nodes[i].node.get_our_node_id(), update_malformed); - do_commitment_signed_dance(&nodes[i-1], &nodes[i], &blinded_node_updates.commitment_signed, true, false); + do_commitment_signed_dance(&nodes[i-1], &nodes[i], &blinded_node_updates.commitment_signed, Some(FailureType::Blinded), false); } } } @@ -414,7 +414,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); check_added_monitors!(nodes[1], 0); - do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true); + do_commitment_signed_dance(&nodes[1], &nodes[0], 
&updates_0_1.commitment_signed, Some(FailureType::Blinded), true); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); @@ -422,7 +422,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { if intro_fails { let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, None, false); let failed_destination = match check { ForwardCheckFail::InboundOnionCheck => HTLCDestination::InvalidOnion, ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::InvalidOnion, @@ -452,7 +452,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add); check_added_monitors!(nodes[2], 0); - do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); + do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, Some(FailureType::Blinded), true); expect_pending_htlcs_forwardable!(nodes[2]); let failed_destination = match check { @@ -474,11 +474,11 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { update_malformed.sha256_of_onion = [1; 32]; update_malformed.failure_code = LocalHTLCFailureReason::InvalidOnionBlinding.failure_code() ^ 1; nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); - do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, Some(FailureType::Blinded), false); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); 
nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, None, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } @@ -510,7 +510,7 @@ fn failed_backwards_to_intro_node() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); - do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); + do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, None, false); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(&nodes[1], 1); @@ -523,7 +523,7 @@ fn failed_backwards_to_intro_node() { payment_event.msgs[0].onion_routing_packet.hop_data[0] ^= 1; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[2], 0); - do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, true, true); + do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, Some(FailureType::Blinded), true); expect_pending_htlcs_forwardable!(nodes[2]); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); @@ -539,11 +539,11 @@ fn failed_backwards_to_intro_node() { // converts it to the correct error. 
update_malformed.sha256_of_onion = [1; 32]; nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); - do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, Some(FailureType::Blinded), false); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, None, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } @@ -594,7 +594,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); - do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false); + do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, None, false); macro_rules! 
cause_error { ($prev_node: expr, $curr_node: expr, $next_node: expr, $failed_chan_id: expr, $failed_scid: expr) => { @@ -646,7 +646,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, let mut update_add = &mut updates_1_2.update_add_htlcs[0]; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add); check_added_monitors!(nodes[2], 0); - do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true); + do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, Some(FailureType::Blinded), true); cause_error!(nodes[1], nodes[2], nodes[3], chan_id_2_3, chan_upd_2_3.short_channel_id); check_added_monitors!(nodes[2], 1); @@ -660,11 +660,11 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, update_malformed.sha256_of_onion = [1; 32]; update_malformed.failure_code = LocalHTLCFailureReason::InvalidOnionBlinding.failure_code() ^ 1; nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); - do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, Some(FailureType::Blinded), false); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, None, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } @@ -934,7 +934,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), 
&payment_event_0_1.msgs[0]); check_added_monitors!(nodes[1], 0); - do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, false, false); + do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, None, false); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(&nodes[1], 1); @@ -949,7 +949,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { ReceiveCheckFail::RecipientFail => { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); check_added_monitors!(nodes[2], 0); - do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); + do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, Some(FailureType::Blinded), true); expect_pending_htlcs_forwardable!(nodes[2]); check_payment_claimable( &nodes[2].node.get_and_clear_pending_events()[0], payment_hash, payment_secret, amt_msat, @@ -985,7 +985,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { ).unwrap(); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); check_added_monitors!(nodes[2], 0); - do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); + do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, Some(FailureType::Blinded), true); expect_pending_htlcs_forwardable!(nodes[2]); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); check_added_monitors(&nodes[2], 1); @@ -995,7 +995,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { update_add.amount_msat -= 1; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update_add); check_added_monitors!(nodes[2], 0); - do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); + do_commitment_signed_dance(&nodes[2], &nodes[1], 
&payment_event_1_2.commitment_msg, Some(FailureType::Blinded), true); expect_pending_htlcs_forwardable!(nodes[2]); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); check_added_monitors(&nodes[2], 1); @@ -1020,7 +1020,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { assert_eq!(payment_event_1_2.msgs[0].cltv_expiry, nodes[0].best_block_info().1 + 1 + excess_final_cltv_delta_opt.unwrap() as u32 + TEST_FINAL_CLTV); nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); check_added_monitors!(nodes[2], 0); - do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); + do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, Some(FailureType::Blinded), true); expect_pending_htlcs_forwardable!(nodes[2]); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash }]); @@ -1029,7 +1029,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { ReceiveCheckFail::PaymentConstraints => { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_1_2.msgs[0]); check_added_monitors!(nodes[2], 0); - do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, true, true); + do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event_1_2.commitment_msg, Some(FailureType::Blinded), true); expect_pending_htlcs_forwardable!(nodes[2]); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]); check_added_monitors(&nodes[2], 1); @@ -1042,7 +1042,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { assert_eq!(update_malformed.sha256_of_onion, [0; 32]); assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); 
nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); - do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, true, false); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, Some(FailureType::Blinded), false); let updates_1_0 = if check == ReceiveCheckFail::ChannelCheck { let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1060,7 +1060,7 @@ fn do_multi_hop_receiver_fail(check: ReceiveCheckFail) { } else { get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()) }; assert_eq!(updates_1_0.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates_1_0.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, None, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } @@ -1131,12 +1131,12 @@ fn blinded_path_retries() { assert_eq!(update_malformed.sha256_of_onion, [0; 32]); assert_eq!(update_malformed.failure_code, LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()); $intro_node.node.handle_update_fail_malformed_htlc(nodes[3].node.get_our_node_id(), update_malformed); - do_commitment_signed_dance(&$intro_node, &nodes[3], &updates.commitment_signed, true, false); + do_commitment_signed_dance(&$intro_node, &nodes[3], &updates.commitment_signed, Some(FailureType::Blinded), false); let updates = get_htlc_update_msgs!($intro_node, nodes[0].node.get_our_node_id()); assert_eq!(updates.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc($intro_node.node.get_our_node_id(), &updates.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &$intro_node, &updates.commitment_signed, false, false); + 
do_commitment_signed_dance(&nodes[0], &$intro_node, &updates.commitment_signed, None, false); let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -1238,7 +1238,7 @@ fn min_htlc() { }; nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]); check_added_monitors!(nodes[1], 0); - do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true); + do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, Some(FailureType::Blinded), true); expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), @@ -1247,7 +1247,7 @@ fn min_htlc() { check_added_monitors(&nodes[1], 1); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, None, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32])); } @@ -1431,7 +1431,7 @@ fn fails_receive_tlvs_authentication() { let mut payment_event = SendEvent::from_event(ev); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true); + do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, Some(FailureType::Blinded), true); expect_pending_htlcs_forwardable!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs 
b/lightning/src/ln/chanmon_update_fail_tests.rs index e99cf017b66..ea96f83bf4f 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -2955,7 +2955,7 @@ fn test_blocked_chan_preimage_release() { let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]); - do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_htlc_fulfill_updates.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_htlc_fulfill_updates.commitment_signed, None, false); check_added_monitors(&nodes[1], 0); // Now claim the second payment on nodes[0], which will ultimately result in nodes[1] trying to @@ -2998,7 +2998,7 @@ fn test_blocked_chan_preimage_release() { check_added_monitors(&nodes[1], 1); nodes[2].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]); - do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, None, false); expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); } @@ -3153,7 +3153,7 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ check_added_monitors(&nodes[1], 1); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, None, false); expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 
0d3ce0432c7..484004a4cec 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -2035,9 +2035,12 @@ macro_rules! expect_pending_htlcs_forwardable_from_events { #[macro_export] /// Performs the "commitment signed dance" - the series of message exchanges which occur after a /// commitment update. +/// +/// If `fail_backwards` is true, asserts that `node_b` failed a HTLC back to `node_a` in the +/// exchange. Use [`commitment_signed_dance`] if a different failure type is required. macro_rules! commitment_signed_dance { ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => { - $crate::ln::functional_test_utils::do_commitment_signed_dance(&$node_a, &$node_b, &$commitment_signed, $fail_backwards, true); + $crate::ln::functional_test_utils::do_commitment_signed_dance(&$node_a, &$node_b, &$commitment_signed, if $fail_backwards { Some(FailureType::Downstream) } else { None }, true); }; ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */, true /* return last RAA */) => { $crate::ln::functional_test_utils::do_main_commitment_signed_dance(&$node_a, &$node_b, $fail_backwards) @@ -2057,7 +2060,7 @@ macro_rules! 
commitment_signed_dance { assert!($crate::ln::functional_test_utils::commitment_signed_dance_through_cp_raa(&$node_a, &$node_b, $fail_backwards, $incl_claim).is_none()); }; ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => { - $crate::ln::functional_test_utils::do_commitment_signed_dance(&$node_a, &$node_b, &$commitment_signed, $fail_backwards, false); + $crate::ln::functional_test_utils::do_commitment_signed_dance(&$node_a, &$node_b, &$commitment_signed, if $fail_backwards { Some(FailureType::Downstream) } else { None }, false); } } @@ -2110,12 +2113,24 @@ pub fn do_main_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node< (extra_msg_option, bs_revoke_and_ack) } +/// Describes the type of HTLC that's being failed backwards. +#[derive(Copy, Clone)] +pub enum FailureType { + /// Payment was failed within a blinded route. + Blinded, + /// Payment was failed by the downstream peer. + Downstream, +} + /// Runs a full commitment_signed dance, delivering a commitment_signed, the responding /// `revoke_and_ack` and `commitment_signed`, and then the final `revoke_and_ack` response. /// /// If `skip_last_step` is unset, also checks for the payment failure update for the previous hop /// on failure or that no new messages are left over on success. -pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, commitment_signed: &msgs::CommitmentSigned, fail_backwards: bool, skip_last_step: bool) { +/// +/// `fail_backwards` should be Some if a HTLC is expected to be failed backwards by the commitment +/// update. 
+pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, commitment_signed: &msgs::CommitmentSigned, fail_backwards: Option, skip_last_step: bool) { check_added_monitors!(node_a, 0); assert!(node_a.node.get_and_clear_pending_msg_events().is_empty()); node_a.node.handle_commitment_signed(node_b.node.get_our_node_id(), commitment_signed); @@ -2123,12 +2138,12 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, ' // If this commitment signed dance was due to a claim, don't check for an RAA monitor update. let got_claim = node_a.node.test_raa_monitor_updates_held(node_b.node.get_our_node_id(), commitment_signed.channel_id); - if fail_backwards { assert!(!got_claim); } - commitment_signed_dance!(node_a, node_b, (), fail_backwards, true, false, got_claim); + if fail_backwards.is_some() { assert!(!got_claim); } + commitment_signed_dance!(node_a, node_b, (), fail_backwards.is_some(), true, false, got_claim); if skip_last_step { return; } - if fail_backwards { + if fail_backwards.is_some() { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node_a, vec![crate::events::HTLCDestination::NextHopChannel{ node_id: Some(node_b.node.get_our_node_id()), channel_id: commitment_signed.channel_id }]); check_added_monitors!(node_a, 1); diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index e01f8d847b6..740bc6d2cfe 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -2314,7 +2314,7 @@ fn rejects_keysend_to_non_static_invoice_path() { do_pass_along_path(args); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, None, false); 
expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new()); } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 27693bb5cac..37f5fd9b555 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -398,7 +398,7 @@ fn do_test_keysend_payments(public_node: bool) { check_added_monitors!(nodes[0], 1); let send_event = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); - do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); + do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, None, false); expect_pending_htlcs_forwardable!(nodes[1]); // Previously, a refactor caused us to stop including the payment preimage in the onion which // is sent as a part of keysend payments. Thus, to be extra careful here, we scope the preimage @@ -2097,7 +2097,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { // Forward the intercepted payments. 
for (idx, ev) in events.into_iter().enumerate() { nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &ev.msgs[0]); - do_commitment_signed_dance(&nodes[1], &nodes[0], &ev.commitment_msg, false, true); + commitment_signed_dance!(&nodes[1], &nodes[0], &ev.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); let events = nodes[1].node.get_and_clear_pending_events(); @@ -2125,7 +2125,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { SendEvent::from_event(events.remove(0)) }; nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, false, true); + commitment_signed_dance!(&nodes[2], &nodes[1], &payment_event.commitment_msg, false, true); if idx == num_mpp_parts - 1 { expect_pending_htlcs_forwardable!(nodes[2]); } From bb45736a2cbdae6a5a59ce6bb03eff68a6d7c779 Mon Sep 17 00:00:00 2001 From: Carla Kirk-Cohen Date: Thu, 20 Mar 2025 14:04:37 -0400 Subject: [PATCH 7/7] ln+events: add HTLCDestinationFailure to NextHopChannel Surface the reason for HTLC failure for forwards. Additional information is unlikely to be useful for InvalidOnion and UnknownNextHop, and adding information to FailedPayment is left for future PRs to keep the scope of this PR down. 
--- lightning/src/events/mod.rs | 24 ++++ lightning/src/ln/blinded_payment_tests.rs | 28 ++++- lightning/src/ln/chanmon_update_fail_tests.rs | 27 +++- lightning/src/ln/channelmanager.rs | 57 ++++++--- lightning/src/ln/functional_test_utils.rs | 25 +++- lightning/src/ln/functional_tests.rs | 110 ++++++++++++---- lightning/src/ln/monitor_tests.rs | 13 +- lightning/src/ln/onion_route_tests.rs | 118 ++++++++++++------ lightning/src/ln/onion_utils.rs | 16 +++ lightning/src/ln/payment_tests.rs | 79 +++++++++--- lightning/src/ln/priv_short_conf_tests.rs | 27 ++-- lightning/src/ln/reload_tests.rs | 14 ++- lightning/src/ln/reorg_tests.rs | 7 +- lightning/src/ln/shutdown_tests.rs | 6 +- 14 files changed, 427 insertions(+), 124 deletions(-) diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index a0f26bfbac0..1a63758116f 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -23,6 +23,7 @@ use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, Paym use crate::chain::transaction; use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields}; use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::types::features::ChannelTypeFeatures; use crate::ln::msgs; use crate::ln::types::ChannelId; @@ -465,6 +466,25 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason, }, ); +/// The reason for HTLC failures in [`HTLCDestination`]. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum HTLCDestinationFailure { + /// The forwarded HTLC was failed back by the downstream node with an encrypted error reason. + Downstream, + /// The HTLC was failed locally by our node. + Local{ + /// The reason that our node chose to fail the HTLC. 
+ reason: LocalHTLCFailureReason + }, +} + +impl_writeable_tlv_based_enum!(HTLCDestinationFailure, + (0, Downstream) => {}, + (1, Local) => { + (0, reason, required), + }, +); + /// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`]. #[derive(Clone, Debug, PartialEq, Eq)] pub enum HTLCDestination { @@ -477,6 +497,9 @@ pub enum HTLCDestination { node_id: Option, /// The outgoing `channel_id` between us and the next node. channel_id: ChannelId, + /// The reason that the HTLC forward was failed. For backwards compatibility, this field is + /// marked as optional, versions prior to 0.1.1 will set this value to None. + reason: Option }, /// Scenario where we are unsure of the next node to forward the HTLC to. UnknownNextHop { @@ -510,6 +533,7 @@ pub enum HTLCDestination { impl_writeable_tlv_based_enum_upgradable!(HTLCDestination, (0, NextHopChannel) => { (0, node_id, required), + (1, reason, option), (2, channel_id, required), }, (1, InvalidForward) => { diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 82fc4e49e62..cc04eb61d8d 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -427,7 +427,11 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { ForwardCheckFail::InboundOnionCheck => HTLCDestination::InvalidOnion, ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::InvalidOnion, ForwardCheckFail::OutboundChannelCheck => - HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }, + HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_1_2.2, + reason: Some(LocalHTLCFailureReason::ChannelNotReady.into()), + }, }; expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), &[failed_destination.clone()] @@ -458,7 +462,11 @@ fn do_forward_checks_failure(check: 
ForwardCheckFail, intro_fails: bool) { let failed_destination = match check { ForwardCheckFail::InboundOnionCheck|ForwardCheckFail::ForwardPayloadEncodedAsReceive => HTLCDestination::InvalidOnion, ForwardCheckFail::OutboundChannelCheck => - HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }, + HTLCDestination::NextHopChannel { + node_id: Some(nodes[3].node.get_our_node_id()), + channel_id: chan_2_3.2, + reason: Some(LocalHTLCFailureReason::ChannelNotReady.into()), + }, }; expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), &[failed_destination.clone()] @@ -474,7 +482,7 @@ fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) { update_malformed.sha256_of_onion = [1; 32]; update_malformed.failure_code = LocalHTLCFailureReason::InvalidOnionBlinding.failure_code() ^ 1; nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); - do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, Some(FailureType::Blinded), false); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, Some(FailureType::Custom(update_malformed.failure_code.into())), false); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); @@ -605,7 +613,11 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, $curr_node.node.peer_disconnected($next_node.node.get_our_node_id()); expect_pending_htlcs_forwardable!($curr_node); expect_htlc_handling_failed_destinations!($curr_node.node.get_and_clear_pending_events(), - vec![HTLCDestination::NextHopChannel { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]); + vec![HTLCDestination::NextHopChannel { + node_id: Some($next_node.node.get_our_node_id()), + channel_id: $failed_chan_id, 
+ reason: Some(LocalHTLCFailureReason::ChannelNotReady.into()), + }]); }, ProcessPendingHTLCsCheck::FwdChannelClosed => { // Force close the next-hop channel so when we go to forward in process_pending_htlc_forwards, @@ -660,7 +672,7 @@ fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, update_malformed.sha256_of_onion = [1; 32]; update_malformed.failure_code = LocalHTLCFailureReason::InvalidOnionBlinding.failure_code() ^ 1; nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed); - do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, Some(FailureType::Blinded), false); + do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, Some(FailureType::Custom(update_malformed.failure_code.into())), false); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); @@ -1242,7 +1254,11 @@ fn min_htlc() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1_2.2 }] + &[HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_1_2.2, + reason: Some(LocalHTLCFailureReason::AmountBelowMinimum.into()), + }] ); check_added_monitors(&nodes[1], 1); let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index ea96f83bf4f..249470fa25f 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -17,10 +17,11 @@ use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, 
ChannelMonitor}; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; -use crate::events::{Event, PaymentPurpose, ClosureReason, HTLCDestination}; +use crate::events::{Event, PaymentPurpose, ClosureReason, HTLCDestination, HTLCDestinationFailure}; use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; use crate::ln::channel::AnnouncementSigsState; use crate::ln::msgs; +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::ln::types::ChannelId; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, MessageSendEvent}; use crate::util::test_channel_signer::TestChannelSigner; @@ -905,7 +906,11 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(HTLCDestinationFailure::Downstream{}), + }]); check_added_monitors!(nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1752,7 +1757,11 @@ fn test_monitor_update_on_pending_forwards() { commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + 
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(HTLCDestinationFailure::Downstream{}), + }]); check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -2159,7 +2168,11 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_id_2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), + }]); nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); @@ -2549,7 +2562,11 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect_args.pending_htlc_fails.0 = 1; reconnect_nodes(reconnect_args); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_id_2, + reason: Some(HTLCDestinationFailure::Downstream{}), + }]); } else { let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect_args.pending_htlc_claims.0 = 1; diff --git 
a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7d3749b7dd7..8f8668c9606 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3293,7 +3293,8 @@ macro_rules! handle_monitor_update_completion { } $self.finalize_claims(updates.finalized_claimed_htlcs); for failure in updates.failed_htlcs.drain(..) { - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), + channel_id, reason: Some(failure.2.clone().into()) }; $self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver); } } } @@ -3913,7 +3914,8 @@ where for htlc_source in failed_htlcs.drain(..) { let failure_reason = LocalHTLCFailureReason::DroppedPending; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; + let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), + channel_id: *channel_id, reason: Some(failure_reason.into()) }; self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } @@ -4037,7 +4039,8 @@ where let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::DroppedPending; let reason = HTLCFailReason::from_failure_code(failure_reason); - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), + channel_id, reason: Some(failure_reason.into()) }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update { @@ -5751,13 +5754,14 @@ where let mut decode_update_add_htlcs = new_hash_map(); mem::swap(&mut 
decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap()); - let get_failed_htlc_destination = |outgoing_scid_opt: Option, payment_hash: PaymentHash| { + let get_failed_htlc_destination = |outgoing_scid_opt: Option, payment_hash: PaymentHash, reason: LocalHTLCFailureReason| { if let Some(outgoing_scid) = outgoing_scid_opt { match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) { Some((outgoing_counterparty_node_id, outgoing_channel_id)) => HTLCDestination::NextHopChannel { node_id: Some(*outgoing_counterparty_node_id), channel_id: *outgoing_channel_id, + reason: Some(reason.into()), }, None => HTLCDestination::UnknownNextHop { requested_forward_scid: outgoing_scid, @@ -5819,10 +5823,10 @@ where Some(Ok(_)) => {}, Some(Err((err, reason))) => { let htlc_fail = self.htlc_failure_from_update_add_err( - &update_add_htlc, &incoming_counterparty_node_id, err, reason, + &update_add_htlc, &incoming_counterparty_node_id, err, reason.clone(), is_intro_node_blinded_forward, &shared_secret, ); - let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); + let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash, reason); htlc_fails.push((htlc_fail, htlc_destination)); continue; }, @@ -5839,7 +5843,7 @@ where &update_add_htlc, &incoming_counterparty_node_id, err, reason, is_intro_node_blinded_forward, &shared_secret, ); - let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); + let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash, reason); htlc_fails.push((htlc_fail, htlc_destination)); continue; } @@ -5851,7 +5855,7 @@ where ) { Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)), Err(inbound_err) => { - let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash); + let htlc_destination = 
get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash, inbound_err.reason); htlc_fails.push((self.construct_pending_htlc_fail_msg(&update_add_htlc, &incoming_counterparty_node_id, shared_secret, inbound_err), htlc_destination)); }, } @@ -6127,7 +6131,11 @@ where let data = self.get_htlc_inbound_temp_fail_data(reason); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::reason(reason, data), - HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } + HTLCDestination::NextHopChannel { + node_id: Some(chan.context.get_counterparty_node_id()), + channel_id: forward_chan_id, + reason: Some(reason.into()), + } )); } else { forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); @@ -6982,8 +6990,12 @@ where }; for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { - let reason = HTLCFailReason::reason(failure_reason, onion_failure_data.clone()); - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; + let reason = HTLCFailReason::reason(failure_reason.clone(), onion_failure_data.clone()); + let receiver = HTLCDestination::NextHopChannel { + node_id: Some(counterparty_node_id.clone()), + channel_id, + reason: Some(failure_reason.into()), + }; self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver); } } @@ -8768,8 +8780,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } for htlc_source in dropped_htlcs.drain(..) 
{ - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; - let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::DroppedPending); + let failure_reason = LocalHTLCFailureReason::DroppedPending; + let reason = HTLCFailReason::from_failure_code(failure_reason); + let receiver = HTLCDestination::NextHopChannel { + node_id: Some(counterparty_node_id.clone()), + channel_id: msg.channel_id, + reason: Some(failure_reason.into()), + }; self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } if let Some(shutdown_res) = finish_shutdown { @@ -9595,7 +9612,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } else { log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); let failure_reason = LocalHTLCFailureReason::ChannelClosed; - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCDestination::NextHopChannel { + node_id: Some(counterparty_node_id), + channel_id, + reason: Some(failure_reason.into()), + }; let reason = HTLCFailReason::from_failure_code(failure_reason); self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); } @@ -11689,7 +11710,11 @@ where let reason = LocalHTLCFailureReason::CLTVExpiryTooSoon; let data = self.get_htlc_inbound_temp_fail_data(reason); timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(reason, data), - HTLCDestination::NextHopChannel { node_id: Some(funded_channel.context.get_counterparty_node_id()), channel_id: funded_channel.context.channel_id() })); + HTLCDestination::NextHopChannel { + node_id: Some(funded_channel.context.get_counterparty_node_id()), + channel_id: funded_channel.context.channel_id(), + reason: Some(reason.into()), + })); } let logger = WithChannelContext::from(&self.logger, &funded_channel.context, None); if 
let Some(channel_ready) = channel_ready_opt { @@ -14916,7 +14941,7 @@ where for htlc_source in failed_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let failure_reason = LocalHTLCFailureReason::DroppedPending; - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id, reason: Some(failure_reason.into()) }; let reason = HTLCFailReason::from_failure_code(failure_reason); channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 484004a4cec..00ebf5ef775 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -13,7 +13,7 @@ use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, chainmonitor::Persist}; use crate::chain::channelmonitor::ChannelMonitor; use crate::chain::transaction::OutPoint; -use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, PathFailure, PaymentPurpose, PaymentFailureReason}; +use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, PathFailure, PaymentPurpose, PaymentFailureReason, HTLCDestinationFailure}; use crate::events::bump_transaction::{BumpTransactionEvent, BumpTransactionEventHandler, Wallet, WalletSource}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; @@ -2120,6 +2120,9 @@ pub enum FailureType { Blinded, /// Payment was failed by the downstream peer. Downstream, + /// Payment was failed with a custom failure code, used when tests deliberately interfere + /// with message failure codes. 
+ Custom(LocalHTLCFailureReason) } /// Runs a full commitment_signed dance, delivering a commitment_signed, the responding @@ -2143,9 +2146,19 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, ' if skip_last_step { return; } - if fail_backwards.is_some() { + if let Some(fail_type) = fail_backwards { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node_a, - vec![crate::events::HTLCDestination::NextHopChannel{ node_id: Some(node_b.node.get_our_node_id()), channel_id: commitment_signed.channel_id }]); + vec![crate::events::HTLCDestination::NextHopChannel{ + node_id: Some(node_b.node.get_our_node_id()), + channel_id: commitment_signed.channel_id, + // Blinded paths pass back malformed errors all the way to the introduction, + // unblinded failures will just receive the encrypted error from downstream. + reason: Some(match fail_type { + FailureType::Blinded => LocalHTLCFailureReason::InvalidOnionBlinding.into(), + FailureType::Downstream => HTLCDestinationFailure::Downstream{}, + FailureType::Custom(reason) => reason.into(), + }), + }]); check_added_monitors!(node_a, 1); let node_a_per_peer_state = node_a.node.per_peer_state.read().unwrap(); @@ -3209,7 +3222,11 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe node.node.handle_update_fail_htlc(prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node); if !update_next_node { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCDestination::NextHopChannel { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, vec![HTLCDestination::NextHopChannel { + node_id: Some(prev_node.node.get_our_node_id()), + channel_id: next_msgs.as_ref().unwrap().0.channel_id, + reason: Some(HTLCDestinationFailure::Downstream{}), + 
}]); } } let events = node.node.get_and_clear_pending_msg_events(); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index adc218c9dd4..4db8f91ea51 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint; use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; use crate::events::bump_transaction::WalletSource; -use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; +use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason, HTLCDestinationFailure}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel, COINBASE_MATURITY, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; @@ -1211,7 +1211,11 @@ pub fn holding_cell_htlc_counting() { // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward. 
expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::TemporaryChannelFailure.into()), + }]); check_added_monitors!(nodes[1], 1); let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2337,7 +2341,8 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), - channel_id: chan_2.2 + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), }]); check_added_monitors!(nodes[1], 1); let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -2359,7 +2364,8 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), - channel_id: chan_2.2 + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), }]); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); }, @@ -3475,7 +3481,11 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: 
Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), + }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3541,7 +3551,11 @@ pub fn test_simple_commitment_revoked_fail_backward() { check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), + }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4811,7 +4825,11 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { connect_blocks(&nodes[1], 1); if forwarded_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::CLTVExpiryTooSoon.into()), + }]); check_added_monitors!(nodes[1], 1); let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); @@ -5480,7 +5498,11 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // Mine the HTLC timeout transaction on node B. 
mine_transaction(&nodes[1], &htlc_timeout_tx); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), + }]); let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -5682,12 +5704,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 PaymentForwardedFailed events let failed_destinations_3 = vec![ - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, - HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, + HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2, reason: Some(HTLCDestinationFailure::Downstream{}) }, + HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2, reason: Some(HTLCDestinationFailure::Downstream{}) }, + 
HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2, reason: Some(HTLCDestinationFailure::Downstream{}) }, + HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2, reason: Some(HTLCDestinationFailure::Downstream{}) }, + HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2, reason: Some(HTLCDestinationFailure::Downstream{}) }, + HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2, reason: Some(HTLCDestinationFailure::Downstream{}) }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3); check_added_monitors!(nodes[3], 1); @@ -5740,13 +5762,25 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno if deliver_last_raa { expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true); - let expected_destinations: Vec = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect(); + let expected_destinations: Vec = repeat(HTLCDestination::NextHopChannel { + node_id: Some(nodes[3].node.get_our_node_id()), + channel_id: chan_2_3.2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()) + }).take(3).collect(); expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations); } else { let expected_destinations: Vec = if announce_latest { - repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect() + repeat(HTLCDestination::NextHopChannel { + node_id: Some(nodes[3].node.get_our_node_id()), + channel_id: chan_2_3.2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), + }).take(9).collect() } else { - repeat(HTLCDestination::NextHopChannel { node_id: 
Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect() + repeat(HTLCDestination::NextHopChannel { + node_id: Some(nodes[3].node.get_our_node_id()), + channel_id: chan_2_3.2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), + }).take(6).collect() }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations); @@ -7302,7 +7336,11 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::InvalidOnionVersion.into()), + }]); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); @@ -7356,6 +7394,8 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCDestination::InvalidOnion]); check_added_monitors(&nodes[2], 1); + #[allow(unused_mut, unused_assignments)] + let mut reason: Option = None; let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events_3.len(), 1); match events_3[0] { @@ -7363,6 +7403,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { let mut update_msg = updates.update_fail_malformed_htlcs[0].clone(); // Set the NODE bit (BADONION and PERM already set in invalid_onion_version error) update_msg.failure_code |= 0x2000; + reason = Some(update_msg.failure_code.into()); 
nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), &update_msg); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true); @@ -7370,9 +7411,11 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { _ => panic!("Unexpected event"), } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCDestination::NextHopChannel { - node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(reason.unwrap().into()), + }]); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); check_added_monitors!(nodes[1], 1); @@ -10031,7 +10074,11 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // additional block built on top of the current chain. nodes[1].chain_monitor.chain_monitor.transactions_confirmed( &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: channel_id, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), + }]); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -10053,8 +10100,11 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // When the HTLC times out on the A<->B edge, the B<->C channel will fail the HTLC back to // avoid the A<->B channel closing (even though it already has). 
This will generate a // spurious HTLCHandlingFailed event. - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCDestination::NextHopChannel { node_id: Some(node_c_id), channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(node_c_id), + channel_id, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), + }]); } } @@ -10254,7 +10304,11 @@ pub fn test_inconsistent_mpp_params() { nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[3].node.get_our_node_id()), + channel_id: chan_2_3.2, + reason: Some(HTLCDestinationFailure::Downstream{}), + }]); check_added_monitors!(nodes[2], 1); let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); @@ -10735,7 +10789,11 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { let node_id_1 = nodes[1].node.get_our_node_id(); expect_htlc_handling_failed_destinations!( nodes[0].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(node_id_1), channel_id: chan_id_1 }] + &[HTLCDestination::NextHopChannel { + node_id: Some(node_id_1), + channel_id: chan_id_1, + reason: Some(LocalHTLCFailureReason::TemporaryChannelFailure.into()), + }] ); let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id()); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 0a42d0a8b99..d8b93c2fdfe 100644 --- a/lightning/src/ln/monitor_tests.rs +++ 
b/lightning/src/ln/monitor_tests.rs @@ -9,6 +9,7 @@ //! Further functional tests which test blockchain reorganizations. +use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SignerProvider, SpendableOutputDescriptor}; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATENCY_GRACE_PERIOD_BLOCKS, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE, Balance, BalanceSource, ChannelMonitorUpdateStep}; use crate::chain::transaction::OutPoint; @@ -86,7 +87,11 @@ fn chanmon_fail_from_stale_commitment() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_id_2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), + }]); check_added_monitors!(nodes[1], 1); let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1417,7 +1422,11 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ .iter().map(|a| *a).collect(); events.retain(|ev| { match ev { - Event::HTLCHandlingFailed { failed_next_destination: HTLCDestination::NextHopChannel { node_id, channel_id }, .. } => { + Event::HTLCHandlingFailed { failed_next_destination: HTLCDestination::NextHopChannel { + node_id, + channel_id, + .. + }, .. 
} => { assert_eq!(*channel_id, chan_id); assert_eq!(*node_id, Some(nodes[1].node.get_our_node_id())); false diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index 274b08a7007..19829fcf427 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -190,7 +190,7 @@ fn run_onion_failure_test_with_fail_intercept( assert_eq!(*payment_failed_permanently, !expected_retryable); assert_eq!(error_code.is_none(), expected_error_reason.is_none()); if let Some(expected_reason) = expected_error_reason { - assert_eq!(expected_reason, error_code.unwrap().into()) + assert_eq!(expected_reason.failure_code(), error_code.unwrap()) } if expected_channel_update.is_some() { match network_update { @@ -313,7 +313,11 @@ fn test_fee_failures() { run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; }, || {}, true, Some(LocalHTLCFailureReason::FeeInsufficient), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), - Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 })); + Some(HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: channels[1].2, + reason: Some(LocalHTLCFailureReason::FeeInsufficient.into()), + })); // In an earlier version, we spuriously failed to forward payments if the expected feerate // changed between the channel open and the payment. @@ -359,7 +363,14 @@ fn test_onion_failure() { // positive case send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 40000); - let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channels[1].2 }; + macro_rules! 
next_hop_failure { + ($reason:expr) => { + HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: channels[1].2, + reason: Some($reason.into()), + } + };} // intermediate node failure let short_channel_id = channels[1].0.contents.short_channel_id; @@ -402,6 +413,7 @@ fn test_onion_failure() { // the following three with run_onion_failure_test_with_fail_intercept() test only the origin node // receiving simulated fail messages // intermediate node failure + let failure_reason = LocalHTLCFailureReason::TemporaryNodeFailure; run_onion_failure_test_with_fail_intercept("temporary_node_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { // trigger error msg.amount_msat -= 1; @@ -409,64 +421,72 @@ fn test_onion_failure() { // and tamper returning error message let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryNodeFailure, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), failure_reason, &[0;0]); msg.reason = failure.data; - }, ||{}, true, Some(LocalHTLCFailureReason::TemporaryNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(failure_reason), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: false}), Some(route.paths[0].hops[0].short_channel_id), + Some(next_hop_failure!(LocalHTLCFailureReason::FeeInsufficient))); // real failure surfaced in event // final node failure + let failure_reason = LocalHTLCFailureReason::TemporaryNodeFailure; run_onion_failure_test_with_fail_intercept("temporary_node_failure", 200, &nodes, &route, 
&payment_hash, &payment_secret, |_msg| {}, |msg| { // and tamper returning error message let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryNodeFailure, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), failure_reason, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(LocalHTLCFailureReason::TemporaryNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: false}), Some(route.paths[0].hops[1].short_channel_id), None); + }, true, Some(failure_reason), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: false}), Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // intermediate node failure + let failure_reason = LocalHTLCFailureReason::PermanentNodeFailure; run_onion_failure_test_with_fail_intercept("permanent_node_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::PermanentNodeFailure, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), failure_reason, &[0;0]); msg.reason = failure.data; - }, ||{}, true, Some(LocalHTLCFailureReason::PermanentNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), 
Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(failure_reason), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), + Some(next_hop_failure!(LocalHTLCFailureReason::FeeInsufficient))); // real failure surfaced in event // final node failure + let failure_reason = LocalHTLCFailureReason::PermanentNodeFailure; run_onion_failure_test_with_fail_intercept("permanent_node_failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::PermanentNodeFailure, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), failure_reason, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, false, Some(LocalHTLCFailureReason::PermanentNodeFailure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); + }, false, Some(failure_reason), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // intermediate node failure + let failure_reason = LocalHTLCFailureReason::RequiredNodeFeature; run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], 
&session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::RequiredNodeFeature, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), failure_reason, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(LocalHTLCFailureReason::RequiredNodeFeature), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, true, Some(failure_reason), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), + Some(next_hop_failure!(LocalHTLCFailureReason::FeeInsufficient))); // real failure surfaced in event // final node failure + let failure_reason = LocalHTLCFailureReason::RequiredNodeFeature; run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::RequiredNodeFeature, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), failure_reason, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, false, Some(LocalHTLCFailureReason::RequiredNodeFeature), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), Some(route.paths[0].hops[1].short_channel_id), None); + }, false, Some(failure_reason), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[1].pubkey, is_permanent: true}), 
Some(route.paths[0].hops[1].short_channel_id), None); let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); // Our immediate peer sent UpdateFailMalformedHTLC because it couldn't understand the onion in @@ -488,51 +508,57 @@ fn test_onion_failure() { err_data.extend_from_slice(&(chan_update.serialized_length() as u16 + 2).to_be_bytes()); err_data.extend_from_slice(&ChannelUpdate::TYPE.to_be_bytes()); err_data.extend_from_slice(&chan_update.encode()); + let failure_reason = LocalHTLCFailureReason::TemporaryChannelFailure; run_onion_failure_test_with_fail_intercept("temporary_channel_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryChannelFailure, &err_data); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), failure_reason, &err_data); msg.reason = failure.data; }, ||{}, true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), Some(next_hop_failure.clone())); + Some(short_channel_id), Some(next_hop_failure!(LocalHTLCFailureReason::FeeInsufficient))); // real failure surfaced in event // Check we can still handle onion failures that include channel updates without a type prefix let err_data_without_type = chan_update.encode_with_len(); + let failure_reason = LocalHTLCFailureReason::TemporaryChannelFailure; run_onion_failure_test_with_fail_intercept("temporary_channel_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = 
onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::TemporaryChannelFailure, &err_data_without_type); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), failure_reason, &err_data_without_type); msg.reason = failure.data; }, ||{}, true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), Some(next_hop_failure.clone())); + Some(short_channel_id), Some(next_hop_failure!(LocalHTLCFailureReason::FeeInsufficient))); // real failure surfaced in event let short_channel_id = channels[1].0.contents.short_channel_id; + let failure_reason = LocalHTLCFailureReason::PermanentChannelFailure; run_onion_failure_test_with_fail_intercept("permanent_channel_failure", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::PermanentChannelFailure, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), failure_reason, &[0;0]); msg.reason = failure.data; // short_channel_id from the processing node - }, ||{}, true, Some(LocalHTLCFailureReason::PermanentChannelFailure), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(failure_reason), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), + Some(next_hop_failure!(LocalHTLCFailureReason::FeeInsufficient))); // real failure surfaced in event 
let short_channel_id = channels[1].0.contents.short_channel_id; + let failure_reason = LocalHTLCFailureReason::RequiredChannelFeature; run_onion_failure_test_with_fail_intercept("required_channel_feature_missing", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; }, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), LocalHTLCFailureReason::RequiredChannelFeature, &[0;0]); + let failure = onion_utils::build_failure_packet(onion_keys[0].shared_secret.as_ref(), failure_reason, &[0;0]); msg.reason = failure.data; // short_channel_id from the processing node - }, ||{}, true, Some(LocalHTLCFailureReason::RequiredChannelFeature), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(failure_reason), Some(NetworkUpdate::ChannelFailure{short_channel_id, is_permanent: true}), Some(short_channel_id), + Some(next_hop_failure!(LocalHTLCFailureReason::FeeInsufficient))); // real failure surfaced in event let mut bogus_route = route.clone(); bogus_route.paths[0].hops[1].short_channel_id -= 1; @@ -547,9 +573,10 @@ fn test_onion_failure() { let mut bogus_route = route.clone(); let route_len = bogus_route.paths[0].hops.len(); bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward; - run_onion_failure_test("amount_below_minimum", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(LocalHTLCFailureReason::AmountBelowMinimum), + let failure_reason = LocalHTLCFailureReason::AmountBelowMinimum; + run_onion_failure_test("amount_below_minimum", 100, &nodes, &bogus_route, &payment_hash, &payment_secret, |_| {}, ||{}, true, Some(failure_reason), Some(NetworkUpdate::ChannelFailure { short_channel_id, 
is_permanent: false }), - Some(short_channel_id), Some(next_hop_failure.clone())); + Some(short_channel_id), Some(next_hop_failure!(failure_reason))); // Clear pending payments so that the following positive test has the correct payment hash. for node in nodes.iter() { @@ -564,25 +591,30 @@ fn test_onion_failure() { // We ignore channel update contents in onion errors, so will blame the 2nd channel even though // the first node is the one that messed up. let short_channel_id = channels[1].0.contents.short_channel_id; + let failure_reason = LocalHTLCFailureReason::FeeInsufficient; run_onion_failure_test("fee_insufficient", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { msg.amount_msat -= 1; - }, || {}, true, Some(LocalHTLCFailureReason::FeeInsufficient), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, || {}, true, Some(failure_reason), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), + Some(next_hop_failure!(failure_reason))); let short_channel_id = channels[1].0.contents.short_channel_id; + let failure_reason = LocalHTLCFailureReason::IncorrectCLTVExpiry; run_onion_failure_test("incorrect_cltv_expiry", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { // need to violate: cltv_expiry - cltv_expiry_delta >= outgoing_cltv_value msg.cltv_expiry -= 1; - }, || {}, true, Some(LocalHTLCFailureReason::IncorrectCLTVExpiry), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), Some(next_hop_failure.clone())); + }, || {}, true, Some(failure_reason), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false}), Some(short_channel_id), + Some(next_hop_failure!(failure_reason))); let short_channel_id = channels[1].0.contents.short_channel_id; + let failure_reason = LocalHTLCFailureReason::CLTVExpiryTooSoon; run_onion_failure_test("expiry_too_soon", 
100, &nodes, &route, &payment_hash, &payment_secret, |msg| { let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1; connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], height - nodes[2].best_block_info().1); - }, ||{}, true, Some(LocalHTLCFailureReason::CLTVExpiryTooSoon), + }, ||{}, true, Some(failure_reason), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), Some(next_hop_failure.clone())); + Some(short_channel_id), Some(next_hop_failure!(failure_reason))); run_onion_failure_test("unknown_payment_hash", 2, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { nodes[2].node.fail_htlc_backwards(&payment_hash); @@ -623,14 +655,17 @@ fn test_onion_failure() { } }, true, Some(LocalHTLCFailureReason::FinalIncorrectHTLCAmount), None, Some(channels[1].0.contents.short_channel_id), Some(HTLCDestination::FailedPayment { payment_hash })); + let failure = LocalHTLCFailureReason::ChannelNotReady; let short_channel_id = channels[1].0.contents.short_channel_id; run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { // disconnect event to the channel between nodes[1] ~ nodes[2] nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); - }, true, Some(LocalHTLCFailureReason::TemporaryChannelFailure), + }, true, Some(failure), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), Some(next_hop_failure.clone())); + Some(short_channel_id), Some(next_hop_failure!(failure))); + + let failure = LocalHTLCFailureReason::ChannelDisabled; run_onion_failure_test("channel_disabled", 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || { // disconnect event to the channel between nodes[1] ~ nodes[2] for _ in 
0..DISABLE_GOSSIP_TICKS + 1 { @@ -639,11 +674,12 @@ fn test_onion_failure() { } nodes[1].node.get_and_clear_pending_msg_events(); nodes[2].node.get_and_clear_pending_msg_events(); - }, true, Some(LocalHTLCFailureReason::ChannelDisabled), + }, true, Some(failure), Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent: false }), - Some(short_channel_id), Some(next_hop_failure.clone())); + Some(short_channel_id), Some(next_hop_failure!(failure))); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); + let failure = LocalHTLCFailureReason::CLTVExpiryTooFar; run_onion_failure_test("expiry_too_far", 100, &nodes, &route, &payment_hash, &payment_secret, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let mut route = route.clone(); @@ -656,17 +692,19 @@ fn test_onion_failure() { let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); msg.cltv_expiry = htlc_cltv; msg.onion_routing_packet = onion_packet; - }, ||{}, true, Some(LocalHTLCFailureReason::CLTVExpiryTooFar), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), Some(next_hop_failure.clone())); + }, ||{}, true, Some(failure), Some(NetworkUpdate::NodeFailure{node_id: route.paths[0].hops[0].pubkey, is_permanent: true}), Some(route.paths[0].hops[0].short_channel_id), + Some(next_hop_failure!(failure))); + let failure_reason = LocalHTLCFailureReason::MPPTimeout; run_onion_failure_test_with_fail_intercept("mpp_timeout", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { // Tamper returning error message let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap(); - let failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), LocalHTLCFailureReason::MPPTimeout, &[0;0]); + let 
failure = onion_utils::build_failure_packet(onion_keys[1].shared_secret.as_ref(), failure_reason, &[0;0]); msg.reason = failure.data; }, ||{ nodes[2].node.fail_htlc_backwards(&payment_hash); - }, true, Some(LocalHTLCFailureReason::MPPTimeout), None, None, None); + }, true, Some(failure_reason), None, None, None); run_onion_failure_test_with_fail_intercept("bogus err packet with valid hmac", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { @@ -719,7 +757,7 @@ fn test_onion_failure() { short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: false, }), - Some(channels[1].0.contents.short_channel_id), Some(next_hop_failure.clone())); + Some(channels[1].0.contents.short_channel_id), Some(next_hop_failure!(LocalHTLCFailureReason::FeeInsufficient))); run_onion_failure_test_with_fail_intercept("0-length channel update in final node UPDATE onion failure", 200, &nodes, &route, &payment_hash, &payment_secret, |_msg| {}, |msg| { let session_priv = SecretKey::from_slice(&[3; 32]).unwrap(); @@ -899,7 +937,11 @@ fn do_test_onion_failure_stale_channel_update(announce_for_forwarding: bool) { run_onion_failure_test( name, 100, &nodes, &route, &payment_hash, &payment_secret, |_| {}, || {}, true, Some(error_reason), Some(network_update), Some(short_channel_id), - Some(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_to_update.0 }), + Some(HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: channel_to_update.0, + reason: Some(error_reason.into()) + }), ); }; diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 5ac32b28034..88673d07c61 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -11,6 +11,7 @@ use super::msgs::OnionErrorPacket; use crate::blinded_path::BlindedHop; use crate::crypto::chacha20::ChaCha20; use crate::crypto::streams::ChaChaReader; +use 
crate::events::HTLCDestinationFailure; use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; use crate::ln::channelmanager::{HTLCSource, RecipientOnionFields}; use crate::ln::msgs; @@ -1644,6 +1645,21 @@ impl_writeable_tlv_based_enum!(LocalHTLCFailureReason, #[cfg_attr(test, derive(PartialEq))] pub(super) struct HTLCFailReason(HTLCFailReasonRepr); +impl Into for HTLCFailReason { + fn into(self) -> HTLCDestinationFailure { + match self.0 { + HTLCFailReasonRepr::LightningError { .. } => HTLCDestinationFailure::Downstream, + HTLCFailReasonRepr::Reason { reason, .. } => HTLCDestinationFailure::Local { reason }, + } + } +} + +impl Into for LocalHTLCFailureReason { + fn into(self) -> HTLCDestinationFailure { + HTLCDestinationFailure::Local { reason: self } + } +} + #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug #[cfg_attr(test, derive(PartialEq))] enum HTLCFailReasonRepr { diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 37f5fd9b555..8c620bb32d6 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -14,7 +14,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen}; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::sign::EntropySource; -use crate::events::{ClosureReason, Event, HTLCDestination, PathFailure, PaymentFailureReason, PaymentPurpose}; +use crate::events::{ClosureReason, Event, HTLCDestination, PathFailure, PaymentFailureReason, PaymentPurpose, HTLCDestinationFailure}; use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI}; use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; @@ -128,7 
+128,11 @@ fn mpp_retry() { // Attempt to forward the payment and complete the 2nd path's failure. expect_pending_htlcs_forwardable!(&nodes[2]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[3].node.get_our_node_id()), + channel_id: chan_4_id, + reason: Some(LocalHTLCFailureReason::TemporaryChannelFailure.into()), + }]); let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -234,7 +238,9 @@ fn mpp_retry_overpay() { expect_pending_htlcs_forwardable!(&nodes[2]); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { - node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id + node_id: Some(nodes[3].node.get_our_node_id()), + channel_id: chan_4_id, + reason: Some(LocalHTLCFailureReason::TemporaryChannelFailure.into()), }] ); let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); @@ -337,7 +343,11 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false); // Failed HTLC from node 1 -> 0 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[3].node.get_our_node_id()), + channel_id: chan_3_id, + reason: Some(HTLCDestinationFailure::Downstream{}), + }]); let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], 
nodes[0].node.get_our_node_id()); assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]); @@ -572,7 +582,11 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let update_fail_0 = get_htlc_update_msgs!(&nodes[3], &nodes[2].node.get_our_node_id()); nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &update_fail_0.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], update_fail_0.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[3].node.get_our_node_id()), + channel_id: chan_4_channel_id, + reason: Some(HTLCDestinationFailure::Downstream{}), + }]); check_added_monitors!(nodes[2], 1); let update_fail_1 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); @@ -663,7 +677,12 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2}] + &[HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_id_2, + // Channel is not yet disabled because timer_tick_occurred has not updated status. 
+ reason: Some(LocalHTLCFailureReason::ChannelNotReady.into()), + }] ); check_added_monitors(&nodes[1], 1); // nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected @@ -920,8 +939,11 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_id_2, + reason: Some(HTLCDestinationFailure::Downstream{}), + }]); // Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming // the HTLC-Timeout transaction beyond 1 conf). 
For dust HTLCs, the HTLC is considered resolved @@ -2234,6 +2256,7 @@ fn do_automatic_retries(test: AutoRetry) { vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: $failing_channel_id, + reason: Some(LocalHTLCFailureReason::TemporaryChannelFailure.into()), }]); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -3022,7 +3045,11 @@ fn no_extra_retries_on_back_to_back_fail() { check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + let next_hop_failure = HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::FeeInsufficient.into()), + }; expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone(), next_hop_failure.clone()]); check_added_monitors(&nodes[1], 1); @@ -3213,7 +3240,11 @@ fn test_simple_partial_retry() { commitment_signed_dance!(nodes[1], nodes[0], second_htlc_updates.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + let next_hop_failure = HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::FeeInsufficient.into()), + }; expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); check_added_monitors(&nodes[1], 2); @@ -3818,7 +3849,8 @@ fn test_retry_custom_tlvs() { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), - channel_id: 
chan_2_id + channel_id: chan_2_id, + reason: Some(LocalHTLCFailureReason::TemporaryChannelFailure.into()), }]); check_added_monitors!(nodes[1], 1); @@ -4005,7 +4037,8 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![ HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), - channel_id: chan_2_3.2 + channel_id: chan_2_3.2, + reason: Some(HTLCDestinationFailure::Downstream{}), }]); check_added_monitors!(nodes[2], 1); @@ -4089,7 +4122,12 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { expect_pending_htlcs_forwardable!(nodes[2]); expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd }] + &[HTLCDestination::NextHopChannel { + node_id: Some(nodes[3].node.get_our_node_id()), + channel_id: chan_id_cd, + // Channel is not yet disabled because timer_tick_occurred has not updated status. 
+ reason: Some(LocalHTLCFailureReason::ChannelNotReady.into()), + }] ); check_added_monitors(&nodes[2], 1); @@ -4156,7 +4194,11 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true); expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]); + &[HTLCDestination::NextHopChannel { + node_id: Some(nodes[3].node.get_our_node_id()), + channel_id: cd_channel_used, + reason: Some(HTLCDestinationFailure::Downstream{}), + }]); } else { expect_pending_htlcs_forwardable!(nodes[3]); expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat); @@ -4239,7 +4281,8 @@ fn test_htlc_forward_considers_anchor_outputs_value() { expect_pending_htlcs_forwardable!(nodes[1]); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), - channel_id: chan_id_2 + channel_id: chan_id_2, + reason: Some(LocalHTLCFailureReason::TemporaryChannelFailure.into()), }]); check_added_monitors(&nodes[1], 1); @@ -4404,7 +4447,11 @@ fn test_non_strict_forwarding() { }; // The failure to forward will refer to the channel given in the onion. 
expect_pending_htlcs_forwardable_conditions(nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]); + &[HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: routed_channel_id, + reason: Some(LocalHTLCFailureReason::TemporaryChannelFailure.into()), + }]); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index dfa0e8817ed..fb85fbd682f 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -74,8 +74,11 @@ fn test_priv_forwarding_rejection() { commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( - nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] + nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_id_2, + reason: Some(LocalHTLCFailureReason::PrivateChannelForward.into()), + }] ); check_added_monitors(&nodes[1], 1); @@ -444,8 +447,11 @@ fn test_inbound_scid_privacy() { commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, true, true); expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( - nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: last_hop[0].channel_id }] + nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::NextHopChannel { + node_id: 
Some(nodes[2].node.get_our_node_id()), + channel_id: last_hop[0].channel_id, + reason: Some(LocalHTLCFailureReason::RealSCIDForward.into()), + }] ); check_added_monitors(&nodes[1], 1); @@ -504,7 +510,11 @@ fn test_scid_alias_returned() { commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan.0.channel_id, + reason: Some(LocalHTLCFailureReason::TemporaryChannelFailure.into()) + }]); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -529,8 +539,11 @@ fn test_scid_alias_returned() { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( - nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id }] + nodes[1].node.get_and_clear_pending_events(), &[HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan.0.channel_id, + reason: Some(LocalHTLCFailureReason::FeeInsufficient.into()), + }] ); check_added_monitors(&nodes[1], 1); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index e5f4bc20018..cd5d4d5471f 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -12,6 +12,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Watch}; use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateStep}; +use crate::ln::onion_utils::LocalHTLCFailureReason; 
use crate::routing::router::{PaymentParameters, RouteParameters}; use crate::sign::EntropySource; use crate::chain::transaction::OutPoint; @@ -1112,7 +1113,12 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht } if !claim_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + [HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_id_2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), + }]); } else { expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, true); } @@ -1210,7 +1216,11 @@ fn removed_payment_no_manager_persistence() { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_id_2, + reason: Some(LocalHTLCFailureReason::DroppedPending.into()), + }]); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 934ca0d5fdc..eb4228a7b1d 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -15,6 +15,7 @@ use crate::chain::transaction::OutPoint; use crate::chain::Confirm; use crate::events::{Event, ClosureReason, HTLCDestination}; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, MessageSendEvent}; +use 
crate::ln::onion_utils::LocalHTLCFailureReason; use crate::ln::types::ChannelId; use crate::sign::OutputSpender; use crate::util::ser::Writeable; @@ -130,7 +131,11 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { } else { // Confirm the timeout tx and check that we fail the HTLC backwards connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, Vec::new())); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::ChannelClosed.into()), + }]); } check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index b14e2bf06a8..da02611af6d 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -468,7 +468,11 @@ fn do_htlc_fail_async_shutdown(blinded_recipient: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }] + &[HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2, + reason: Some(LocalHTLCFailureReason::DroppedPending.into()), + }] ); check_added_monitors(&nodes[1], 1);