Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add a reason to HTLCHandlingFailed event #3601

Open
wants to merge 7 commits into
base: main
Choose a base branch
from
24 changes: 24 additions & 0 deletions lightning/src/events/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, Paym
use crate::chain::transaction;
use crate::ln::channelmanager::{InterceptId, PaymentId, RecipientOnionFields};
use crate::ln::channel::FUNDING_CONF_DEADLINE_BLOCKS;
use crate::ln::onion_utils::LocalHTLCFailureReason;
use crate::types::features::ChannelTypeFeatures;
use crate::ln::msgs;
use crate::ln::types::ChannelId;
Expand Down Expand Up @@ -465,6 +466,25 @@ impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
},
);

/// The reason for HTLC failures in [`HTLCDestination`].
///
/// Distinguishes whether a failed HTLC was rejected by our own node or failed
/// back to us by the node we forwarded it to.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum HTLCDestinationFailure {
	/// The forwarded HTLC was failed back by the downstream node with an encrypted error reason.
	Downstream,
	/// The HTLC was failed locally by our node.
	Local {
		/// The reason that our node chose to fail the HTLC.
		reason: LocalHTLCFailureReason,
	},
}

// (De)serialization for [`HTLCDestinationFailure`]: variant id 0 => `Downstream` (no fields),
// variant id 1 => `Local` with its `reason` field written as required TLV field 0.
// NOTE(review): the variant ids and field numbers are part of the serialized format —
// do not renumber. The odd variant id for `Local` presumably follows LDK's
// odd-is-ignorable convention for readers that don't know this variant — confirm
// against the macro's documentation before relying on that.
impl_writeable_tlv_based_enum!(HTLCDestinationFailure,
(0, Downstream) => {},
(1, Local) => {
(0, reason, required),
},
);

/// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum HTLCDestination {
Expand All @@ -477,6 +497,9 @@ pub enum HTLCDestination {
node_id: Option<PublicKey>,
/// The outgoing `channel_id` between us and the next node.
channel_id: ChannelId,
/// The reason that the HTLC forward was failed. For backwards compatibility, this field is
/// marked as optional, versions prior to 0.1.1 will set this value to None.
reason: Option<HTLCDestinationFailure>
},
/// Scenario where we are unsure of the next node to forward the HTLC to.
UnknownNextHop {
Expand Down Expand Up @@ -510,6 +533,7 @@ pub enum HTLCDestination {
impl_writeable_tlv_based_enum_upgradable!(HTLCDestination,
(0, NextHopChannel) => {
(0, node_id, required),
(1, reason, option),
(2, channel_id, required),
},
(1, InvalidForward) => {
Expand Down
20 changes: 15 additions & 5 deletions lightning/src/ln/async_payments_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ use crate::ln::msgs::{
BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, OnionMessageHandler,
};
use crate::ln::offers_tests;
use crate::ln::onion_utils::INVALID_ONION_BLINDING;
use crate::ln::onion_utils::LocalHTLCFailureReason;
use crate::ln::outbound_payment::PendingOutboundPayment;
use crate::ln::outbound_payment::Retry;
use crate::offers::invoice_request::InvoiceRequest;
Expand Down Expand Up @@ -179,24 +179,34 @@ fn invalid_keysend_payment_secret() {
assert_eq!(updates_2_1.update_fail_malformed_htlcs.len(), 1);
let update_malformed = &updates_2_1.update_fail_malformed_htlcs[0];
assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
assert_eq!(
update_malformed.failure_code,
LocalHTLCFailureReason::InvalidOnionBlinding.failure_code()
);
nodes[1]
.node
.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), update_malformed);
do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, true, false);
do_commitment_signed_dance(
&nodes[1],
&nodes[2],
&updates_2_1.commitment_signed,
Some(FailureType::Blinded),
false,
);

let updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert_eq!(updates_1_0.update_fail_htlcs.len(), 1);
nodes[0].node.handle_update_fail_htlc(
nodes[1].node.get_our_node_id(),
&updates_1_0.update_fail_htlcs[0],
);
do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false);
do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, None, false);
expect_payment_failed_conditions(
&nodes[0],
payment_hash,
false,
PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]),
PaymentFailedConditions::new()
.expected_htlc_error_data(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32]),
);
}

Expand Down
120 changes: 68 additions & 52 deletions lightning/src/ln/blinded_payment_tests.rs

Large diffs are not rendered by default.

33 changes: 25 additions & 8 deletions lightning/src/ln/chanmon_update_fail_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,11 @@ use bitcoin::hash_types::BlockHash;
use bitcoin::network::Network;
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
use crate::events::{Event, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::events::{Event, PaymentPurpose, ClosureReason, HTLCDestination, HTLCDestinationFailure};
use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields};
use crate::ln::channel::AnnouncementSigsState;
use crate::ln::msgs;
use crate::ln::onion_utils::LocalHTLCFailureReason;
use crate::ln::types::ChannelId;
use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, MessageSendEvent};
use crate::util::test_channel_signer::TestChannelSigner;
Expand Down Expand Up @@ -905,7 +906,11 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update);
check_added_monitors!(nodes[1], 0);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel {
node_id: Some(nodes[2].node.get_our_node_id()),
channel_id: chan_2.2,
reason: Some(HTLCDestinationFailure::Downstream{}),
}]);
check_added_monitors!(nodes[1], 1);

let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
Expand Down Expand Up @@ -1752,7 +1757,11 @@ fn test_monitor_update_on_pending_forwards() {
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);

chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel {
node_id: Some(nodes[2].node.get_our_node_id()),
channel_id: chan_2.2,
reason: Some(HTLCDestinationFailure::Downstream{}),
}]);
check_added_monitors!(nodes[1], 1);

chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
Expand Down Expand Up @@ -2159,7 +2168,11 @@ fn test_fail_htlc_on_broadcast_after_claim() {
check_closed_broadcast!(nodes[1], true);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
check_added_monitors!(nodes[1], 1);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel {
node_id: Some(nodes[2].node.get_our_node_id()),
channel_id: chan_id_2,
reason: Some(LocalHTLCFailureReason::ChannelClosed.into()),
}]);

nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
Expand Down Expand Up @@ -2549,7 +2562,11 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
reconnect_args.pending_htlc_fails.0 = 1;
reconnect_nodes(reconnect_args);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel {
node_id: Some(nodes[2].node.get_our_node_id()),
channel_id: chan_id_2,
reason: Some(HTLCDestinationFailure::Downstream{}),
}]);
} else {
let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
reconnect_args.pending_htlc_claims.0 = 1;
Expand Down Expand Up @@ -2955,7 +2972,7 @@ fn test_blocked_chan_preimage_release() {

let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_htlc_fulfill_updates.commitment_signed, false, false);
do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_htlc_fulfill_updates.commitment_signed, None, false);
check_added_monitors(&nodes[1], 0);

// Now claim the second payment on nodes[0], which will ultimately result in nodes[1] trying to
Expand Down Expand Up @@ -2998,7 +3015,7 @@ fn test_blocked_chan_preimage_release() {
check_added_monitors(&nodes[1], 1);

nodes[2].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false);
do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, None, false);
expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
}

Expand Down Expand Up @@ -3153,7 +3170,7 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_
check_added_monitors(&nodes[1], 1);

nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);
do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, None, false);

expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager);

Expand Down
35 changes: 17 additions & 18 deletions lightning/src/ln/channel.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ use crate::ln::chan_utils::{
#[cfg(splicing)]
use crate::ln::chan_utils::FUNDING_TRANSACTION_WITNESS_WEIGHT;
use crate::ln::chan_utils;
use crate::ln::onion_utils::{HTLCFailReason};
use crate::ln::onion_utils::{HTLCFailReason, LocalHTLCFailureReason};
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator, fee_for_weight};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
Expand Down Expand Up @@ -7581,21 +7581,17 @@ impl<SP: Deref> FundedChannel<SP> where

fn internal_htlc_satisfies_config(
&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
) -> Result<(), (&'static str, u16)> {
) -> Result<(), (&'static str, LocalHTLCFailureReason)> {
let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
.and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
(htlc.amount_msat - fee.unwrap()) < amt_to_forward {
return Err((
"Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
0x1000 | 12, // fee_insufficient
));
return Err(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
LocalHTLCFailureReason::FeeInsufficient));
}
if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
return Err((
"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
0x1000 | 13, // incorrect_cltv_expiry
));
return Err(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
LocalHTLCFailureReason::IncorrectCLTVExpiry));
}
Ok(())
}
Expand All @@ -7605,7 +7601,7 @@ impl<SP: Deref> FundedChannel<SP> where
/// unsuccessful, falls back to the previous one if one exists.
pub fn htlc_satisfies_config(
&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
) -> Result<(), (&'static str, u16)> {
) -> Result<(), (&'static str, LocalHTLCFailureReason)> {
self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
.or_else(|err| {
if let Some(prev_config) = self.context.prev_config() {
Expand All @@ -7620,13 +7616,13 @@ impl<SP: Deref> FundedChannel<SP> where
/// this function determines whether to fail the HTLC, or forward / claim it.
pub fn can_accept_incoming_htlc<F: Deref, L: Deref>(
&self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L
) -> Result<(), (&'static str, u16)>
) -> Result<(), (&'static str, LocalHTLCFailureReason)>
where
F::Target: FeeEstimator,
L::Target: Logger
{
if self.context.channel_state.is_local_shutdown_sent() {
return Err(("Shutdown was already sent", 0x4000|8))
return Err(("Shutdown was already sent", LocalHTLCFailureReason::DroppedPending))
}

let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
Expand All @@ -7637,7 +7633,8 @@ impl<SP: Deref> FundedChannel<SP> where
// Note that the total dust exposure includes both the dust HTLCs and the excess mining fees of the counterparty commitment transaction
log_info!(logger, "Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx",
on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
return Err(("Exceeded our total dust exposure limit on counterparty commitment tx", 0x1000|7))
return Err(("Exceeded our total dust exposure limit on counterparty commitment tx",
LocalHTLCFailureReason::DustLimitCounterparty))
}
let htlc_success_dust_limit = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
0
Expand All @@ -7651,7 +7648,8 @@ impl<SP: Deref> FundedChannel<SP> where
if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7))
return Err(("Exceeded our dust exposure limit on holder commitment tx",
LocalHTLCFailureReason::DustLimitHolder))
}
}

Expand Down Expand Up @@ -7689,7 +7687,7 @@ impl<SP: Deref> FundedChannel<SP> where
}
if pending_remote_value_msat.saturating_sub(self.funding.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
return Err(("Fee spike buffer violation", 0x1000|7));
return Err(("Fee spike buffer violation", LocalHTLCFailureReason::FeeSpikeBuffer));
}
}

Expand Down Expand Up @@ -11150,7 +11148,7 @@ mod tests {
use bitcoin::network::Network;
#[cfg(splicing)]
use bitcoin::Weight;
use crate::ln::onion_utils::INVALID_ONION_BLINDING;
use crate::ln::onion_utils::LocalHTLCFailureReason;
use crate::types::payment::{PaymentHash, PaymentPreimage};
use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
Expand Down Expand Up @@ -11790,7 +11788,8 @@ mod tests {
htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
};
let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
htlc_id, failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(),
sha256_of_onion: [0; 32],
};
let mut holding_cell_htlc_updates = Vec::with_capacity(12);
for i in 0..12 {
Expand Down
Loading
Loading