@@ -256,6 +256,10 @@ pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
 // solved by a previous claim tx. What we want to avoid is a reorg evicting our claim tx and us not
 // continuing to bump another claim tx to solve the outpoint.
 pub const ANTI_REORG_DELAY: u32 = 6;
+/// Number of blocks we wait before assuming a [`ChannelMonitor`] to be fully resolved and
+/// considering it to be safely archived.
+// 4032 blocks is roughly four weeks
+pub const ARCHIVAL_DELAY_BLOCKS: u32 = 4032;
 /// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we
 /// refuse to accept a new HTLC.
 ///
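To illustrate the archival threshold this hunk introduces, here is a minimal, self-contained sketch of the height arithmetic the constant feeds into later in `check_and_update_full_resolution_status`. It is not LDK's API; `is_archivable` is a hypothetical helper:

```rust
const ARCHIVAL_DELAY_BLOCKS: u32 = 4032; // roughly four weeks of blocks

fn is_archivable(balances_empty_height: Option<u32>, current_height: u32) -> bool {
    match balances_empty_height {
        // The height at which balances first became empty was recorded; wait
        // out the full delay from that point before declaring archivability.
        Some(empty_height) => current_height >= empty_height + ARCHIVAL_DELAY_BLOCKS,
        // Balances have never been observed empty, so archiving is unsafe.
        None => false,
    }
}

fn main() {
    assert!(!is_archivable(None, 900_000));
    assert!(!is_archivable(Some(800_000), 800_000 + ARCHIVAL_DELAY_BLOCKS - 1));
    assert!(is_archivable(Some(800_000), 800_000 + ARCHIVAL_DELAY_BLOCKS));
}
```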
@@ -1023,6 +1027,12 @@ pub(crate) struct ChannelMonitorImpl<Signer: EcdsaChannelSigner> {
 
 	/// The first block height at which we had no remaining claimable balances.
 	balances_empty_height: Option<u32>,
+
+	/// In-memory only HTLC ids used to track upstream HTLCs that have been failed backwards due to
+	/// a downstream channel force-close remaining unconfirmed by the time the upstream timeout
+	/// expires. This is used to tell us we already generated an event to fail this HTLC back
+	/// during a previous block scan.
+	failed_back_htlc_ids: HashSet<SentHTLCId>,
 }

 /// Transaction outputs to watch for on-chain spends.
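The new field relies on `HashSet::insert` returning `false` for an already-present value, which is what makes the fail-back event fire at most once per HTLC across repeated block scans. A small sketch of that pattern, with a hypothetical `HtlcId` standing in for LDK's richer `SentHTLCId`:

```rust
use std::collections::HashSet;

#[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)]
struct HtlcId(u64); // hypothetical stand-in for SentHTLCId

fn maybe_fail_back(seen: &mut HashSet<HtlcId>, id: HtlcId) -> bool {
    // First scan: insert succeeds and the caller emits the event. Later scans:
    // the id is already present, insert returns false, and the event is skipped.
    seen.insert(id)
}

fn main() {
    let mut seen = HashSet::new();
    assert!(maybe_fail_back(&mut seen, HtlcId(7)));  // first block scan: emit
    assert!(!maybe_fail_back(&mut seen, HtlcId(7))); // next scan: suppressed
}
```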
@@ -1445,6 +1455,8 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 			counterparty_node_id: Some(counterparty_node_id),
 			initial_counterparty_commitment_info: None,
 			balances_empty_height: None,
+
+			failed_back_htlc_ids: new_hash_set(),
 		})
 	}

@@ -2015,10 +2027,11 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 	///
 	/// This function returns a tuple of two booleans, the first indicating whether the monitor is
 	/// fully resolved, and the second whether the monitor needs persistence to ensure it is
-	/// reliably marked as resolved within 4032 blocks.
+	/// reliably marked as resolved within [`ARCHIVAL_DELAY_BLOCKS`] blocks.
 	///
-	/// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at least
-	/// 4032 blocks as an additional protection against any bugs resulting in spuriously empty balance sets.
+	/// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at
+	/// least [`ARCHIVAL_DELAY_BLOCKS`] blocks as an additional protection against any bugs
+	/// resulting in spuriously empty balance sets.
 	pub fn check_and_update_full_resolution_status<L: Logger>(&self, logger: &L) -> (bool, bool) {
 		let mut is_all_funds_claimed = self.get_claimable_balances().is_empty();
 		let current_height = self.current_best_block().height;
@@ -2034,11 +2047,10 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 		// once processed, implies the preimage exists in the corresponding inbound channel.
 		let preimages_not_needed_elsewhere = inner.pending_monitor_events.is_empty();

-		const BLOCKS_THRESHOLD: u32 = 4032; // ~four weeks
 		match (inner.balances_empty_height, is_all_funds_claimed, preimages_not_needed_elsewhere) {
 			(Some(balances_empty_height), true, true) => {
 				// Claimed all funds, check if reached the blocks threshold.
-				(current_height >= balances_empty_height + BLOCKS_THRESHOLD, false)
+				(current_height >= balances_empty_height + ARCHIVAL_DELAY_BLOCKS, false)
 			},
 			(Some(_), false, _)|(Some(_), _, false) => {
 				// previously assumed we claimed all funds, but we have new funds to claim or
@@ -2058,7 +2070,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 				// None. It is set to the current block height.
 				log_debug!(logger,
 					"ChannelMonitor funded at {} is now fully resolved. It will become archivable in {} blocks",
-					inner.get_funding_txo().0, BLOCKS_THRESHOLD);
+					inner.get_funding_txo().0, ARCHIVAL_DELAY_BLOCKS);
 				inner.balances_empty_height = Some(current_height);
 				(false, true)
 			},
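For context, a hedged sketch of how a caller might consume the `(is_fully_resolved, needs_persistence)` tuple. `Monitor`, `prune_archivable`, and the simplified status check are hypothetical stand-ins that assume balances stay empty once recorded, not LDK's actual interface:

```rust
struct Monitor {
    balances_empty_height: Option<u32>,
}

impl Monitor {
    // Mirrors the (is_fully_resolved, needs_persistence) contract described above.
    fn check_and_update_full_resolution_status(&mut self, current_height: u32) -> (bool, bool) {
        const ARCHIVAL_DELAY_BLOCKS: u32 = 4032;
        match self.balances_empty_height {
            Some(h) => (current_height >= h + ARCHIVAL_DELAY_BLOCKS, false),
            None => {
                // First time balances are seen empty: start the countdown and
                // ask the caller to persist so it survives a restart.
                self.balances_empty_height = Some(current_height);
                (false, true)
            }
        }
    }
}

fn prune_archivable(monitors: &mut Vec<Monitor>, current_height: u32) {
    monitors.retain_mut(|monitor| {
        let (fully_resolved, needs_persistence) =
            monitor.check_and_update_full_resolution_status(current_height);
        if needs_persistence {
            // persist(monitor) would go here in a real implementation.
        }
        // Drop (archive) the monitor only once the delay has fully elapsed.
        !fully_resolved
    });
}

fn main() {
    let mut monitors = vec![Monitor { balances_empty_height: None }];
    prune_archivable(&mut monitors, 800_000); // starts the countdown
    assert_eq!(monitors.len(), 1);
    prune_archivable(&mut monitors, 800_000 + 4032); // delay elapsed: archived
    assert!(monitors.is_empty());
}
```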
@@ -3274,7 +3286,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			}
 		}

-		if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain) && is_pre_close_update {
+		if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed) && is_pre_close_update {
 			log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent");
 			Err(())
 		} else { ret }
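A condensed sketch of the widened gate, using hypothetical free-standing booleans rather than the monitor's fields: once any flag indicates the channel has gone (or may be going) on-chain, pre-close commitment updates are refused. `holder_tx_signed` is the flag this commit adds to the disjunction.

```rust
fn refuse_update(funding_spend_seen: bool, lockdown_from_offchain: bool,
    holder_tx_signed: bool, is_pre_close_update: bool) -> bool {
    (funding_spend_seen || lockdown_from_offchain || holder_tx_signed) && is_pre_close_update
}

fn main() {
    // Previously, a signed-but-unconfirmed holder commitment alone would not
    // refuse the update; with the new flag in the disjunction it now does.
    assert!(refuse_update(false, false, true, true));
    // Updates that are not pre-close are still allowed through.
    assert!(!refuse_update(false, false, true, false));
}
```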
@@ -4221,6 +4233,71 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			}
 		}

+		if self.lockdown_from_offchain || self.funding_spend_seen || self.holder_tx_signed {
+			// Fail back HTLCs on backwards channels if they expire within
+			// `LATENCY_GRACE_PERIOD_BLOCKS` blocks and the channel is closed (i.e. we're at a
+			// point where no further off-chain updates will be accepted). If we haven't seen the
+			// preimage for an HTLC by the time the previous hop's timeout expires, we've lost that
+			// HTLC, so we might as well fail it back instead of having our counterparty force-close
+			// the inbound channel.
+			let current_holder_htlcs = self.current_holder_commitment_tx.htlc_outputs.iter()
+				.map(|&(ref a, _, ref b)| (a, b.as_ref()));
+
+			let current_counterparty_htlcs = if let Some(txid) = self.current_counterparty_commitment_txid {
+				if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) {
+					Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed))))
+				} else { None }
+			} else { None }.into_iter().flatten();
+
+			let prev_counterparty_htlcs = if let Some(txid) = self.prev_counterparty_commitment_txid {
+				if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) {
+					Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed))))
+				} else { None }
+			} else { None }.into_iter().flatten();
+
+			let htlcs = current_holder_htlcs
+				.chain(current_counterparty_htlcs)
+				.chain(prev_counterparty_htlcs);
+
+			let height = self.best_block.height;
+			for (htlc, source_opt) in htlcs {
+				// Only check forwarded HTLCs' previous hops
+				let source = match source_opt {
+					Some(source) => source,
+					None => continue,
+				};
+				let inbound_htlc_expiry = match source.inbound_htlc_expiry() {
+					Some(cltv_expiry) => cltv_expiry,
+					None => continue,
+				};
+				let max_expiry_height = height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS);
+				if inbound_htlc_expiry > max_expiry_height {
+					continue;
+				}
+				let duplicate_event = self.pending_monitor_events.iter().any(
+					|update| if let &MonitorEvent::HTLCEvent(ref upd) = update {
+						upd.source == *source
+					} else { false });
+				if duplicate_event {
+					continue;
+				}
+				if !self.failed_back_htlc_ids.insert(SentHTLCId::from_source(source)) {
+					continue;
+				}
+				if !duplicate_event {
+					log_error!(logger, "Failing back HTLC {} upstream to preserve the \
+						channel as the forward HTLC hasn't resolved and our backward HTLC \
+						expires soon at {}", log_bytes!(htlc.payment_hash.0), inbound_htlc_expiry);
+					self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
+						source: source.clone(),
+						payment_preimage: None,
+						payment_hash: htlc.payment_hash,
+						htlc_value_satoshis: Some(htlc.amount_msat / 1000),
+					}));
+				}
+			}
+		}
+
 		let conf_target = self.closure_conf_target();
 		self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger);
 		self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, conf_target, fee_estimator, logger);
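The core of the loop above is a single comparison against the grace window: an upstream HTLC is failed back once its inbound expiry is no more than `LATENCY_GRACE_PERIOD_BLOCKS` above the current height. A minimal sketch, with `should_fail_back` as a hypothetical helper and the constant's value taken from this file:

```rust
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; // matches the constant in this file

fn should_fail_back(current_height: u32, inbound_htlc_expiry: u32) -> bool {
    // saturating_add avoids overflow at extreme heights, matching the diff.
    inbound_htlc_expiry <= current_height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS)
}

fn main() {
    assert!(!should_fail_back(100, 104)); // still outside the grace window
    assert!(should_fail_back(100, 103));  // expires within 3 blocks: fail back
    assert!(should_fail_back(100, 99));   // already expired: definitely fail back
}
```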
@@ -5066,6 +5143,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 			counterparty_node_id,
 			initial_counterparty_commitment_info,
 			balances_empty_height,
+			failed_back_htlc_ids: new_hash_set(),
 		})))
 	}
 }
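Since `failed_back_htlc_ids` is in-memory only, deserialization recreates it empty rather than reading it from disk, so its deduplication spans only a single process lifetime. A sketch of that pattern under assumed simplified types (`MonitorState`, with `u64` ids standing in for `SentHTLCId`):

```rust
use std::collections::HashSet;

struct MonitorState {
    balances_empty_height: Option<u32>,
    failed_back_htlc_ids: HashSet<u64>, // never serialized
}

impl MonitorState {
    fn read(balances_empty_height: Option<u32>) -> Self {
        MonitorState {
            balances_empty_height,
            // Recreated empty on every load, mirroring `new_hash_set()` above.
            failed_back_htlc_ids: HashSet::new(),
        }
    }
}

fn main() {
    let state = MonitorState::read(Some(800_000));
    assert!(state.failed_back_htlc_ids.is_empty());
    assert_eq!(state.balances_empty_height, Some(800_000));
}
```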
@@ -5092,7 +5170,7 @@ mod tests {
 	use crate::chain::chaininterface::LowerBoundedFeeEstimator;

 	use super::ChannelMonitorUpdateStep;
-	use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash, unwrap_send_err};
+	use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash};
 	use crate::chain::{BestBlock, Confirm};
 	use crate::chain::channelmonitor::{ChannelMonitor, WithChannelMonitor};
 	use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT};
@@ -5102,10 +5180,9 @@ mod tests {
 	use crate::types::payment::{PaymentPreimage, PaymentHash};
 	use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey};
 	use crate::ln::chan_utils::{self, HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
-	use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields};
+	use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
 	use crate::ln::functional_test_utils::*;
 	use crate::ln::script::ShutdownScript;
-	use crate::util::errors::APIError;
 	use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator};
 	use crate::util::ser::{ReadableArgs, Writeable};
 	use crate::util::logger::Logger;
@@ -5166,9 +5243,9 @@ mod tests {
 		// If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
 		// the update through to the ChannelMonitor which will refuse it (as the channel is closed).
 		let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000);
-		unwrap_send_err!(nodes[1].node.send_payment_with_route(route, payment_hash,
-				RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
-			), false, APIError::MonitorUpdateInProgress, {});
+		nodes[1].node.send_payment_with_route(route, payment_hash,
+			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
+		).unwrap();
 		check_added_monitors!(nodes[1], 1);

 		// Build a new ChannelMonitorUpdate which contains both the failing commitment tx update