From e69b9f98f93be44ca5e66b9b2d724801c720f604 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 21 May 2025 17:42:57 +0200 Subject: [PATCH 1/4] simplify code --- lightning/src/ln/channel.rs | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index bc4d38beeea..7c331f809f1 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1015,7 +1015,7 @@ enum UpdateFulfillFetch { NewClaim { monitor_update: ChannelMonitorUpdate, htlc_value_msat: u64, - msg: Option, + msg: bool, }, DuplicateClaim {}, } @@ -5350,7 +5350,7 @@ impl FundedChannel where let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, logger); self.context.latest_monitor_update_id = mon_update_id; if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp { - assert!(msg.is_none()); // The HTLC must have ended up in the holding cell. + assert!(!msg); // The HTLC must have ended up in the holding cell. } } @@ -5437,7 +5437,7 @@ impl FundedChannel where // TODO: We may actually be able to switch to a fulfill here, though its // rare enough it may not be worth the complexity burden. debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; + return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: false }; } }, _ => {} @@ -5447,7 +5447,7 @@ impl FundedChannel where self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, }); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; + return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: false }; } { @@ -5455,7 +5455,7 @@ impl FundedChannel where if let InboundHTLCState::Committed = htlc.state { } else { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); - return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; + return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: false }; } log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id); htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); @@ -5464,11 +5464,7 @@ impl FundedChannel where UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, - msg: Some(msgs::UpdateFulfillHTLC { - channel_id: self.context.channel_id(), - htlc_id: htlc_id_arg, - payment_preimage: payment_preimage_arg, - }), + msg: true, } } @@ -5484,7 +5480,7 @@ impl FundedChannel where // matter what. Sadly, to push a new monitor update which flies before others // already queued, we have to insert it into the pending queue and update the // update_ids of all the following monitors. - if release_cs_monitor && msg.is_some() { + if release_cs_monitor && msg { let mut additional_update = self.build_commitment_no_status_check(logger); // build_commitment_no_status_check may bump latest_monitor_id but we want them // to be strictly increasing by one, so decrement it here. 
@@ -5497,7 +5493,7 @@ impl FundedChannel where for held_update in self.context.blocked_monitor_updates.iter_mut() { held_update.update.update_id += 1; } - if msg.is_some() { + if msg { debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set"); let update = self.build_commitment_no_status_check(logger); self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate { @@ -5506,7 +5502,7 @@ impl FundedChannel where } } - self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused(false, msg, false, Vec::new(), Vec::new(), Vec::new()); UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, } }, UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {}, From 4a6a2b6408488f42f832ae21f51e6a921ff41d3e Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 26 May 2025 11:00:22 +0200 Subject: [PATCH 2/4] temp mpp test mod to have single route --- lightning/src/ln/payment_tests.rs | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 389d0f15fb9..c7dd388eb32 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -10,7 +10,6 @@ //! Tests that test the payment retry logic in ChannelManager, including various edge-cases around //! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry //! payments thereafter. - use crate::chain::channelmonitor::{ ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, }; @@ -47,7 +46,7 @@ use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::string::UntrustedString; -use crate::util::test_utils; +use crate::util::{test_utils}; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; @@ -493,15 +492,15 @@ fn test_mpp_keysend() { let nodes = create_network(4, &node_cfgs, &node_chanmgrs); let node_b_id = nodes[1].node.get_our_node_id(); - let node_c_id = nodes[2].node.get_our_node_id(); + // let node_c_id = nodes[2].node.get_our_node_id(); let node_d_id = nodes[3].node.get_our_node_id(); create_announced_chan_between_nodes(&nodes, 0, 1); - create_announced_chan_between_nodes(&nodes, 0, 2); + // create_announced_chan_between_nodes(&nodes, 0, 2); create_announced_chan_between_nodes(&nodes, 1, 3); - create_announced_chan_between_nodes(&nodes, 2, 3); + // create_announced_chan_between_nodes(&nodes, 2, 3); - let recv_value = 15_000_000; + let recv_value = 5_000_000; let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::for_keysend(node_d_id, 40, true), recv_value, @@ -514,18 +513,18 @@ fn test_mpp_keysend() { let id = PaymentId([42; 32]); let hash = nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); - check_added_monitors!(nodes[0], 2); + check_added_monitors!(nodes[0], 1); - let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 2); + assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&node_b_id, &mut events); let payment_secret = Some(payment_secret); - pass_along_path(&nodes[0], route[0], recv_value, hash, payment_secret, ev, false, preimage); 
+ pass_along_path(&nodes[0], route[0], recv_value, hash, payment_secret, ev, true, preimage); + + std::thread::sleep(Duration::from_millis(50)); - let ev = remove_first_msg_event_to_node(&node_c_id, &mut events); - pass_along_path(&nodes[0], route[1], recv_value, hash, payment_secret, ev, true, preimage); claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, preimage.unwrap())); } From fb5ece984ef2e505ed459befb74b152ccd9f6d7c Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 26 May 2025 15:04:36 +0200 Subject: [PATCH 3/4] Small refactors in onion_utils --- lightning/src/ln/onion_utils.rs | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index b3dbd107a76..49a04703600 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -877,10 +877,7 @@ fn crypt_failure_packet(shared_secret: &[u8], packet: &mut OnionErrorPacket) { chacha.process_in_place(&mut packet.data); if let Some(ref mut attribution_data) = packet.attribution_data { - let ammagext = gen_ammagext_from_shared_secret(&shared_secret); - let mut chacha = ChaCha20::new(&ammagext, &[0u8; 8]); - chacha.process_in_place(&mut attribution_data.hold_times); - chacha.process_in_place(&mut attribution_data.hmacs); + attribution_data.crypt(shared_secret); } } @@ -942,10 +939,7 @@ fn update_attribution_data( let attribution_data = onion_error_packet.attribution_data.get_or_insert(AttributionData::new()); - let hold_time_bytes: [u8; 4] = hold_time.to_be_bytes(); - attribution_data.hold_times[..HOLD_TIME_LEN].copy_from_slice(&hold_time_bytes); - - attribution_data.add_hmacs(shared_secret, &onion_error_packet.data); + attribution_data.update(&onion_error_packet.data, shared_secret, hold_time); } pub(super) fn build_failure_packet( @@ -2636,6 +2630,14 @@ impl_writeable!(AttributionData, { }); impl AttributionData { + /// Encrypts or decrypts the attribution data using the provided shared secret. + pub(crate) fn crypt(&mut self, shared_secret: &[u8]) { + let ammagext = gen_ammagext_from_shared_secret(&shared_secret); + let mut chacha = ChaCha20::new(&ammagext, &[0u8; 8]); + chacha.process_in_place(&mut self.hold_times); + chacha.process_in_place(&mut self.hmacs); + } + /// Adds the current node's HMACs for all possible positions to this packet. pub(crate) fn add_hmacs(&mut self, shared_secret: &[u8], message: &[u8]) { let um: [u8; 32] = gen_um_from_shared_secret(&shared_secret); @@ -2685,7 +2687,7 @@ impl AttributionData { /// Verifies the attribution data of a failure packet for the given position in the path. If the HMAC checks out, the /// reported hold time is returned. If the HMAC does not match, None is returned. - fn verify(&self, message: &Vec, shared_secret: &[u8], position: usize) -> Option { + fn verify(&self, message: &[u8], shared_secret: &[u8], position: usize) -> Option { // Calculate the expected HMAC. let um = gen_um_from_shared_secret(shared_secret); let mut hmac = HmacEngine::::new(&um); @@ -2770,6 +2772,12 @@ impl AttributionData { fn get_hold_time_bytes(&self, idx: usize) -> &[u8] { &self.hold_times[idx * HOLD_TIME_LEN..(idx + 1) * HOLD_TIME_LEN] } + + fn update(&mut self, message: &[u8], shared_secret: &[u8], hold_time: u32) { + let hold_time_bytes: [u8; 4] = hold_time.to_be_bytes(); + self.hold_times[..HOLD_TIME_LEN].copy_from_slice(&hold_time_bytes); + self.add_hmacs(shared_secret, message); + } } /// Updates the attribution data for an intermediate node. 
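To make the layering behind the refactored helpers above easier to follow, here is a minimal standalone Rust sketch of how each hop records its hold time in the attribution data. This is not LDK's real AttributionData: the ToyAttributionData struct, record_hold_time, and the single-byte XOR "keystream" are stand-ins introduced here for illustration, and the HMAC chaining done by add_hmacs is omitted entirely. Only the constants (MAX_HOPS = 20, HOLD_TIME_LEN = 4) and the shift_right / update / crypt ordering mirror onion_utils.

const MAX_HOPS: usize = 20;
const HOLD_TIME_LEN: usize = 4;

/// Toy stand-in for AttributionData: hold times only, no HMACs.
struct ToyAttributionData {
    hold_times: [u8; MAX_HOPS * HOLD_TIME_LEN],
}

impl ToyAttributionData {
    fn new() -> Self {
        Self { hold_times: [0u8; MAX_HOPS * HOLD_TIME_LEN] }
    }

    /// Mirrors shift_right(): make room at the front for this hop's hold time.
    fn shift_right(&mut self) {
        self.hold_times.copy_within(..(MAX_HOPS - 1) * HOLD_TIME_LEN, HOLD_TIME_LEN);
    }

    /// Mirrors the hold-time part of update(): write this hop's hold time big-endian
    /// into the first HOLD_TIME_LEN bytes.
    fn record_hold_time(&mut self, hold_time_ms: u32) {
        self.hold_times[..HOLD_TIME_LEN].copy_from_slice(&hold_time_ms.to_be_bytes());
    }

    /// Mirrors crypt(), but with a single-byte XOR "keystream" standing in for the
    /// ChaCha20 stream keyed by the per-hop shared secret.
    fn crypt(&mut self, keystream_byte: u8) {
        for b in self.hold_times.iter_mut() {
            *b ^= keystream_byte;
        }
    }
}

fn main() {
    let mut data = ToyAttributionData::new();
    // The hop furthest from the sender writes first; every upstream hop then shifts,
    // prepends its own hold time and adds another layer of encryption.
    for (hop_key, hold_time_ms) in [(4u8, 1u32), (3, 2), (2, 3), (1, 4), (0, 5)] {
        data.shift_right();
        data.record_hold_time(hold_time_ms);
        data.crypt(hop_key);
    }
    // The sender peels the layers in path order (see process_onion_success) and reads
    // one big-endian u32 hold time per hop. Here we just show the outermost slot.
    println!("outermost slot (still encrypted): {:?}", &data.hold_times[..HOLD_TIME_LEN]);
}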
From 1f56e046d2f89ed6caf42030e31a6c7901da9f26 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 26 May 2025 11:31:00 +0200 Subject: [PATCH 4/4] Add hold times to update_fulfill_htlc --- lightning/src/ln/chanmon_update_fail_tests.rs | 8 +- lightning/src/ln/channel.rs | 43 ++++--- lightning/src/ln/channelmanager.rs | 62 +++++++-- lightning/src/ln/htlc_reserve_unit_tests.rs | 1 + lightning/src/ln/msgs.rs | 9 +- lightning/src/ln/onion_utils.rs | 118 +++++++++++++++++- lightning/src/ln/payment_tests.rs | 2 +- 7 files changed, 207 insertions(+), 36 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index d91d3969401..0a5fef4ce6c 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -2511,6 +2511,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f channel_id: chan_id_2, htlc_id: 0, payment_preimage, + attribution_data: None, }; if second_fails { nodes[2].node.fail_htlc_backwards(&payment_hash); @@ -2524,8 +2525,11 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1); - // Check that the message we're about to deliver matches the one generated: - assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]); + + // Check that the message we're about to deliver matches the one generated. Ignore attribution data. + assert_eq!(fulfill_msg.channel_id, cs_updates.update_fulfill_htlcs[0].channel_id); + assert_eq!(fulfill_msg.htlc_id, cs_updates.update_fulfill_htlcs[0].htlc_id); + assert_eq!(fulfill_msg.payment_preimage, cs_updates.update_fulfill_htlcs[0].payment_preimage); } nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &fulfill_msg); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 7c331f809f1..9596a88677b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -121,7 +121,7 @@ enum FeeUpdateState { enum InboundHTLCRemovalReason { FailRelay(msgs::OnionErrorPacket), FailMalformed(([u8; 32], u16)), - Fulfill(PaymentPreimage), + Fulfill(PaymentPreimage, Option), } /// Represents the resolution status of an inbound HTLC. @@ -220,7 +220,7 @@ impl From<&InboundHTLCState> for Option { Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail), InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) => Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail), - InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) => + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_, _)) => Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill), } } @@ -251,7 +251,7 @@ impl InboundHTLCState { fn preimage(&self) -> Option { match self { - InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(preimage)) => Some(*preimage), + InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(preimage, _)) => Some(*preimage), _ => None, } } @@ -439,6 +439,7 @@ enum HTLCUpdateAwaitingACK { }, ClaimHTLC { payment_preimage: PaymentPreimage, + attribution_data: AttributionData, htlc_id: u64, }, FailHTLC { @@ -5347,7 +5348,7 @@ impl FundedChannel where // (see equivalent if condition there). 
assert!(!self.context.channel_state.can_generate_new_commitment()); let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update - let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, logger); + let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, None, AttributionData::new(), logger); self.context.latest_monitor_update_id = mon_update_id; if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp { assert!(!msg); // The HTLC must have ended up in the holding cell. @@ -5356,7 +5357,7 @@ impl FundedChannel where fn get_update_fulfill_htlc( &mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, - payment_info: Option, logger: &L, + payment_info: Option, attribution_data: AttributionData, logger: &L, ) -> UpdateFulfillFetch where L::Target: Logger { // Either ChannelReady got set (which means it won't be unset) or there is no way any // caller thought we could have something claimed (cause we wouldn't have accepted in an @@ -5380,7 +5381,7 @@ impl FundedChannel where match htlc.state { InboundHTLCState::Committed => {}, InboundHTLCState::LocalRemoved(ref reason) => { - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + if let &InboundHTLCRemovalReason::Fulfill(_, _) = reason { } else { log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id()); debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); @@ -5446,6 +5447,7 @@ impl FundedChannel where log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32()); self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, + attribution_data, }); return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: false }; } @@ -5458,7 +5460,7 @@ impl FundedChannel where return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: false }; } log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id); - htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); + htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone(), Some(attribution_data))); } UpdateFulfillFetch::NewClaim { @@ -5470,10 +5472,10 @@ impl FundedChannel where pub fn get_update_fulfill_htlc_and_commit( &mut self, htlc_id: u64, payment_preimage: PaymentPreimage, - payment_info: Option, logger: &L, + payment_info: Option, attribution_data: AttributionData, logger: &L, ) -> UpdateFulfillCommitFetch where L::Target: Logger { let release_cs_monitor = self.context.blocked_monitor_updates.is_empty(); - match self.get_update_fulfill_htlc(htlc_id, payment_preimage, payment_info, logger) { + match self.get_update_fulfill_htlc(htlc_id, payment_preimage, payment_info, attribution_data, logger) { UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => { // Even if we aren't supposed to let new monitor updates with commitment state // updates run, we still need to push the preimage ChannelMonitorUpdateStep no @@ -5848,7 +5850,7 @@ impl FundedChannel where Err(ChannelError::close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned())) } - pub fn 
update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option), ChannelError> { + pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option, Option), ChannelError> { if self.context.channel_state.is_remote_stfu_sent() || self.context.channel_state.is_quiescent() { return Err(ChannelError::WarnAndDisconnect("Got fulfill HTLC message while quiescent".to_owned())); } @@ -5859,7 +5861,7 @@ impl FundedChannel where return Err(ChannelError::close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned())); } - self.mark_outbound_htlc_removed(msg.htlc_id, OutboundHTLCOutcome::Success(msg.payment_preimage)).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat)) + self.mark_outbound_htlc_removed(msg.htlc_id, OutboundHTLCOutcome::Success(msg.payment_preimage)).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat, htlc.send_timestamp)) } pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> { @@ -6188,7 +6190,7 @@ impl FundedChannel where } None }, - &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => { + &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, ref attribution_data } => { // If an HTLC claim was previously added to the holding cell (via // `get_update_fulfill_htlc`, then generating the claim message itself must // not fail - any in between attempts to claim the HTLC will have resulted @@ -6201,7 +6203,7 @@ impl FundedChannel where // We do not bother to track and include `payment_info` here, however. let mut additional_monitor_update = if let UpdateFulfillFetch::NewClaim { monitor_update, .. } = - self.get_update_fulfill_htlc(htlc_id, *payment_preimage, None, logger) + self.get_update_fulfill_htlc(htlc_id, *payment_preimage, None, attribution_data.clone(), logger) { monitor_update } else { unreachable!() }; update_fulfill_count += 1; monitor_update.updates.append(&mut additional_monitor_update.updates); @@ -6366,7 +6368,7 @@ impl FundedChannel where pending_inbound_htlcs.retain(|htlc| { if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash); - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { + if let &InboundHTLCRemovalReason::Fulfill(_, _) = reason { value_to_self_msat_diff += htlc.amount_msat as i64; } *expecting_peer_commitment_signed = true; @@ -7139,11 +7141,12 @@ impl FundedChannel where failure_code: failure_code.clone(), }); }, - &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => { + &InboundHTLCRemovalReason::Fulfill(ref payment_preimage, ref attribution_data) => { update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC { channel_id: self.context.channel_id(), htlc_id: htlc.htlc_id, payment_preimage: payment_preimage.clone(), + attribution_data: attribution_data.clone(), }); }, } @@ -10642,7 +10645,7 @@ impl Writeable for FundedChannel where SP::Target: SignerProvider 1u8.write(writer)?; (hash, code).write(writer)?; }, - InboundHTLCRemovalReason::Fulfill(preimage) => { + InboundHTLCRemovalReason::Fulfill(preimage, _) => { // TODO: Persistence 2u8.write(writer)?; preimage.write(writer)?; }, @@ -10721,7 +10724,7 @@ impl Writeable for FundedChannel where SP::Target: SignerProvider holding_cell_skimmed_fees.push(skimmed_fee_msat); holding_cell_blinding_points.push(blinding_point); }, - &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, 
ref htlc_id } => { + &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id, .. } => { 1u8.write(writer)?; payment_preimage.write(writer)?; htlc_id.write(writer)?; @@ -11003,7 +11006,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, &'c Channel attribution_data: None, }), 1 => InboundHTLCRemovalReason::FailMalformed(Readable::read(reader)?), - 2 => InboundHTLCRemovalReason::Fulfill(Readable::read(reader)?), + 2 => InboundHTLCRemovalReason::Fulfill(Readable::read(reader)?, None), // TODO: Persistence _ => return Err(DecodeError::InvalidValue), }; InboundHTLCState::LocalRemoved(reason) @@ -11076,6 +11079,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, &'c Channel 1 => HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: Readable::read(reader)?, htlc_id: Readable::read(reader)?, + attribution_data: AttributionData::new(), // TODO: Persistence }, 2 => HTLCUpdateAwaitingACK::FailHTLC { htlc_id: Readable::read(reader)?, @@ -11584,7 +11588,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, &'c Channel } } -fn duration_since_epoch() -> Option { +pub(crate) fn duration_since_epoch() -> Option { #[cfg(not(feature = "std"))] let now = None; @@ -12248,6 +12252,7 @@ mod tests { let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: PaymentPreimage([42; 32]), htlc_id: 0, + attribution_data: AttributionData::new(), }; let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data: Some(AttributionData::new()) } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 170d8261d5a..42e79848770 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -50,7 +50,7 @@ use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFun use crate::ln::inbound_payment; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; -use crate::ln::channel::{self, Channel, ChannelError, ChannelUpdateStatus, FundedChannel, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, ReconnectionMsg, InboundV1Channel, WithChannelContext}; +use crate::ln::channel::{self, duration_since_epoch, Channel, ChannelError, ChannelUpdateStatus, FundedChannel, InboundV1Channel, OutboundV1Channel, ReconnectionMsg, ShutdownResult, UpdateFulfillCommitFetch, WithChannelContext}; use crate::ln::channel::PendingV2Channel; use crate::ln::channel_state::ChannelDetails; use crate::types::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; @@ -59,7 +59,7 @@ use crate::types::features::Bolt11InvoiceFeatures; use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, RouteParameters, RouteParametersConfig, Router, FixedRouter, Route}; use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, HopConnector, InboundHTLCErr, NextPacketDetails, invalid_payment_err_data}; use crate::ln::msgs; -use crate::ln::onion_utils::{self}; +use crate::ln::onion_utils::{self, process_onion_success}; use crate::ln::onion_utils::{HTLCFailReason, LocalHTLCFailureReason}; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError, MessageSendEvent}; #[cfg(test)] @@ -88,6 +88,8 @@ use 
crate::util::ser::{BigSize, FixedLengthReader, LengthReadable, Readable, Rea use crate::util::logger::{Level, Logger, WithContext}; use crate::util::errors::APIError; +use crate::ln::onion_utils::AttributionData; + #[cfg(async_payments)] use { crate::offers::offer::Amount, crate::offers::static_invoice::{DEFAULT_RELATIVE_EXPIRY as STATIC_INVOICE_DEFAULT_RELATIVE_EXPIRY, StaticInvoice, StaticInvoiceBuilder}, @@ -7239,8 +7241,14 @@ where pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)), } }); + + let mut attribution_data = AttributionData::new(); + attribution_data.update(&[], &htlc.prev_hop.incoming_packet_shared_secret, 0); + attribution_data.crypt(&htlc.prev_hop.incoming_packet_shared_secret); + self.claim_funds_from_hop( htlc.prev_hop, payment_preimage, payment_info.clone(), + attribution_data, |_, definitely_duplicate| { debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment"); (Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker) @@ -7269,7 +7277,7 @@ where ComplFunc: FnOnce(Option, bool) -> (Option, Option) >( &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, - payment_info: Option, completion_action: ComplFunc, + payment_info: Option, attribution_data: AttributionData, completion_action: ComplFunc, ) { let counterparty_node_id = prev_hop.counterparty_node_id.or_else(|| { let short_to_chan_info = self.short_to_chan_info.read().unwrap(); @@ -7282,15 +7290,17 @@ where channel_id: prev_hop.channel_id, htlc_id: prev_hop.htlc_id, }; - self.claim_mpp_part(htlc_source, payment_preimage, payment_info, completion_action) + self.claim_mpp_part(htlc_source, payment_preimage, payment_info, attribution_data, completion_action) } fn claim_mpp_part< ComplFunc: FnOnce(Option, bool) -> (Option, Option) >( &self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage, - payment_info: Option, completion_action: ComplFunc, + payment_info: Option, attribution_data: AttributionData, completion_action: ComplFunc, ) { + log_info!(self.logger, "claim_mpp_part called"); + //TODO: Delay the claimed_funds relaying just like we do outbound relay! // If we haven't yet run background events assume we're still deserializing and shouldn't @@ -7322,7 +7332,7 @@ where if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(chan_id) { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { let logger = WithChannelContext::from(&self.logger, &chan.context, None); - let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger); + let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, attribution_data, &&logger); match fulfill_res { UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => { @@ -7474,9 +7484,16 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ forwarded_htlc_value_msat: Option, skimmed_fee_msat: Option, from_onchain: bool, startup_replay: bool, next_channel_counterparty_node_id: PublicKey, next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option, + attribution_data: Option<&AttributionData>, send_timestamp: Option, ) { + log_info!(self.logger, "claim_funds_internal - ONLY NON FINAL"); match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, bolt12_invoice, .. 
} => { + if let Some(attribution_data) = attribution_data { + let _ = process_onion_success(&self.secp_ctx, &self.logger, &path, + &session_priv, attribution_data.clone()); + } + debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire), "We don't support claim_htlc claims during startup - monitors may not be available yet"); debug_assert_eq!(next_channel_counterparty_node_id, path.hops[0].pubkey); @@ -7493,7 +7510,27 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let prev_user_channel_id = hop_data.user_channel_id; let prev_node_id = hop_data.counterparty_node_id; let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); - self.claim_funds_from_hop(hop_data, payment_preimage, None, + + let mut attribution_data = attribution_data + .map_or(AttributionData::new(), |attribution_data| { + let mut attribution_data = attribution_data.clone(); + + attribution_data.shift_right(); + + attribution_data + }); + + let now = duration_since_epoch(); + let hold_time = if let (Some(timestamp), Some(now)) = (send_timestamp, now) { + u32::try_from(now.saturating_sub(timestamp).as_millis()).unwrap_or(u32::MAX) + } else { + 0 + }; + + attribution_data.update(&[], &hop_data.incoming_packet_shared_secret, hold_time); + attribution_data.crypt(&hop_data.incoming_packet_shared_secret); + + self.claim_funds_from_hop(hop_data, payment_preimage, None, attribution_data, |htlc_claim_value_msat, definitely_duplicate| { let chan_to_release = Some(EventUnblockedChannel { counterparty_node_id: next_channel_counterparty_node_id, @@ -8896,7 +8933,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { let funding_txo; let next_user_channel_id; - let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = { + let (htlc_source, forwarded_htlc_value, skimmed_fee_msat, send_timestamp) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -8937,7 +8974,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }; self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), skimmed_fee_msat, false, false, *counterparty_node_id, - funding_txo, msg.channel_id, Some(next_user_channel_id), + funding_txo, msg.channel_id, Some(next_user_channel_id), msg.attribution_data.as_ref(), + send_timestamp, ); Ok(()) @@ -9638,6 +9676,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true, false, counterparty_node_id, funding_outpoint, channel_id, None, + None, None, ); } else { log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); @@ -12154,6 +12193,7 @@ where } fn handle_update_fulfill_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFulfillHTLC) { + log_info!(self.logger, "Received update_fulfill_htlc: {:?}", msg); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let _ = handle_error!(self, self.internal_update_fulfill_htlc(&counterparty_node_id, msg), counterparty_node_id); } @@ -14905,7 +14945,7 @@ where // already (clearly) durably on disk in the `ChannelMonitor` so there's // no need to worry about getting it into others. 
channel_manager.claim_mpp_part( - part.into(), payment_preimage, None, + part.into(), payment_preimage, None, AttributionData::new(), |_, _| (Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim }), pending_claim_ptr) ); @@ -15011,7 +15051,7 @@ where // channel is closed we just assume that it probably came from an on-chain claim. channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None, downstream_closed, true, downstream_node_id, downstream_funding, - downstream_channel_id, None + downstream_channel_id, None, None, None, ); } diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index aee764682a2..45448cbd1b2 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -1785,6 +1785,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { channel_id: chan.2, htlc_id: 0, payment_preimage: our_payment_preimage, + attribution_data: None, }; nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_msg); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 2e9c3b90957..fd0450ed3c8 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -735,6 +735,9 @@ pub struct UpdateFulfillHTLC { pub htlc_id: u64, /// The pre-image of the payment hash, allowing HTLC redemption pub payment_preimage: PaymentPreimage, + + /// Optional field for the attribution data that allows the sender to pinpoint the failing node under all conditions + pub attribution_data: Option, } /// A [`peer_storage`] message that can be sent to or received from a peer. @@ -3076,7 +3079,10 @@ impl_writeable_msg!(UpdateFulfillHTLC, { channel_id, htlc_id, payment_preimage -}, {}); +}, { + // Specified TLV key 1 plus 100 during testing phase. + (101, attribution_data, option) +}); impl_writeable_msg!(PeerStorage, { data }, {}); @@ -5552,6 +5558,7 @@ mod tests { channel_id: ChannelId::from_bytes([2; 32]), htlc_id: 2316138423780173, payment_preimage: PaymentPreimage([1; 32]), + attribution_data: None, }; let encoded_value = update_fulfill_htlc.encode(); let target_value = >::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0101010101010101010101010101010101010101010101010101010101010101").unwrap(); diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 49a04703600..c7b80f5276c 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -1439,6 +1439,61 @@ where } } +/// Process failure we got back from upstream on a payment we sent (implying htlc_source is an +/// OutboundRoute). +pub(crate) fn process_onion_success( + secp_ctx: &Secp256k1, logger: &L, path: &Path, outer_session_priv: &SecretKey, + mut attribution_data: AttributionData, +) -> Vec +where + L::Target: Logger, +{ + let mut hold_times = Vec::new(); + + // Only consider hops in the regular path for attribution data. Failures in a blinded path are not attributable. + let regular_hops = construct_onion_keys_generic(secp_ctx, &path.hops, None, outer_session_priv) + .map(|(shared_secret, _, _, _, _)| shared_secret); + + // In the best case, paths can be up to 27 hops. But attribution data can only be conveyed back to the sender from + // the first 20 hops. Determine the number of hops to be used for attribution data. 
+ let attributable_hop_count = usize::min(path.hops.len(), MAX_HOPS); + + for (route_hop_idx, shared_secret) in regular_hops.enumerate() { + attribution_data.crypt(shared_secret.as_ref()); + + // Only consider hops in the regular path for attribution data. Failures in a blinded path are not attributable. + if route_hop_idx >= attributable_hop_count { + break; + } + + // Calculate position relative to the last attributable hop. The last attributable hop is at position 0. + // The failure node does not need to come from the last attributable hop, but we need to look at the + // chain of HMACs that does include all data up to the last attributable hop. For a more nearby failure, + // the verified HMACs will include some zero padding data. Failures beyond the last attributable hop + // will not be attributable. + let position = attributable_hop_count - route_hop_idx - 1; + let hold_time = attribution_data.verify(&Vec::new(), shared_secret.as_ref(), position); + if let Some(hold_time) = hold_time { + hold_times.push(hold_time); + + log_debug!(logger, "Htlc hold time at pos {}: {} ms", route_hop_idx, hold_time); + + // Shift attribution data to prepare for processing the next hop. + attribution_data.shift_left(); + } else { + log_debug!( + logger, + "Invalid HMAC in attribution data for node at pos {}", + route_hop_idx + ); + + break; + } + } + + hold_times +} + const BADONION: u16 = 0x8000; const PERM: u16 = 0x4000; const NODE: u16 = 0x2000; @@ -2734,7 +2789,7 @@ impl AttributionData { /// Shifts hold times and HMACS to the right, taking into account HMAC pruning. Intermediate nodes do this to create /// space for prepending their own hold time and HMACs. - fn shift_right(&mut self) { + pub(crate) fn shift_right(&mut self) { // Shift hold times right. This will free up HOLD_TIME_LEN bytes at the beginning of the array. 
self.hold_times.copy_within(..(MAX_HOPS - 1) * HOLD_TIME_LEN, HOLD_TIME_LEN); @@ -2773,7 +2828,7 @@ impl AttributionData { &self.hold_times[idx * HOLD_TIME_LEN..(idx + 1) * HOLD_TIME_LEN] } - fn update(&mut self, message: &[u8], shared_secret: &[u8], hold_time: u32) { + pub(crate) fn update(&mut self, message: &[u8], shared_secret: &[u8], hold_time: u32) { let hold_time_bytes: [u8; 4] = hold_time.to_be_bytes(); self.hold_times[..HOLD_TIME_LEN].copy_from_slice(&hold_time_bytes); self.add_hmacs(shared_secret, message); @@ -3284,6 +3339,65 @@ mod tests { process_onion_failure(&ctx_full, &logger, &htlc_source, onion_error) } + #[test] + fn test_success_hold_times() { + fn assert_data(actual: &AttributionData, expected: &str) { + let (expected_hold_times, expected_hmacs) = + expected.split_at(MAX_HOPS * HOLD_TIME_LEN * 2); + + println!( + "{}{}", + actual.hold_times.to_lower_hex_string(), + actual.hmacs.to_lower_hex_string() + ); + + assert_eq!(actual.hold_times.to_lower_hex_string(), expected_hold_times); + assert_eq!(actual.hmacs.to_lower_hex_string(), expected_hmacs); + } + + const EXPECTED_MESSAGES: [&str; 5] = [ + "d77d0711b5f71d1d1be56bd88b3bb7ebc1792bb739ea7ebc1bc3b031b8bc2df3a50e25aeb99f47d7f7ab39e24187d3f4df9c4333463b053832ee9ac07274a5261b8b2a01fc09ce9ea7cd04d7b585dfb83299fb6570d71f793c1fcac0ef498766952c8c6840efa02a567d558a3cf6822b12476324b9b9efa03e5f8f26f81fa93daac46cbf00c98e69b6747cf69caaa2a71b025bd18830c4c54cd08f598cfde6197b3f2a951aba907c964c0f5d19a44e6d1d7279637321fa598adde927b3087d238f8b426ecde500d318617cdb7a56e6ce3520fc95be41a549973764e4dc483853ecc313947709f1b5199cb077d46e701fa633e11d3e13b03e9212c115ca6fa004b2f3dd912814693b705a561a06da54cdf603677a3abecdc22c7358c2de3cef771b366a568150aeecc86ad1990bb0f4e2865933b03ea0df87901bff467908273dc6cea31cbab0e2b8d398d10b001058c259ed221b7b55762f4c7e49c8c11a45a107b7a2c605c26dc5b0b10d719b1c844670102b2b6a36c43fe4753a78a483fc39166ae28420f112d50c10ee64ca69569a2f690712905236b7c2cb7ac8954f02922d2d918c56d42649261593c47b14b324a65038c3c5be8d3c403ce0c8f19299b1664bf077d7cf1636c4fb9685a8e58b7029fd0939fa07925a60bed339b23f973293598f595e75c8f9d455d7cebe4b5e23357c8bd47d66d6628b39427e37e0aecbabf46c11be6771f7136e108a143ae9bafba0fc47a51b6c7deef4cba54bae906398ee3162a41f2191ca386b628bde7e1dd63d1611aa01a95c456df337c763cb8c3a81a6013aa633739d8cd554c688102211725e6adad165adc1bcd429d020c51b4b25d2117e8bb27eb0cc7020f9070d4ad19ac31a76ebdf5f9246646aeadbfb9a3f1d75bd8237961e786302516a1a781780e8b73f58dc06f307e58bd0eb1d8f5c9111f01312974c1dc777a6a2d3834d8a2a40014e9818d0685cb3919f6b3b788ddc640b0ff9b1854d7098c7dd6f35196e902b26709640bc87935a3914869a807e8339281e9cedaaca99474c3e7bdd35050bb998ab4546f9900904e0e39135e861ff7862049269701081ebce32e4cca992c6967ff0fd239e38233eaf614af31e186635e9439ec5884d798f9174da6ff569d68ed5c092b78bd3f880f5e88a7a8ab36789e1b57b035fb6c32a6358f51f83e4e5f46220bcad072943df8bd9541a61b7dae8f30fa3dd5fb39b1fd9a0b8e802552b78d4ec306ecee15bfe6da14b29ba6d19ce5be4dd478bca74a52429cd5309d404655c3dec85c252", + 
"1571e10db7f8aa9f8e7e99caaf9c892e106c817df1d8e3b7b0e39d1c48f631e473e17e205489dd7b3c634cac3be0825cbf01418cd46e83c24b8d9c207742db9a0f0e5bcd888086498159f08080ba7bf3ea029c0b493227c4e75a90f70340d9e21f00979fc7e4fb2078477c1a457ba242ed54b313e590b13a2a13bfeed753dab133c78059f460075b2594b4c31c50f31076f8f1a0f7ad0530d0fadaf2d86e505ff9755940ec0665f9e5bc58cad6e523091f94d0bcd3c6c65ca1a5d401128dcc5e14f9108b32e660017c13de598bcf9d403710857cccb0fb9c2a81bfd66bc4552e1132afa3119203a4aaa1e8839c1dab8cbdcde7b527aca3f54bde651aa9f3f2178829cee3f1c0b9292758a40cc63bd998fcd0d3ed4bdcaf1023267b8f8e44130a63ad15f76145936552381eabb6d684c0a3af6ba8efcf207cebaea5b7acdbb63f8e7221102409d10c23f0514dc9f4d0efb2264161a193a999a23e992632710580a0d320f676d367b9190721194514457761af05207cdab2b6328b1b3767eacb36a7ef4f7bd2e16762d13df188e0898b7410f62459458712a44bf594ae662fd89eb300abb6952ff8ad40164f2bcd7f86db5c7650b654b79046de55d51aa8061ce35f867a3e8f5bf98ad920be827101c64fb871d86e53a4b3c0455bfac5784168218aa72cbee86d9c750a9fa63c363a8b43d7bf4b2762516706a306f0aa3be1ec788b5e13f8b24837e53ac414f211e11c7a093cd9653dfa5fba4e377c79adfa5e841e2ddb6afc054fc715c05ddc6c8fc3e1ee3406e1ffceb2df77dc2f02652614d1bfcfaddebaa53ba919c7051034e2c7b7cfaabdf89f26e7f8e3f956d205dfab747ad0cb505b85b54a68439621b25832cbc2898919d0cd7c0a64cfd235388982dd4dd68240cb668f57e1d2619a656ed326f8c92357ee0d9acead3c20008bc5f04ca8059b55d77861c6d04dfc57cfba57315075acbe1451c96cf28e1e328e142890248d18f53b5d3513ce574dea7156cf596fdb3d909095ec287651f9cf1bcdc791c5938a5dd9b47e84c004d24ab3ae74492c7e8dcc1da15f65324be2672947ec82074cac8ce2b925bc555facbbf1b55d63ea6fbea6a785c97d4caf2e1dad9551b7f66c31caae5ebc7c0047e892f201308fcf452c588be0e63d89152113d87bf0dbd01603b4cdc7f0b724b0714a9851887a01f709408882e18230fe810b9fafa58a666654576d8eba3005f07221f55a6193815a672e5db56204053bc4286fa3db38250396309fd28011b5708a26a2d76c4a333b69b6bfd272fb", + 
"34e34397b8621ec2f2b54dbe6c14073e267324cd60b152bce76aec8729a6ddefb61bc263be4b57bd592aae604a32bea69afe6ef4a6b573c26b17d69381ec1fc9b5aa769d148f2f1f8b5377a73840bb6dffc324ded0d1c00dc0c99e3dbc13273b2f89510af6410b525dd8836208abbbaae12753ae2276fa0ca49950374f94e187bf65cefcdd9dd9142074edc4bd0052d0eb027cb1ab6182497f9a10f9fe800b3228e3c088dab60081c807b30a67313667ca8c9e77b38b161a037cae8e973038d0fc4a97ea215914c6c4e23baf6ac4f0fb1e7fcc8aac3f6303658dae1f91588b535eb678e2200f45383c2590a55dc181a09f2209da72f79ae6745992c803310d39f960e8ecf327aed706e4b3e2704eeb9b304dc0e0685f5dcd0389ec377bdba37610ad556a0e957a413a56339dd3c40817214bced5802beee2ee545bdd713208751added5fc0eb2bc89a5aa2decb18ee37dac39f22a33b60cc1a369d24de9f3d2d8b63c039e248806de4e36a47c7a0aed30edd30c3d62debdf1ad82bf7aedd7edec413850d91c261e12beec7ad1586a9ad25b2db62c58ca17119d61dcc4f3e5c4520c42a8e384a45d8659b338b3a08f9e123a1d3781f5fc97564ccff2c1d97f06fa0150cfa1e20eacabefb0c339ec109336d207cc63d9170752fc58314c43e6d4a528fd0975afa85f3aa186ff1b6b8cb12c97ed4ace295b0ef5f075f0217665b8bb180246b87982d10f43c9866b22878106f5214e99188781180478b07764a5e12876ddcb709e0a0a8dd42cf004c695c6fc1669a6fd0e4a1ca54b024d0d80eac492a9e5036501f36fb25b72a054189294955830e43c18e55668337c8c6733abb09fc2d4ade18d5a853a2b82f7b4d77151a64985004f1d9218f2945b63c56fdebd1e96a2a7e49fa70acb4c39873947b83c191c10e9a8f40f60f3ad5a2be47145c22ea59ed3f5f4e61cb069e875fb67142d281d784bf925cc286eacc2c43e94d08da4924b83e58dbf2e43fa625bdd620eba6d9ce960ff17d14ed1f2dbee7d08eceb540fdc75ff06dabc767267658fad8ce99e2a3236e46d2deedcb51c3c6f81589357edebac9772a70b3d910d83cd1b9ce6534a011e9fa557b891a23b5d88afcc0d9856c6dabeab25eea55e9a248182229e4927f268fe5431672fcce52f434ca3d27d1a2136bae5770bb36920df12fbc01d0e8165610efa04794f414c1417f1d4059435c5385bfe2de83ce0e238d6fd2dbd3c0487c69843298577bfa480fe2a16ab2a0e4bc712cd8b5a14871cda61c993b6835303d9043d7689a", + 
"74a4ea61339463642a2182758871b2ea724f31f531aa98d80f1c3043febca41d5ee52e8b1e127e61719a0d078db8909748d57839e58424b91f063c4fbc8a221bef261140e66a9b596ca6d420a973ad5431adfa8280a7355462fe50d4cac15cdfbd7a535c4b72a0b6d7d8a64cff3f719ff9b8be28036826342dc3bf3781efc70063d1e6fc79dff86334ae0564a5ab87bd61f8446465ef6713f8c4ef9d0200ebb375f90ee115216b469af42de554622df222858d30d733af1c9223e327ae09d9126be8baee6dd59a112d83a57cc6e0252104c11bc11705d384220eedd72f1a29a0597d97967e28b2ad13ba28b3d8a53c3613c1bb49fe9700739969ef1f795034ef9e2e983af2d3bbd6c637fb12f2f7dfc3aee85e08711e9b604106e95d7a4974e5b047674a6015792dae5d913681d84f71edd415910582e5d86590df2ecfd561dc6e1cdb08d3e10901312326a45fb0498a177319389809c6ba07a76cfad621e07b9af097730e94df92fbd311b2cb5da32c80ab5f14971b6d40f8e2ab202ac98bd8439790764a40bf309ea2205c1632610956495720030a25dc7118e0c868fdfa78c3e9ecce58215579a0581b3bafdb7dbbe53be9e904567fdc0ce1236aab5d22f1ebc18997e3ea83d362d891e04c5785fd5238326f767bce499209f8db211a50e1402160486e98e7235cf397dbb9ae19fd9b79ef589c821c6f99f28be33452405a003b33f4540fe0a41dfcc286f4d7cc10b70552ba7850869abadcd4bb7f256823face853633d6e2a999ac9fcd259c71d08e266db5d744e1909a62c0db673745ad9585949d108ab96640d2bc27fb4acac7fa8b170a30055a5ede90e004df9a44bdc29aeb4a6bec1e85dde1de6aaf01c6a5d12405d0bec22f49026cb23264f8c04b8401d3c2ab6f2e109948b6193b3bec27adfe19fb8afb8a92364d6fc5b219e8737d583e7ff3a4bcb75d53edda3bf3f52896ac36d8a877ad9f296ea6c045603fc62ac4ae41272bde85ef7c3b3fd3538aacfd5b025fefbe277c2906821ecb20e6f75ea479fa3280f9100fb0089203455c56b6bc775e5c2f0f58c63edd63fa3eec0b40da4b276d0d41da2ec0ead865a98d12bc694e23d8eaadd2b4d0ee88e9570c88fb878930f492e036d27998d593e47763927ff7eb80b188864a3846dd2238f7f95f4090ed399ae95deaeb37abca1cf37c397cc12189affb42dca46b4ff6988eb8c060691d155302d448f50ff70a794d97c0408f8cee9385d6a71fa412e36edcb22dbf433db9db4779f27b682ee17fc05e70c8e794b9f7f6d1", + 
"84986c936d26bfd3bb2d34d3ec62cfdb63e0032fdb3d9d75f3e5d456f73dffa7e35aab1db4f1bd3b98ff585caf004f656c51037a3f4e810d275f3f6aea0c8e3a125ebee5f374b6440bcb9bb2955ebf70c06d64090f9f6cf098200305f7f4305ba9e1350a0c3f7dab4ccf35b8399b9650d8e363bf83d3a0a09706433f0adae6562eb338b21ea6f21329b3775905e59187c325c9cbf589f5da5e915d9e5ad1d21aa1431f9bdc587185ed8b5d4928e697e67cc96bee6d5354e3764cede3f385588fa665310356b2b1e68f8bd30c75d395405614a40a587031ebd6ace60dfb7c6dd188b572bd8e3e9a47b06c2187b528c5ed35c32da5130a21cd881138a5fcac806858ce6c596d810a7492eb261bcc91cead1dae75075b950c2e81cecf7e5fdb2b51df005d285803201ce914dfbf3218383829a0caa8f15486dd801133f1ed7edec436730b0ec98f48732547927229ac80269fcdc5e4f4db264274e940178732b429f9f0e582c559f994a7cdfb76c93ffc39de91ff936316726cc561a6520d47b2cd487299a96322dadc463ef06127fc63902ff9cc4f265e2fbd9de3fa5e48b7b51aa0850580ef9f3b5ebb60c6c3216c5a75a93e82936113d9cad57ae4a94dd6481954a9bd1b5cff4ab29ca221fa2bf9b28a362c9661206f896fc7cec563fb80aa5eaccb26c09fa4ef7a981e63028a9c4dac12f82ccb5bea090d56bbb1a4c431e315d9a169299224a8dbd099fb67ea61dfc604edf8a18ee742550b636836bb552dabb28820221bf8546331f32b0c143c1c89310c4fa2e1e0e895ce1a1eb0f43278fdb528131a3e32bfffe0c6de9006418f5309cba773ca38b6ad8507cc59445ccc0257506ebc16a4c01d4cd97e03fcf7a2049fea0db28447858f73b8e9fe98b391b136c9dc510288630a1f0af93b26a8891b857bfe4b818af99a1e011e6dbaa53982d29cf74ae7dffef45545279f19931708ed3eede5e82280eab908e8eb80abff3f1f023ab66869297b40da8496861dc455ac3abe1efa8a6f9e2c4eda48025d43a486a3f26f269743eaa30d6f0e1f48db6287751358a41f5b07aee0f098862e3493731fe2697acce734f004907c6f11eef189424fee52cd30ad708707eaf2e441f52bcf3d0c5440c1742458653c0c8a27b5ade784d9e09c8b47f1671901a29360e7e5e94946b9c75752a1a8d599d2a3e14ac81b84d42115cd688c8383a64fc6e7e1dc5568bb4837358ebe63207a4067af66b2027ad2ce8fb7ae3a452d40723a51fdf9f9c9913e8029a222cf81d12ad41e58860d75deb6de30ad", + ]; + + let onion_keys = build_test_onion_keys(); + + let mut attribution_data = AttributionData::new(); + attribution_data.update(&[], onion_keys[4].shared_secret.as_ref(), 1); + + let logger: Arc = Arc::new(TestLogger::new()); + + attribution_data.crypt(onion_keys[4].shared_secret.as_ref()); + + assert_data(&attribution_data, EXPECTED_MESSAGES[0]); + + for idx in (0..4).rev() { + let shared_secret = onion_keys[idx].shared_secret.as_ref(); + let hold_time = (5 - idx) as u32; + + attribution_data.shift_right(); + attribution_data.update(&[], shared_secret, hold_time); + attribution_data.crypt(shared_secret); + + assert_data(&attribution_data, EXPECTED_MESSAGES[4 - idx]); + } + + let ctx_full = Secp256k1::new(); + let path = build_test_path(); + let hold_times = process_onion_success( + &ctx_full, + &logger, + &path, + &get_test_session_key(), + attribution_data.clone(), + ); + + assert_eq!(hold_times, [5, 4, 3, 2, 1]) + } + fn build_trampoline_test_path() -> Path { Path { hops: vec![ diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index c7dd388eb32..f18a31be716 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -46,7 +46,7 @@ use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::string::UntrustedString; -use crate::util::{test_utils}; +use crate::util::test_utils; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash;