From 1db55153f787227447d7f64b2c02fe8e8c0ea3b8 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 1 May 2025 01:11:28 +0000 Subject: [PATCH 01/25] Use variables for node IDs in `payment_tests` --- lightning/src/ln/payment_tests.rs | 797 ++++++++++++++++++------------ 1 file changed, 479 insertions(+), 318 deletions(-) diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index a2cc6e2774a..62779e9b156 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -58,6 +58,9 @@ fn mpp_failure() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; @@ -66,10 +69,10 @@ fn mpp_failure() { let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_id; route.paths[0].hops[1].short_channel_id = chan_3_id; - route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_id; route.paths[1].hops[1].short_channel_id = chan_4_id; send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret); @@ -83,25 +86,31 @@ fn mpp_retry() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1); let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2); let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3); let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2); + // Rebalance send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000); let amt_msat = 1_000_000; let max_total_routing_fee_msat = 50_000; - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!( nodes[0], nodes[3], payment_params, amt_msat, Some(max_total_routing_fee_msat)); let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_update.contents.short_channel_id; route.paths[0].hops[1].short_channel_id = chan_3_update.contents.short_channel_id; - route.paths[1].hops[0].pubkey = 
nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_update.contents.short_channel_id; route.paths[1].hops[1].short_channel_id = chan_4_update.contents.short_channel_id; @@ -117,25 +126,25 @@ fn mpp_retry() { assert_eq!(events.len(), 2); // Pass half of the payment along the success path. - let success_path_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + let success_path_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None); // Add the HTLC along the first hop. - let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); + let fail_path_msgs_1 = remove_first_msg_event_to_node(&node_c_id, &mut events); let send_event = SendEvent::from_event(fail_path_msgs_1); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], &send_event.commitment_msg, false); // Attempt to forward the payment and complete the 2nd path's failure. expect_pending_htlcs_forwardable!(&nodes[2]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]); - let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id }]); + let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); check_added_monitors!(nodes[2], 1); - nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false); let mut events = nodes[0].node.get_and_clear_pending_events(); match events[1] { @@ -185,6 +194,11 @@ fn mpp_retry_overpay() { &[Some(user_config.clone()), Some(limited_config_1), Some(limited_config_2), Some(user_config)]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let (chan_1_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 40_000, 0); let (chan_2_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 40_000, 0); let (_chan_3_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 40_000, 0); @@ -193,7 +207,7 @@ fn mpp_retry_overpay() { let amt_msat = 70_000_000; let max_total_routing_fee_msat = Some(1_000_000); - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) 
.with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!( nodes[0], nodes[3], payment_params, amt_msat, max_total_routing_fee_msat); @@ -220,30 +234,30 @@ fn mpp_retry_overpay() { assert_eq!(events.len(), 2); // Pass half of the payment along the success path. - let success_path_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + let success_path_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], amt_msat, payment_hash, Some(payment_secret), success_path_msgs, false, None); // Add the HTLC along the first hop. - let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); + let fail_path_msgs_1 = remove_first_msg_event_to_node(&node_c_id, &mut events); let send_event = SendEvent::from_event(fail_path_msgs_1); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], &send_event.commitment_msg, false); // Attempt to forward the payment and complete the 2nd path's failure. expect_pending_htlcs_forwardable!(&nodes[2]); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCHandlingFailureType::Forward { - node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id + node_id: Some(node_d_id), channel_id: chan_4_id }] ); - let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); + let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); assert!(htlc_updates.update_fulfill_htlcs.is_empty()); assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); check_added_monitors!(nodes[2], 1); - nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), + nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -296,6 +310,11 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1); let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2); let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3); @@ -304,10 +323,10 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000); let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_update.contents.short_channel_id; route.paths[0].hops[1].short_channel_id = chan_3_update.contents.short_channel_id; - route.paths[1].hops[0].pubkey = 
nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_update.contents.short_channel_id; route.paths[1].hops[1].short_channel_id = chan_4_update.contents.short_channel_id; @@ -319,7 +338,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { assert_eq!(events.len(), 2); // Pass half of the payment along the first path. - let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_1_msgs, false, None); if send_partial_mpp { @@ -330,24 +349,24 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { // Failed HTLC from node 3 -> 1 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingFailureType::Receive { payment_hash }]); - let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id()); + let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], node_b_id); assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1); - nodes[1].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]); + nodes[1].node.handle_update_fail_htlc(node_d_id, &htlc_fail_updates_3_1.update_fail_htlcs[0]); check_added_monitors!(nodes[3], 1); commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false); // Failed HTLC from node 1 -> 0 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]); - let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_3_id }]); + let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates_1_0.update_fail_htlcs[0]); check_added_monitors!(nodes[1], 1); commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false); expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(LocalHTLCFailureReason::MPPTimeout, &[][..])); } else { // Pass half of the payment along the second path. 
- let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); + let node_2_msgs = remove_first_msg_event_to_node(&node_c_id, &mut events); pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_2_msgs, true, None); // Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts @@ -379,12 +398,15 @@ fn do_test_keysend_payments(public_node: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + if public_node { create_announced_chan_between_nodes(&nodes, 0, 1); } else { create_chan_between_nodes(&nodes[0], &nodes[1]); } - let payee_pubkey = nodes[1].node.get_our_node_id(); + let payee_pubkey = node_b_id; let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::for_keysend(payee_pubkey, 40, false), 10000); @@ -397,7 +419,7 @@ fn do_test_keysend_payments(public_node: bool) { } check_added_monitors!(nodes[0], 1); let send_event = SendEvent::from_node(&nodes[0]); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false); expect_pending_htlcs_forwardable!(nodes[1]); // Previously, a refactor caused us to stop including the payment preimage in the onion which @@ -418,12 +440,16 @@ fn test_mpp_keysend() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 0, 2); create_announced_chan_between_nodes(&nodes, 1, 3); create_announced_chan_between_nodes(&nodes, 2, 3); - let payee_pubkey = nodes[3].node.get_our_node_id(); + let payee_pubkey = node_d_id; let recv_value = 15_000_000; let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::for_keysend(payee_pubkey, 40, true), recv_value); @@ -440,11 +466,11 @@ fn test_mpp_keysend() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); - let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + let ev = remove_first_msg_event_to_node(&node_b_id, &mut events); pass_along_path(&nodes[0], expected_route[0], recv_value, payment_hash.clone(), Some(payment_secret), ev.clone(), false, Some(payment_preimage)); - let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); + let ev = remove_first_msg_event_to_node(&node_c_id, &mut events); pass_along_path(&nodes[0], expected_route[1], recv_value, payment_hash.clone(), Some(payment_secret), ev.clone(), true, Some(payment_preimage)); claim_payment_along_route( @@ -462,6 +488,12 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let 
node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; @@ -471,7 +503,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let (mut route, payment_hash, payment_preimage, _) = get_route_and_payment_hash!(nodes[0], nodes[3], amount); // Pay along nodes[1] - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_id; route.paths[0].hops[1].short_channel_id = chan_3_id; @@ -483,16 +515,16 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { ).unwrap(); check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs!(nodes[0], node_b_id); let update_add_0 = update_0.update_add_htlcs[0].clone(); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_0); + nodes[1].node.handle_update_add_htlc(node_a_id, &update_add_0); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(&nodes[1], 1); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[3].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs!(nodes[1], node_d_id); let update_add_1 = update_1.update_add_htlcs[0].clone(); - nodes[3].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_1); + nodes[3].node.handle_update_add_htlc(node_b_id, &update_add_1); commitment_signed_dance!(nodes[3], nodes[1], update_1.commitment_signed, false, true); expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_update_add_htlcs(); @@ -519,7 +551,7 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { nodes[3].node.process_pending_htlc_forwards(); // Pay along nodes[2] - route.paths[0].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_c_id; route.paths[0].hops[0].short_channel_id = chan_2_id; route.paths[0].hops[1].short_channel_id = chan_4_id; @@ -531,16 +563,16 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { ).unwrap(); check_added_monitors!(nodes[0], 1); - let update_2 = get_htlc_update_msgs!(nodes[0], nodes[2].node.get_our_node_id()); + let update_2 = get_htlc_update_msgs!(nodes[0], node_c_id); let update_add_2 = update_2.update_add_htlcs[0].clone(); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_2); + nodes[2].node.handle_update_add_htlc(node_a_id, &update_add_2); commitment_signed_dance!(nodes[2], nodes[0], &update_2.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors!(&nodes[2], 1); - let update_3 = get_htlc_update_msgs!(nodes[2], nodes[3].node.get_our_node_id()); + let update_3 = get_htlc_update_msgs!(nodes[2], node_d_id); let update_add_3 = update_3.update_add_htlcs[0].clone(); - nodes[3].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &update_add_3); + nodes[3].node.handle_update_add_htlc(node_c_id, &update_add_3); commitment_signed_dance!(nodes[3], nodes[2], update_3.commitment_signed, false, true); expect_pending_htlcs_forwardable_ignore!(nodes[3]); 
nodes[3].node.process_pending_update_add_htlcs(); @@ -569,14 +601,14 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { check_added_monitors!(nodes[3], 1); // Fail back along nodes[2] - let update_fail_0 = get_htlc_update_msgs!(&nodes[3], &nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &update_fail_0.update_fail_htlcs[0]); + let update_fail_0 = get_htlc_update_msgs!(&nodes[3], &node_c_id); + nodes[2].node.handle_update_fail_htlc(node_d_id, &update_fail_0.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], update_fail_0.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_channel_id }]); check_added_monitors!(nodes[2], 1); - let update_fail_1 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &update_fail_1.update_fail_htlcs[0]); + let update_fail_1 = get_htlc_update_msgs!(nodes[2], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_c_id, &update_fail_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], update_fail_1.commitment_signed, false); expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new()); @@ -596,12 +628,15 @@ fn no_pending_leak_on_initial_send_failure() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) @@ -631,6 +666,10 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let nodes_0_deserialized; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -650,24 +689,24 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.node_id, node_b_id); // We relay the payment to nodes[1] while its disconnected from nodes[2], causing the payment // to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment // which would prevent retry. 
- nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_c_id); + nodes[2].node.peer_disconnected(node_b_id); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2}] + &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2}] ); check_added_monitors(&nodes[1], 1); // nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected - let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let _ = get_htlc_update_msgs!(nodes[1], node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); @@ -684,7 +723,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and // force-close the channel. - check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [node_b_id], 100000); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[0].node.has_pending_payments()); nodes[0].node.timer_tick_occurred(); @@ -697,27 +736,27 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { } check_added_monitors!(nodes[0], 1); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { + nodes[1].node.peer_disconnected(node_a_id); + nodes[0].node.peer_connected(node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, remote_network_address: None }, true).unwrap(); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an // error, as the channel has hit the chain. - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { + nodes[1].node.peer_connected(node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, remote_network_address: None }, false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reestablish); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); let as_err = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_err.len(), 2); match as_err[1] { MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_error(nodes[0].node.get_our_node_id(), msg); + assert_eq!(node_id, node_b_id); + nodes[1].node.handle_error(node_a_id, msg); check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", - &nodes[1].node.get_our_node_id())) }, [nodes[0].node.get_our_node_id()], 100000); + &node_b_id)) }, [node_a_id], 100000); check_added_monitors!(nodes[1], 1); assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); }, @@ -731,8 +770,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); + let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &htlc_fulfill_updates.update_fulfill_htlcs[0]); check_added_monitors!(nodes[1], 1); commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false); @@ -791,7 +830,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // do_claim_payment_along_route expects us to never overpay. { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id()) + let mut peer_state = per_peer_state.get(&node_c_id) .unwrap().lock().unwrap(); let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap(); let mut new_config = channel.context().config(); @@ -851,6 +890,10 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Because we set nodes[1] to manually accept channels, just open a 0-conf channel. let (funding_tx, chan_id) = open_zero_conf_channel(&nodes[0], &nodes[1], None); confirm_transaction(&nodes[0], &funding_tx); @@ -871,38 +914,38 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_a_id); // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and // force-close the channel. 
- check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [node_b_id], 100000); nodes[0].node.timer_tick_occurred(); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[0].node.has_pending_payments()); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); check_added_monitors!(nodes[0], 1); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { + nodes[0].node.peer_connected(node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, remote_network_address: None }, true).unwrap(); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an // error, as the channel has hit the chain. - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { + nodes[1].node.peer_connected(node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, remote_network_address: None }, false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reestablish); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); let as_err = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_err.len(), 2); let bs_commitment_tx; match as_err[1] { MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_error(nodes[0].node.get_our_node_id(), msg); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &nodes[1].node.get_our_node_id())) } - , [nodes[0].node.get_our_node_id()], 100000); + assert_eq!(node_id, node_b_id); + nodes[1].node.handle_error(node_a_id, msg); + check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", &node_b_id)) } + , [node_a_id], 100000); check_added_monitors!(nodes[1], 1); bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); }, @@ -917,11 +960,11 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[2], 1); - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fail_htlcs[0]); + let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_update_fail_htlc(node_c_id, &htlc_fulfill_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }]); // Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming // the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved @@ -978,7 +1021,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty()); reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_a_id); nodes[0].node.test_process_background_events(); @@ -1007,7 +1050,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // Check that after reload we can send the payment again (though we shouldn't, since it was // claimed previously). reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_a_id); nodes[0].node.test_process_background_events(); @@ -1043,19 +1086,22 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo let nodes_0_deserialized; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); let error_message = "Channel force-closed"; // Route a payment, but force-close the channel before the HTLC fulfill message arrives at // nodes[0]. let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 10_000_000); - nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &node_b_id, error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_b_id], 100000); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); // Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); @@ -1074,7 +1120,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo mine_transaction(&nodes[1], &commitment_tx); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let htlc_success_tx = { let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); @@ -1176,6 +1222,9 @@ fn test_fulfill_restart_failure() { let nodes_1_deserialized; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000); @@ -1188,21 +1237,21 @@ fn test_fulfill_restart_failure() { check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 100_000); - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); + let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &htlc_fulfill_updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); // Now reload nodes[1]... 
reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); nodes[1].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); check_added_monitors!(nodes[1], 1); - let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]); + let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, false); // nodes[0] shouldn't generate any events here, while it just got a payment failure completion // it had already considered the payment fulfilled, and now they just got free money. @@ -1216,19 +1265,23 @@ fn get_ldk_payment_preimage() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 60_000; let expiry_secs = 60 * 60; let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(amt_msat), expiry_secs, None).unwrap(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let scorer = test_utils::TestScorer::new(); let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet); let random_seed_bytes = keys_manager.get_secure_random_bytes(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); - let route = get_route( &nodes[0].node.get_our_node_id(), &route_params, + let route = get_route( &node_a_id, &route_params, &nodes[0].network_graph.read_only(), Some(&nodes[0].node.list_usable_channels().iter().collect::>()), nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); @@ -1253,6 +1306,8 @@ fn sent_probe_is_probe_of_sending_node() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); @@ -1273,7 +1328,7 @@ fn sent_probe_is_probe_of_sending_node() { _ => panic!(), } - get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + get_htlc_update_msgs!(nodes[0], node_b_id); check_added_monitors!(nodes[0], 1); } @@ -1308,10 +1363,14 @@ fn failed_probe_yields_event() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let channel_id = 
create_announced_chan_between_nodes(&nodes, 0, 1).2; create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 90000000); - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42); + let payment_params = PaymentParameters::from_node_id(node_c_id, 42); let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], payment_params, 9_998_000); @@ -1319,19 +1378,19 @@ fn failed_probe_yields_event() { // node[0] -- update_add_htlcs -> node[1] check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), channel_id, updates); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &probe_event.msgs[0]); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let probe_event = SendEvent::from_commitment_update(node_b_id, channel_id, updates); + nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); // node[0] <- update_fail_htlcs -- node[1] check_added_monitors!(nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); // Skip the PendingHTLCsForwardable event let _events = nodes[1].node.get_and_clear_pending_events(); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); check_added_monitors!(nodes[0], 0); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); @@ -1356,10 +1415,14 @@ fn onchain_failed_probe_yields_event() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; create_announced_chan_between_nodes(&nodes, 1, 2); - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), 42); + let payment_params = PaymentParameters::from_node_id(node_c_id, 42); // Send a dust HTLC, which will be treated as if it timed out once the channel hits the chain. 
let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], payment_params, 1_000); @@ -1367,15 +1430,15 @@ fn onchain_failed_probe_yields_event() { // node[0] -- update_add_htlcs -> node[1] check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - let probe_event = SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), chan_id, updates); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &probe_event.msgs[0]); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + let probe_event = SendEvent::from_commitment_update(node_b_id, chan_id, updates); + nodes[1].node.handle_update_add_htlc(node_a_id, &probe_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], probe_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); - let _ = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let _ = get_htlc_update_msgs!(nodes[1], node_c_id); // Don't bother forwarding the HTLC onwards and just confirm the force-close transaction on // Node A, which after 6 confirmations should result in a probe failure event. @@ -1415,6 +1478,8 @@ fn preflight_probes_yield_event_skip_private_hop() { let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &user_configs); let nodes = create_network(5, &node_cfgs, &node_chanmgrs); + let node_d_id = nodes[3].node.get_our_node_id(); + // Setup channel topology: // N0 -(1M:0)- N1 -(1M:0)- N2 -(70k:0)- N3 -(50k:0)- N4 @@ -1426,7 +1491,7 @@ fn preflight_probes_yield_event_skip_private_hop() { let mut invoice_features = Bolt11InvoiceFeatures::empty(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) .with_bolt11_features(invoice_features).unwrap(); let recv_value = 50_000_000; @@ -1457,6 +1522,8 @@ fn preflight_probes_yield_event() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &user_configs); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_d_id = nodes[3].node.get_our_node_id(); + // Setup channel topology: // (1M:0)- N1 -(30k:0) // / \ @@ -1472,7 +1539,7 @@ fn preflight_probes_yield_event() { let mut invoice_features = Bolt11InvoiceFeatures::empty(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) .with_bolt11_features(invoice_features).unwrap(); let recv_value = 50_000_000; @@ -1503,6 +1570,8 @@ fn preflight_probes_yield_event_and_skip() { let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &user_configs); let nodes = create_network(5, &node_cfgs, &node_chanmgrs); + let node_e_id = nodes[4].node.get_our_node_id(); + // Setup channel topology: // (30k:0)- N2 -(1M:0) // / \ @@ -1519,7 +1588,7 @@ fn preflight_probes_yield_event_and_skip() { let mut invoice_features = Bolt11InvoiceFeatures::empty(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[4].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_e_id, TEST_FINAL_CLTV) .with_bolt11_features(invoice_features).unwrap(); let recv_value = 80_000_000; @@ -1695,6 +1764,10 @@ fn 
test_trivial_inflight_htlc_tracking(){ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let (_, _, chan_1_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -1707,8 +1780,8 @@ fn test_trivial_inflight_htlc_tracking(){ let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id); let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), + &NodeId::from_pubkey(&node_a_id) , + &NodeId::from_pubkey(&node_b_id), channel_1.context().get_short_channel_id().unwrap() ); assert_eq!(chan_1_used_liquidity, None); @@ -1719,8 +1792,8 @@ fn test_trivial_inflight_htlc_tracking(){ let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()), + &NodeId::from_pubkey(&node_b_id) , + &NodeId::from_pubkey(&node_c_id), channel_2.context().get_short_channel_id().unwrap() ); @@ -1744,8 +1817,8 @@ fn test_trivial_inflight_htlc_tracking(){ let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id); let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), + &NodeId::from_pubkey(&node_a_id) , + &NodeId::from_pubkey(&node_b_id), channel_1.context().get_short_channel_id().unwrap() ); // First hop accounts for expected 1000 msat fee @@ -1757,8 +1830,8 @@ fn test_trivial_inflight_htlc_tracking(){ let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()), + &NodeId::from_pubkey(&node_b_id) , + &NodeId::from_pubkey(&node_c_id), channel_2.context().get_short_channel_id().unwrap() ); @@ -1783,8 +1856,8 @@ fn test_trivial_inflight_htlc_tracking(){ let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id); let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), + &NodeId::from_pubkey(&node_a_id) , + &NodeId::from_pubkey(&node_b_id), channel_1.context().get_short_channel_id().unwrap() ); assert_eq!(chan_1_used_liquidity, None); @@ -1795,8 +1868,8 @@ fn test_trivial_inflight_htlc_tracking(){ let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[2].node.get_our_node_id()), + &NodeId::from_pubkey(&node_b_id) , + &NodeId::from_pubkey(&node_c_id), channel_2.context().get_short_channel_id().unwrap() ); 
assert_eq!(chan_2_used_liquidity, None); @@ -1812,6 +1885,10 @@ fn test_holding_cell_inflight_htlcs() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); @@ -1836,8 +1913,8 @@ fn test_holding_cell_inflight_htlcs() { let channel = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id); let used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) , - &NodeId::from_pubkey(&nodes[1].node.get_our_node_id()), + &NodeId::from_pubkey(&node_a_id) , + &NodeId::from_pubkey(&node_b_id), channel.context().get_short_channel_id().unwrap() ); @@ -1870,6 +1947,11 @@ fn do_test_intercepted_payment(test: InterceptTest) { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes(); @@ -1877,10 +1959,10 @@ fn do_test_intercepted_payment(test: InterceptTest) { let amt_msat = 100_000; let intercept_scid = nodes[1].node.get_intercept_scid(); - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) .with_route_hints(vec![ RouteHint(vec![RouteHintHop { - src_node_id: nodes[1].node.get_our_node_id(), + src_node_id: node_b_id, short_channel_id: intercept_scid, fees: RoutingFees { base_msat: 1000, @@ -1894,7 +1976,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); let route = get_route( - &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None, + &node_a_id, &route_params, &nodes[0].network_graph.read_only(), None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes ).unwrap(); @@ -1911,7 +1993,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); @@ -1931,21 +2013,21 @@ fn do_test_intercepted_payment(test: InterceptTest) { }; // Check for unknown channel id error. 
- let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err(); + let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), node_c_id, expected_outbound_amount_msat).unwrap_err(); assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", - log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) }); + log_bytes!([42; 32]), node_c_id) }); if test == InterceptTest::Fail { // Ensure we can fail the intercepted payment back. nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); nodes[1].node.process_pending_htlc_forwards(); - let update_fail = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_fail = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(&nodes[1], 1); assert!(update_fail.update_fail_htlcs.len() == 1); let fail_msg = update_fail.update_fail_htlcs[0].clone(); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); + nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg); commitment_signed_dance!(nodes[0], nodes[1], update_fail.commitment_signed, false); // Ensure the payment fails with the expected error. @@ -1956,18 +2038,18 @@ fn do_test_intercepted_payment(test: InterceptTest) { expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); } else if test == InterceptTest::Forward { // Check that we'll fail as expected when sending to a channel that isn't in `ChannelReady` yet. - let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err(); + let temp_chan_id = nodes[1].node.create_channel(node_c_id, 100_000, 0, 42, None, None).unwrap(); + let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, node_c_id, expected_outbound_amount_msat).unwrap_err(); assert_eq!(unusable_chan_err , APIError::ChannelUnavailable { err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.", - temp_chan_id, nodes[2].node.get_our_node_id()) }); + temp_chan_id, node_c_id) }); assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1); // Open the just-in-time channel so the payment can then be forwarded. let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None); // Finally, forward the intercepted payment through and claim it. 
- nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap(); + nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, node_c_id, expected_outbound_amount_msat).unwrap(); expect_pending_htlcs_forwardable!(nodes[1]); let payment_event = { @@ -1980,12 +2062,12 @@ fn do_test_intercepted_payment(test: InterceptTest) { assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], &payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); let payment_preimage = nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); - expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, Some(payment_preimage), nodes[2].node.get_our_node_id()); + expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, Some(payment_preimage), node_c_id); do_claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) ); @@ -2017,19 +2099,19 @@ fn do_test_intercepted_payment(test: InterceptTest) { } expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); check_added_monitors!(nodes[1], 1); - let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(htlc_timeout_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1); assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty()); assert!(htlc_timeout_updates.update_fee.is_none()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_timeout_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false); expect_payment_failed!(nodes[0], payment_hash, false, LocalHTLCFailureReason::TemporaryNodeFailure, []); // Check for unknown intercept id error. 
let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None); - let unknown_intercept_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err(); + let unknown_intercept_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, node_c_id, expected_outbound_amount_msat).unwrap_err(); assert_eq!(unknown_intercept_id_err , APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) }); let unknown_intercept_id_err = nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap_err(); assert_eq!(unknown_intercept_id_err , APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) }); @@ -2056,6 +2138,10 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(underpay_config)]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let amt_msat = 900_000; let mut chan_ids = Vec::new(); @@ -2072,7 +2158,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let mut route_hints = Vec::new(); for _ in 0..num_mpp_parts { route_hints.push(RouteHint(vec![RouteHintHop { - src_node_id: nodes[1].node.get_our_node_id(), + src_node_id: node_b_id, short_channel_id: nodes[1].node.get_intercept_scid(), fees: RoutingFees { base_msat: 1000, @@ -2083,7 +2169,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { htlc_maximum_msat: Some(amt_msat / num_mpp_parts as u64 + 5), }])); } - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) .with_route_hints(route_hints).unwrap() .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); @@ -2096,7 +2182,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { // Forward the intercepted payments. 
for (idx, ev) in events.into_iter().enumerate() { - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &ev.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &ev.msgs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &ev.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); @@ -2112,7 +2198,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { _ => panic!() }; nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_ids[idx], - nodes[2].node.get_our_node_id(), expected_outbound_amt_msat - skimmed_fee_msat).unwrap(); + node_c_id, expected_outbound_amt_msat - skimmed_fee_msat).unwrap(); expect_pending_htlcs_forwardable!(nodes[1]); let payment_event = { { @@ -2124,7 +2210,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, false, true); if idx == num_mpp_parts - 1 { expect_pending_htlcs_forwardable!(nodes[2]); @@ -2142,7 +2228,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { assert_eq!(payment_hash, payment_hash); assert_eq!(amt_msat - skimmed_fee_msat * num_mpp_parts as u64, amount_msat); assert_eq!(skimmed_fee_msat * num_mpp_parts as u64, counterparty_skimmed_fee_msat); - assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap()); + assert_eq!(node_c_id, receiver_node_id.unwrap()); match purpose { crate::events::PaymentPurpose::Bolt11InvoicePayment { payment_preimage: ev_payment_preimage, @@ -2201,6 +2287,11 @@ fn do_automatic_retries(test: AutoRetry) { let node_0_deserialized; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let channel_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; let channel_id_2 = create_announced_chan_between_nodes(&nodes, 2, 1).2; @@ -2214,7 +2305,7 @@ fn do_automatic_retries(test: AutoRetry) { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) .with_bolt11_features(invoice_features).unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); @@ -2224,23 +2315,23 @@ fn do_automatic_retries(test: AutoRetry) { ($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => { // Send a payment attempt that fails due to lack of liquidity on the second hop check_added_monitors!(nodes[0], 1); - let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_0 = get_htlc_update_msgs!(nodes[0], node_b_id); let mut update_add = update_0.update_add_htlcs[0].clone(); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add); + nodes[1].node.handle_update_add_htlc(node_a_id, &update_add); commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true); expect_pending_htlcs_forwardable_ignore!(nodes[1]); 
nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), + node_id: Some(node_c_id), channel_id: $failing_channel_id, }]); nodes[1].node.process_pending_htlc_forwards(); - let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let update_1 = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(&nodes[1], 1); assert!(update_1.update_fail_htlcs.len() == 1); let fail_msg = update_1.update_fail_htlcs[0].clone(); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); + nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg); commitment_signed_dance!(nodes[0], nodes[1], update_1.commitment_signed, false); // Ensure the attempt fails and a new PendingHTLCsForwardable event is generated for the retry @@ -2419,6 +2510,9 @@ fn auto_retry_partial_failure() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Open three channels, the first has plenty of liquidity, the second and third have ~no // available liquidity, causing any outbound payments routed over it to fail immediately. let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; @@ -2436,7 +2530,7 @@ fn auto_retry_partial_failure() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) .with_bolt11_features(invoice_features).unwrap(); @@ -2447,7 +2541,7 @@ fn auto_retry_partial_failure() { let send_route = Route { paths: vec![ Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_1_id, channel_features: nodes[1].node.channel_features(), @@ -2456,7 +2550,7 @@ fn auto_retry_partial_failure() { maybe_announced_channel: true, }], blinded_tail: None }, Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_2_id, channel_features: nodes[1].node.channel_features(), @@ -2478,7 +2572,7 @@ fn auto_retry_partial_failure() { let retry_1_route = Route { paths: vec![ Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_1_id, channel_features: nodes[1].node.channel_features(), @@ -2487,7 +2581,7 @@ fn auto_retry_partial_failure() { maybe_announced_channel: true, }], blinded_tail: None }, Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_3_id, channel_features: nodes[1].node.channel_features(), @@ -2509,7 +2603,7 @@ fn auto_retry_partial_failure() { let retry_2_route = Route { paths: vec![ Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_1_id, 
channel_features: nodes[1].node.channel_features(), @@ -2544,36 +2638,36 @@ fn auto_retry_partial_failure() { assert_eq!(msg_events.len(), 1); let mut payment_event = SendEvent::from_event(msg_events.remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); let as_second_htlc_updates = SendEvent::from_node(&nodes[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); check_added_monitors!(nodes[0], 1); - let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[0]); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_second_htlc_updates.msgs[1]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &as_second_htlc_updates.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &as_second_htlc_updates.msgs[1]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_second_htlc_updates.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); check_added_monitors!(nodes[0], 1); - let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable_ignore!(nodes[1]); @@ -2581,41 +2675,41 @@ fn auto_retry_partial_failure() { expect_payment_claimable!(nodes[1], payment_hash, payment_secret, amt_msat); 
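The hunks above and below replay the full commitment_signed/revoke_and_ack exchange by hand rather than through `commitment_signed_dance!`, which makes it easy to lose track of who owes which message. As a reading aid, here is a minimal toy model of that ordering; the `Msg` enum and `respond` function are invented for illustration and are not LDK APIs.

// Toy model of the commitment update handshake driven manually above.
// A `commitment_signed` is answered with a `revoke_and_ack`, plus our own
// `commitment_signed` if we have not yet signed the updated state ourselves.
#[derive(Debug, PartialEq)]
enum Msg {
	CommitmentSigned,
	RevokeAndACK,
}

fn respond(we_already_signed: bool, msg: &Msg) -> Vec<Msg> {
	match msg {
		// We still owe the counterparty a signature for the new state, so we
		// revoke the old state and sign the new one in one response.
		Msg::CommitmentSigned if !we_already_signed => {
			vec![Msg::RevokeAndACK, Msg::CommitmentSigned]
		},
		// We already signed; only the revocation is outstanding.
		Msg::CommitmentSigned => vec![Msg::RevokeAndACK],
		// A revoke_and_ack on its own needs no reply.
		Msg::RevokeAndACK => vec![],
	}
}

fn main() {
	// The sender opens with commitment_signed; the recipient has not signed
	// the new state yet, so it replies RAA + CS, and the sender closes the
	// dance with a final RAA: the CS -> (RAA, CS) -> RAA sequence the test
	// walks through message by message.
	let replies = respond(false, &Msg::CommitmentSigned);
	assert_eq!(replies, vec![Msg::RevokeAndACK, Msg::CommitmentSigned]);
	let finish = respond(true, &replies[1]);
	assert_eq!(finish, vec![Msg::RevokeAndACK]);
}

This is the sequence the `bs_first_raa`/`bs_first_cs`/`as_first_raa` (and second/third/fourth) variables trace out in the surrounding hunks, once per commitment update.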
nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, amt_msat); - let bs_claim_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_claim_update = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(bs_claim_update.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_claim_update.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_claim_update.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_claim_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_claim_update.commitment_signed); check_added_monitors!(nodes[0], 1); - let (as_third_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let (as_third_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_third_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_third_raa); check_added_monitors!(nodes[1], 4); - let bs_second_claim_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_second_claim_update = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_third_cs); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_third_cs); check_added_monitors!(nodes[1], 1); - let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa); check_added_monitors!(nodes[0], 1); expect_payment_path_successful!(nodes[0]); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[0]); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[1]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_claim_update.commitment_signed); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_second_claim_update.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_second_claim_update.update_fulfill_htlcs[1]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_claim_update.commitment_signed); check_added_monitors!(nodes[0], 1); - let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_fourth_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_fourth_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_fourth_cs); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_fourth_cs); check_added_monitors!(nodes[1], 1); - let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_second_raa = 
get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -2630,6 +2724,8 @@ fn auto_retry_zero_attempts_send_error() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + // Open a single channel that does not have sufficient liquidity for the payment we want to // send. let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id; @@ -2645,7 +2741,7 @@ fn auto_retry_zero_attempts_send_error() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) .with_bolt11_features(invoice_features).unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); @@ -2654,7 +2750,7 @@ fn auto_retry_zero_attempts_send_error() { let send_route = Route { paths: vec![ Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_id, channel_features: nodes[1].node.channel_features(), @@ -2684,6 +2780,9 @@ fn fails_paying_after_rejected_by_payee() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; // Marshall data to send the payment @@ -2697,7 +2796,7 @@ fn fails_paying_after_rejected_by_payee() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) .with_bolt11_features(invoice_features).unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); @@ -2708,7 +2807,7 @@ fn fails_paying_after_rejected_by_payee() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -2727,6 +2826,8 @@ fn retry_multi_path_single_failed_payment() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = 
nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); @@ -2741,7 +2842,7 @@ fn retry_multi_path_single_failed_payment() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) .with_bolt11_features(invoice_features).unwrap(); let mut route_params = RouteParameters::from_payment_params_and_value( @@ -2752,7 +2853,7 @@ fn retry_multi_path_single_failed_payment() { let mut route = Route { paths: vec![ Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chans[0].short_channel_id.unwrap(), channel_features: nodes[1].node.channel_features(), @@ -2761,7 +2862,7 @@ fn retry_multi_path_single_failed_payment() { maybe_announced_channel: true, }], blinded_tail: None }, Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chans[1].short_channel_id.unwrap(), channel_features: nodes[1].node.channel_features(), @@ -2821,6 +2922,8 @@ fn immediate_retry_on_failure() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); @@ -2834,7 +2937,7 @@ fn immediate_retry_on_failure() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) .with_bolt11_features(invoice_features).unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); @@ -2843,7 +2946,7 @@ fn immediate_retry_on_failure() { let mut route = Route { paths: vec![ Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chans[0].short_channel_id.unwrap(), channel_features: nodes[1].node.channel_features(), @@ -2907,6 +3010,10 @@ fn no_extra_retries_on_back_to_back_fail() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0); let chan_1_scid = chan_1.0.contents.short_channel_id; let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0); @@ -2922,7 +3029,7 @@ fn no_extra_retries_on_back_to_back_fail() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); 
invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) .with_bolt11_features(invoice_features).unwrap(); let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); @@ -2931,7 +3038,7 @@ fn no_extra_retries_on_back_to_back_fail() { let mut route = Route { paths: vec![ Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_1_scid, channel_features: nodes[1].node.channel_features(), @@ -2939,7 +3046,7 @@ fn no_extra_retries_on_back_to_back_fail() { cltv_expiry_delta: 100, maybe_announced_channel: true, }, RouteHop { - pubkey: nodes[2].node.get_our_node_id(), + pubkey: node_c_id, node_features: nodes[2].node.node_features(), short_channel_id: chan_2_scid, channel_features: nodes[2].node.channel_features(), @@ -2948,7 +3055,7 @@ fn no_extra_retries_on_back_to_back_fail() { maybe_announced_channel: true, }], blinded_tail: None }, Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_1_scid, channel_features: nodes[1].node.channel_features(), @@ -2956,7 +3063,7 @@ fn no_extra_retries_on_back_to_back_fail() { cltv_expiry_delta: 100, maybe_announced_channel: true, }, RouteHop { - pubkey: nodes[2].node.get_our_node_id(), + pubkey: node_c_id, node_features: nodes[2].node.node_features(), short_channel_id: chan_2_scid, channel_features: nodes[2].node.channel_features(), @@ -2989,47 +3096,47 @@ fn no_extra_retries_on_back_to_back_fail() { check_added_monitors!(nodes[0], 1); assert_eq!(first_htlc_updates.msgs.len(), 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &first_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc_updates.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc_updates.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); let second_htlc_updates = SendEvent::from_node(&nodes[0]); assert_eq!(second_htlc_updates.msgs.len(), 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); check_added_monitors!(nodes[0], 1); - let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); + let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]); - 
nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &second_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc_updates.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &second_htlc_updates.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); + let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); check_added_monitors!(nodes[0], 1); - let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone(), next_hop_failure.clone()]); check_added_monitors(&nodes[1], 1); - let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_fail_update = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(bs_fail_update.update_fail_htlcs.len(), 2); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[1]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_update.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_update.update_fail_htlcs[1]); commitment_signed_dance!(nodes[0], nodes[1], bs_fail_update.commitment_signed, false); // At this point A has sent two HTLCs which both failed due to lack of fee. 
It now has two @@ -3069,14 +3176,14 @@ fn no_extra_retries_on_back_to_back_fail() { let retry_htlc_updates = SendEvent::from_node(&nodes[0]); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); check_added_monitors(&nodes[1], 1); - let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]); + let bs_fail_update = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_update.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], &bs_fail_update.commitment_signed, false, true); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -3110,6 +3217,10 @@ fn test_simple_partial_retry() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0); let chan_1_scid = chan_1.0.contents.short_channel_id; let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000_000, 0); @@ -3125,7 +3236,7 @@ fn test_simple_partial_retry() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) .with_bolt11_features(invoice_features).unwrap(); let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); @@ -3134,7 +3245,7 @@ fn test_simple_partial_retry() { let mut route = Route { paths: vec![ Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_1_scid, channel_features: nodes[1].node.channel_features(), @@ -3142,7 +3253,7 @@ fn test_simple_partial_retry() { cltv_expiry_delta: 100, maybe_announced_channel: true, }, RouteHop { - pubkey: nodes[2].node.get_our_node_id(), + pubkey: node_c_id, node_features: nodes[2].node.node_features(), short_channel_id: chan_2_scid, channel_features: nodes[2].node.channel_features(), @@ -3151,7 +3262,7 @@ fn test_simple_partial_retry() { maybe_announced_channel: true, }], blinded_tail: None }, Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_1_scid, channel_features: nodes[1].node.channel_features(), @@ -3159,7 +3270,7 @@ fn test_simple_partial_retry() { cltv_expiry_delta: 100, maybe_announced_channel: true, }, RouteHop { - pubkey: nodes[2].node.get_our_node_id(), + pubkey: node_c_id, node_features: nodes[2].node.node_features(), short_channel_id: 
chan_2_scid, channel_features: nodes[2].node.channel_features(), @@ -3191,29 +3302,29 @@ fn test_simple_partial_retry() { check_added_monitors!(nodes[0], 1); assert_eq!(first_htlc_updates.msgs.len(), 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &first_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &first_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc_updates.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc_updates.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); let second_htlc_updates = SendEvent::from_node(&nodes[0]); assert_eq!(second_htlc_updates.msgs.len(), 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); check_added_monitors!(nodes[0], 1); - let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); + let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &second_htlc_updates.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc_updates.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], second_htlc_updates.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }; + let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); check_added_monitors(&nodes[1], 2); @@ -3222,13 +3333,13 @@ fn test_simple_partial_retry() { assert_eq!(msg_events.len(), 2); let mut handle_update_htlcs = |event: MessageSendEvent| { if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } = event { - if node_id == nodes[0].node.get_our_node_id() { + if node_id == node_a_id { assert_eq!(updates.update_fail_htlcs.len(), 1); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); - } else if node_id == nodes[2].node.get_our_node_id() { + } else if node_id == node_c_id { assert_eq!(updates.update_add_htlcs.len(), 1); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[2].node.handle_update_add_htlc(node_b_id, &updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[1], &updates.commitment_signed, false); } else { panic!("Unexpected node_id for UpdateHTLCs send"); @@ -3259,14 
+3370,14 @@ fn test_simple_partial_retry() { let retry_htlc_updates = SendEvent::from_node(&nodes[0]); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &retry_htlc_updates.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); - let bs_second_forward_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_second_forward_update.update_add_htlcs[0]); + let bs_second_forward_update = get_htlc_update_msgs!(nodes[1], node_c_id); + nodes[2].node.handle_update_add_htlc(node_b_id, &bs_second_forward_update.update_add_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[1], &bs_second_forward_update.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[2]); @@ -3286,6 +3397,11 @@ fn test_threaded_payment_retries() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + // There is one mitigating guardrail when retrying payments - we can never over-pay by more // than 10% of the original value. Thus, we want all our retries to be below that. In order to // keep things simple, we route one HTLC for 0.1% of the payment over channel 1 and the rest @@ -3306,7 +3422,7 @@ fn test_threaded_payment_retries() { invoice_features.set_variable_length_onion_required(); invoice_features.set_payment_secret_required(); invoice_features.set_basic_mpp_optional(); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) .with_bolt11_features(invoice_features).unwrap(); let mut route_params = RouteParameters { @@ -3316,7 +3432,7 @@ fn test_threaded_payment_retries() { let mut route = Route { paths: vec![ Path { hops: vec![RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_1_scid, channel_features: nodes[1].node.channel_features(), @@ -3324,7 +3440,7 @@ fn test_threaded_payment_retries() { cltv_expiry_delta: 100, maybe_announced_channel: true, }, RouteHop { - pubkey: nodes[3].node.get_our_node_id(), + pubkey: node_d_id, node_features: nodes[2].node.node_features(), short_channel_id: 42, // Set a random SCID which nodes[1] will fail as unknown channel_features: nodes[2].node.channel_features(), @@ -3333,7 +3449,7 @@ fn test_threaded_payment_retries() { maybe_announced_channel: true, }], blinded_tail: None }, Path { hops: vec![RouteHop { - pubkey: nodes[2].node.get_our_node_id(), + pubkey: node_c_id, node_features: nodes[2].node.node_features(), short_channel_id: chan_3_scid, channel_features: nodes[2].node.channel_features(), @@ -3341,7 +3457,7 @@ fn test_threaded_payment_retries() { cltv_expiry_delta: 100, maybe_announced_channel: true, }, RouteHop { - pubkey: nodes[3].node.get_our_node_id(), + pubkey: node_d_id, node_features: nodes[3].node.node_features(), short_channel_id: chan_4_scid, 
channel_features: nodes[3].node.channel_features(), @@ -3363,7 +3479,7 @@ fn test_threaded_payment_retries() { if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, .. } = msg { // Drop the commitment update for nodes[2], we can just let that one sit pending // forever. - *node_id == nodes[1].node.get_our_node_id() + *node_id == node_b_id } else { panic!(); } ); @@ -3400,7 +3516,7 @@ fn test_threaded_payment_retries() { let send_event = SendEvent::from_event(send_msg_events.pop().unwrap()); assert_eq!(send_event.msgs.len(), 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); @@ -3422,15 +3538,15 @@ fn test_threaded_payment_retries() { route.route_params = Some(new_route_params.clone()); nodes[0].router.expect_find_route(new_route_params, Ok(route.clone())); - let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]); + let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_updates.update_fail_htlcs[0]); // The "normal" commitment_signed_dance delivers the final RAA and then calls // `check_added_monitors` to ensure only the one RAA-generated monitor update was created. // This races with our other threads which may generate an add-HTLCs commitment update via // `process_pending_htlc_forwards`. Instead, we defer the monitor update check until after // *we've* called `process_pending_htlc_forwards` when its guaranteed to have two updates. 
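The comment above is the heart of the synchronization story in `test_threaded_payment_retries`: an assertion about monitor-update counts is only valid once the racing work has finished. Reduced to plain std threading (illustrative only; in the real test the counter is the channel monitor update count and the racing work is `process_pending_htlc_forwards` on other threads), the pattern looks like this:

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
	let updates = Arc::new(AtomicUsize::new(0));

	// One update lands synchronously (the RAA-generated monitor update)...
	updates.fetch_add(1, Ordering::SeqCst);

	// ...and a second may land at any time from a racing worker (the
	// add-HTLCs commitment update).
	let racing = {
		let updates = Arc::clone(&updates);
		thread::spawn(move || {
			updates.fetch_add(1, Ordering::SeqCst);
		})
	};

	// Asserting "exactly one update" here would be flaky. Join the racing
	// work first and only assert once the total is guaranteed -- the same
	// reason the test defers `check_added_monitors` until after its own
	// `process_pending_htlc_forwards` call.
	racing.join().unwrap();
	assert_eq!(updates.load(Ordering::SeqCst), 2);
}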
let last_raa = commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true, false, true); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &last_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &last_raa); let cur_time = Instant::now(); if cur_time > end_time { @@ -3461,6 +3577,9 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: let (nodes_0_deserialized, nodes_0_deserialized_b, nodes_0_deserialized_c); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let mut nodes_0_serialized = Vec::new(); @@ -3479,13 +3598,13 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000); if at_midpoint { - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); check_added_monitors!(nodes[0], 1); } else { - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); + let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &htlc_fulfill_updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], htlc_fulfill_updates.commitment_signed, false); // Ignore the PaymentSent event which is now pending on nodes[0] - if we were to handle it we'd // be expected to ignore the eventual conflicting PaymentFailed, but by not looking at it we @@ -3567,19 +3686,24 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0); let chan_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2; create_announced_chan_between_nodes(&nodes, 2, 3); let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3]); - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000_000); - let mut route = nodes[0].router.find_route(&nodes[0].node.get_our_node_id(), &route_params, + let mut route = nodes[0].router.find_route(&node_a_id, &route_params, None, 
nodes[0].node.compute_inflight_htlcs()).unwrap(); // Make sure the route is ordered as the B->D path before C->D - route.paths.sort_by(|a, _| if a.hops[0].pubkey == nodes[1].node.get_our_node_id() { + route.paths.sort_by(|a, _| if a.hops[0].pubkey == node_b_id { std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater }); // Note that we add an extra 1 in the send pipeline to compensate for any blocks found while @@ -3596,7 +3720,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) { send_msgs.sort_by(|a, _| { let a_node_id = if let MessageSendEvent::UpdateHTLCs { node_id, .. } = a { node_id } else { panic!() }; - let node_b_id = nodes[1].node.get_our_node_id(); + let node_b_id = node_b_id; if *a_node_id == node_b_id { std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater } }); @@ -3627,9 +3751,9 @@ fn do_claim_from_closed_chan(fail_payment: bool) { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]); pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected); } else { - nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &nodes[3].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &node_d_id, error_message.to_string()).unwrap(); check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, - [nodes[3].node.get_our_node_id()], 1000000); + [node_d_id], 1000000); check_closed_broadcast(&nodes[1], 1, true); let bs_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_tx.len(), 1); @@ -3638,7 +3762,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) { check_added_monitors(&nodes[3], 1); check_closed_broadcast(&nodes[3], 1, true); check_closed_event!(&nodes[3], 1, ClosureReason::CommitmentTxConfirmed, false, - [nodes[1].node.get_our_node_id()], 1000000); + [node_b_id], 1000000); nodes[3].node.claim_funds(payment_preimage); check_added_monitors(&nodes[3], 2); @@ -3656,7 +3780,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) { check_added_monitors(&nodes[1], 1); assert_eq!(bs_claims.len(), 1); if let MessageSendEvent::UpdateHTLCs { updates, .. } = &bs_claims[0] { - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true); } else { panic!(); } @@ -3665,7 +3789,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let ds_claim_msgs = nodes[3].node.get_and_clear_pending_msg_events(); assert_eq!(ds_claim_msgs.len(), 1); let cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = &ds_claim_msgs[0] { - nodes[2].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[2].node.handle_update_fulfill_htlc(node_d_id, &updates.update_fulfill_htlcs[0]); let cs_claim_msgs = nodes[2].node.get_and_clear_pending_msg_events(); check_added_monitors(&nodes[2], 1); commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true); @@ -3675,7 +3799,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) { assert_eq!(cs_claim_msgs.len(), 1); if let MessageSendEvent::UpdateHTLCs { updates, .. 
} = &cs_claim_msgs[0] { - nodes[0].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], updates.commitment_signed, false, true); } else { panic!(); } @@ -3709,6 +3833,9 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 100_000; @@ -3734,10 +3861,10 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + let ev = remove_first_msg_event_to_node(&node_b_id, &mut events); let mut payment_event = SendEvent::from_event(ev); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); check_added_monitors!(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -3783,6 +3910,10 @@ fn test_retry_custom_tlvs() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let (chan_2_update, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 2, 1); @@ -3807,25 +3938,25 @@ fn test_retry_custom_tlvs() { check_added_monitors!(nodes[0], 1); // one monitor per path // Add the HTLC along the first hop. - let htlc_updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + let htlc_updates = get_htlc_update_msgs(&nodes[0], &node_b_id); let msgs::CommitmentUpdate { update_add_htlcs, commitment_signed, .. } = htlc_updates; assert_eq!(update_add_htlcs.len(), 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &update_add_htlcs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &update_add_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); // Attempt to forward the payment and complete the path's failure. expect_pending_htlcs_forwardable!(&nodes[1]); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), + node_id: Some(node_c_id), channel_id: chan_2_id }]); check_added_monitors!(nodes[1], 1); - let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let htlc_updates = get_htlc_update_msgs!(nodes[1], node_a_id); let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. 
} = htlc_updates; assert_eq!(update_fail_htlcs.len(), 1); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); let mut events = nodes[0].node.get_and_clear_pending_events(); @@ -3903,18 +4034,23 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0); let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap(); assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first - if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() { + if path_a.hops[0].pubkey == node_b_id { core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } }); @@ -3959,7 +4095,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[2]); @@ -3969,7 +4105,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[3].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]); check_added_monitors!(nodes[3], 0); commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true); } @@ -3998,19 +4134,19 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], expected_destinations); check_added_monitors!(nodes[3], 1); - let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]); + let fail_updates_1 = get_htlc_update_msgs!(nodes[3], node_c_id); + nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![ HTLCHandlingFailureType::Forward { - node_id: 
Some(nodes[3].node.get_our_node_id()), + node_id: Some(node_d_id), channel_id: chan_2_3.2 }]); check_added_monitors!(nodes[2], 1); - let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]); + let fail_updates_2 = get_htlc_update_msgs!(nodes[2], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false); expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, @@ -4037,6 +4173,11 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); let chan_id_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2; create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0); @@ -4048,7 +4189,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { let payment_id = PaymentId(payment_hash.0); let payment_metadata = vec![44, 49, 52, 142]; - let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); @@ -4063,38 +4204,38 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { let first_send = SendEvent::from_event(send_events.pop().unwrap()); let second_send = SendEvent::from_event(send_events.pop().unwrap()); - let (b_recv_ev, c_recv_ev) = if first_send.node_id == nodes[1].node.get_our_node_id() { + let (b_recv_ev, c_recv_ev) = if first_send.node_id == node_b_id { (&first_send, &second_send) } else { (&second_send, &first_send) }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &b_recv_ev.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &b_recv_ev.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], b_recv_ev.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors(&nodes[1], 1); let b_forward_ev = SendEvent::from_node(&nodes[1]); - nodes[3].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &b_forward_ev.msgs[0]); + nodes[3].node.handle_update_add_htlc(node_b_id, &b_forward_ev.msgs[0]); commitment_signed_dance!(nodes[3], nodes[1], b_forward_ev.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[3]); // Before delivering the second MPP HTLC to nodes[2], disconnect nodes[2] and nodes[3], which // will result in nodes[2] failing the HTLC back. 
- nodes[2].node.peer_disconnected(nodes[3].node.get_our_node_id()); - nodes[3].node.peer_disconnected(nodes[2].node.get_our_node_id()); + nodes[2].node.peer_disconnected(node_d_id); + nodes[3].node.peer_disconnected(node_c_id); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &c_recv_ev.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_a_id, &c_recv_ev.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], c_recv_ev.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); expect_htlc_handling_failed_destinations!( nodes[2].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd }] + &[HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_id_cd }] ); check_added_monitors(&nodes[2], 1); - let cs_fail = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &cs_fail.update_fail_htlcs[0]); + let cs_fail = get_htlc_update_msgs(&nodes[2], &node_a_id); + nodes[0].node.handle_update_fail_htlc(node_c_id, &cs_fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], cs_fail.commitment_signed, false, true); let payment_fail_retryable_evs = nodes[0].node.get_and_clear_pending_events(); @@ -4115,7 +4256,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { let mon_cd = get_monitor!(nodes[3], chan_id_cd).encode(); reload_node!(nodes[3], config, &nodes[3].node.encode(), &[&mon_bd, &mon_cd], persister, new_chain_monitor, nodes_0_deserialized); - nodes[1].node.peer_disconnected(nodes[3].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_d_id); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[3])); } let mut reconnect_args = ReconnectArgs::new(&nodes[2], &nodes[3]); @@ -4130,14 +4271,14 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[0].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[0], 1); let as_resend = SendEvent::from_node(&nodes[0]); - nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_resend.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_a_id, &as_resend.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], as_resend.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors(&nodes[2], 1); let cs_forward = SendEvent::from_node(&nodes[2]); let cd_channel_used = cs_forward.msgs[0].channel_id; - nodes[3].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &cs_forward.msgs[0]); + nodes[3].node.handle_update_add_htlc(node_c_id, &cs_forward.msgs[0]); commitment_signed_dance!(nodes[3], nodes[2], cs_forward.commitment_msg, false, true); // Finally, check that nodes[3] does the correct thing - either accepting the payment or, if @@ -4151,12 +4292,12 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[3].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[3], 1); - let ds_fail = get_htlc_update_msgs(&nodes[3], &nodes[2].node.get_our_node_id()); + let ds_fail = get_htlc_update_msgs(&nodes[3], &node_c_id); - nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]); + nodes[2].node.handle_update_fail_htlc(node_d_id, &ds_fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true); 
expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]); + &[HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: cd_channel_used }]); } else { expect_pending_htlcs_forwardable!(nodes[3]); expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat); @@ -4196,6 +4337,10 @@ fn test_htlc_forward_considers_anchor_outputs_value() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + const CHAN_AMT: u64 = 1_000_000; const PUSH_MSAT: u64 = 900_000_000; create_announced_chan_between_nodes_with_value(&nodes, 0, 1, CHAN_AMT, 500_000_000); @@ -4227,7 +4372,7 @@ fn test_htlc_forward_considers_anchor_outputs_value() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut update_add_htlc = if let MessageSendEvent::UpdateHTLCs { updates, .. } = events.pop().unwrap() { - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); check_added_monitors(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false); updates.update_add_htlcs[0].clone() @@ -4238,7 +4383,7 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // The forwarding node should reject forwarding it as expected. expect_pending_htlcs_forwardable!(nodes[1]); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), + node_id: Some(node_c_id), channel_id: chan_id_2 }]); check_added_monitors(&nodes[1], 1); @@ -4246,7 +4391,7 @@ fn test_htlc_forward_considers_anchor_outputs_value() { let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::UpdateHTLCs { updates, .. } = events.pop().unwrap() { - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); check_added_monitors(&nodes[0], 0); commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); } else { @@ -4258,10 +4403,10 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // Assume that the forwarding node did forward it, and make sure the recipient rejects it as an // invalid update and closes the channel. 
update_add_htlc.channel_id = chan_id_2; - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_htlc); + nodes[2].node.handle_update_add_htlc(node_b_id, &update_add_htlc); check_closed_event(&nodes[2], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_owned() - }, false, &[nodes[1].node.get_our_node_id()], 1_000_000); + }, false, &[node_b_id], 1_000_000); check_closed_broadcast(&nodes[2], 1, true); check_added_monitors(&nodes[2], 1); } @@ -4272,11 +4417,14 @@ fn peel_payment_onion_custom_tlvs() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let secp_ctx = Secp256k1::new(); let amt_msat = 1000; - let payment_params = PaymentParameters::for_keysend(nodes[1].node.get_our_node_id(), + let payment_params = PaymentParameters::for_keysend(node_b_id, TEST_FINAL_CLTV, false); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap(); @@ -4331,6 +4479,10 @@ fn test_non_strict_forwarding() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config)]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Create a routing node with two outbound channels, each of which can forward 2 payments of // the given value. let payment_value = 1_500_000; @@ -4339,7 +4491,7 @@ fn test_non_strict_forwarding() { let (chan_update_2, _, channel_id_2, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 5_000, 0); // Create a route once. 
- let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, payment_value); let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap(); @@ -4353,7 +4505,7 @@ fn test_non_strict_forwarding() { let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let mut send_event = SendEvent::from_event(msg_events.remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -4370,7 +4522,7 @@ fn test_non_strict_forwarding() { } else { channel_id_2 }); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_b_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], &send_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[2]); @@ -4391,7 +4543,7 @@ fn test_non_strict_forwarding() { let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let mut send_event = SendEvent::from_event(msg_events.remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -4404,10 +4556,10 @@ fn test_non_strict_forwarding() { }; // The failure to forward will refer to the channel given in the onion. 
expect_pending_htlcs_forwardable_conditions(nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]); + &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: routed_channel_id }]); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); let events = nodes[0].node.get_and_clear_pending_events(); expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().blamed_scid(routed_scid)); @@ -4421,11 +4573,14 @@ fn remove_pending_outbounds_on_buggy_router() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 10_000; let payment_id = PaymentId([42; 32]); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) + let payment_params = PaymentParameters::from_node_id(node_b_id, 0) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); @@ -4467,10 +4622,13 @@ fn remove_pending_outbound_probe_on_buggy_path() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 10_000; - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) + let payment_params = PaymentParameters::from_node_id(node_b_id, 0) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let (mut route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); @@ -4495,10 +4653,13 @@ fn pay_route_without_params() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 10_000; - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); route.route_params.take(); @@ -4509,7 +4670,7 @@ fn pay_route_without_params() { check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, 
&mut events); pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), node_1_msgs, true, None); claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], payment_preimage) From 66a0b595a9514f853a22f3846673671a31649730 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 26 Apr 2025 21:44:46 +0000 Subject: [PATCH 02/25] Clean up payment_tests.rs in anticipation of `rustfmt`'ing it --- lightning/src/ln/payment_tests.rs | 1327 +++++++++++++++++------------ 1 file changed, 761 insertions(+), 566 deletions(-) diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 62779e9b156..c7e690fed38 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -28,7 +28,6 @@ use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, ProbeSendFailure, R use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters}; use crate::routing::scoring::ChannelUsage; -use crate::util::config::UserConfig; use crate::util::test_utils; use crate::util::errors::APIError; use crate::util::ser::Writeable; @@ -45,6 +44,8 @@ use crate::ln::functional_test_utils; use crate::ln::functional_test_utils::*; use crate::routing::gossip::NodeId; +use core::cmp::Ordering; + #[cfg(feature = "std")] use { crate::util::time::Instant as TestTime, @@ -75,8 +76,9 @@ fn mpp_failure() { route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_id; route.paths[1].hops[1].short_channel_id = chan_4_id; - send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret); - fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash); + let paths: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + send_along_route_with_secret(&nodes[0], route, paths, 200_000, payment_hash, payment_secret); + fail_payment_along_route(&nodes[0], paths, false, payment_hash); } #[test] @@ -97,14 +99,14 @@ fn mpp_retry() { let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2); // Rebalance - send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000); + send_payment(&nodes[3], &[&nodes[2]], 1_500_000); let amt_msat = 1_000_000; - let max_total_routing_fee_msat = 50_000; + let max_fee = 50_000; let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!( - nodes[0], nodes[3], payment_params, amt_msat, Some(max_total_routing_fee_msat)); + let (mut route, hash, preimage, pay_secret) = + get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, amt_msat, Some(max_fee)); let path = route.paths[0].clone(); route.paths.push(path); route.paths[0].hops[0].pubkey = node_b_id; @@ -115,29 +117,32 @@ fn mpp_retry() { route.paths[1].hops[1].short_channel_id = chan_4_update.contents.short_channel_id; // Initiate the MPP payment. 
- let payment_id = PaymentId(payment_hash.0); + let id = PaymentId(hash.0); let mut route_params = route.route_params.clone().unwrap(); nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - payment_id, route_params.clone(), Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(pay_secret); + let retry = Retry::Attempts(1); + nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap(); check_added_monitors!(nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); // Pass half of the payment along the success path. - let success_path_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None); + let init_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); + let path = &[&nodes[1], &nodes[3]]; + pass_along_path(&nodes[0], path, 2_000_000, hash, Some(pay_secret), init_msgs, false, None); // Add the HTLC along the first hop. - let fail_path_msgs_1 = remove_first_msg_event_to_node(&node_c_id, &mut events); - let send_event = SendEvent::from_event(fail_path_msgs_1); + let second_msgs = remove_first_msg_event_to_node(&node_c_id, &mut events); + let send_event = SendEvent::from_event(second_msgs); nodes[2].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[0], &send_event.commitment_msg, false); // Attempt to forward the payment and complete the 2nd path's failure. expect_pending_htlcs_forwardable!(&nodes[2]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id }]); + let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], [fail]); let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -152,28 +157,32 @@ fn mpp_retry() { _ => panic!("Unexpected event") } events.remove(1); - expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain()); + + let conditions = PaymentFailedConditions::new().mpp_parts_remain(); + expect_payment_failed_conditions_event(events, hash, false, conditions); // Rebalance the channel so the second half of the payment can succeed. - send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000); + send_payment(&nodes[3], &[&nodes[2]], 1_500_000); // Retry the second half of the payment and make sure it succeeds. 
route.paths.remove(0); route_params.final_value_msat = 1_000_000; - route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id); + let chan_4_scid = chan_4_update.contents.short_channel_id; + route_params.payment_params.previously_failed_channels.push(chan_4_scid); // Check the remaining max total routing fee for the second attempt is 50_000 - 1_000 msat fee // used by the first path - route_params.max_total_routing_fee_msat = Some(max_total_routing_fee_msat - 1_000); + route_params.max_total_routing_fee_msat = Some(max_fee - 1_000); route.route_params = Some(route_params.clone()); nodes[0].router.expect_find_route(route_params, Ok(route)); nodes[0].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage) - ); + let event = events.pop().unwrap(); + let last_path = &[&nodes[2], &nodes[3]]; + pass_along_path(&nodes[0], last_path, 2_000_000, hash, Some(pay_secret), event, true, None); + let claim_paths: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], claim_paths, preimage)); } #[test] @@ -184,14 +193,16 @@ fn mpp_retry_overpay() { // in the first attempt. let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let mut user_config = test_default_channel_config(); user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; - let mut limited_config_1 = user_config.clone(); - limited_config_1.channel_handshake_config.our_htlc_minimum_msat = 35_000_000; - let mut limited_config_2 = user_config.clone(); - limited_config_2.channel_handshake_config.our_htlc_minimum_msat = 34_500_000; - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, - &[Some(user_config.clone()), Some(limited_config_1), Some(limited_config_2), Some(user_config)]); + let mut limited_1 = user_config.clone(); + limited_1.channel_handshake_config.our_htlc_minimum_msat = 35_000_000; + let mut limited_2 = user_config.clone(); + limited_2.channel_handshake_config.our_htlc_minimum_msat = 34_500_000; + let configs = [Some(user_config.clone()), Some(limited_1), Some(limited_2), Some(user_config)]; + + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &configs); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -205,12 +216,12 @@ fn mpp_retry_overpay() { let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes_with_value(&nodes, 3, 2, 40_000, 0); let amt_msat = 70_000_000; - let max_total_routing_fee_msat = Some(1_000_000); + let max_fee = Some(1_000_000); let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!( - nodes[0], nodes[3], payment_params, amt_msat, max_total_routing_fee_msat); + let (mut route, hash, payment_preimage, pay_secret) = + get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, amt_msat, max_fee); // Check we overpay on the second path which we're about to 
fail. assert_eq!(chan_1_update.contents.fee_proportional_millionths, 0); @@ -223,20 +234,21 @@ fn mpp_retry_overpay() { let total_overpaid_amount = overpaid_amount_1 + overpaid_amount_2; // Initiate the payment. - let payment_id = PaymentId(payment_hash.0); + let id = PaymentId(hash.0); let mut route_params = route.route_params.clone().unwrap(); nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - payment_id, route_params.clone(), Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(pay_secret); + let retry = Retry::Attempts(1); + nodes[0].node.send_payment(hash, onion, id, route_params.clone(), retry).unwrap(); check_added_monitors!(nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); // Pass half of the payment along the success path. - let success_path_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], amt_msat, payment_hash, - Some(payment_secret), success_path_msgs, false, None); + let init_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); + let path = &[&nodes[1], &nodes[3]]; + pass_along_path(&nodes[0], path, amt_msat, hash, Some(pay_secret), init_msgs, false, None); // Add the HTLC along the first hop. let fail_path_msgs_1 = remove_first_msg_event_to_node(&node_c_id, &mut events); @@ -246,11 +258,9 @@ fn mpp_retry_overpay() { // Attempt to forward the payment and complete the 2nd path's failure. expect_pending_htlcs_forwardable!(&nodes[2]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], - vec![HTLCHandlingFailureType::Forward { - node_id: Some(node_d_id), channel_id: chan_4_id - }] - ); + let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_id }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], [fail]); + let htlc_updates = get_htlc_update_msgs!(nodes[2], node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -266,11 +276,11 @@ fn mpp_retry_overpay() { _ => panic!("Unexpected event") } events.remove(1); - expect_payment_failed_conditions_event(events, payment_hash, false, - PaymentFailedConditions::new().mpp_parts_remain()); + let fail_conditions = PaymentFailedConditions::new().mpp_parts_remain(); + expect_payment_failed_conditions_event(events, hash, false, fail_conditions); // Rebalance the channel so the second half of the payment can succeed. - send_payment(&nodes[3], &vec!(&nodes[2])[..], 38_000_000); + send_payment(&nodes[3], &[&nodes[2]], 38_000_000); // Retry the second half of the payment and make sure it succeeds. let first_path_value = route.paths[0].final_value_msat(); @@ -278,7 +288,8 @@ fn mpp_retry_overpay() { route.paths.remove(0); route_params.final_value_msat -= first_path_value; - route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id); + let chan_4_scid = chan_4_update.contents.short_channel_id; + route_params.payment_params.previously_failed_channels.push(chan_4_scid); // Check the remaining max total routing fee for the second attempt accounts only for 1_000 msat // base fee, but not for overpaid value of the first try. 
route_params.max_total_routing_fee_msat.as_mut().map(|m| *m -= 1000); @@ -290,8 +301,9 @@ fn mpp_retry_overpay() { check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], amt_msat, payment_hash, - Some(payment_secret), events.pop().unwrap(), true, None); + let event = events.pop().unwrap(); + let path = &[&nodes[2], &nodes[3]]; + pass_along_path(&nodes[0], path, amt_msat, hash, Some(pay_secret), event, true, None); // Can't use claim_payment_along_route as it doesn't support overpayment, so we break out the // individual steps here. @@ -320,7 +332,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3); let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000); + let (mut route, hash, payment_preimage, pay_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000); let path = route.paths[0].clone(); route.paths.push(path); route.paths[0].hops[0].pubkey = node_b_id; @@ -331,15 +343,16 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { route.paths[1].hops[1].short_channel_id = chan_4_update.contents.short_channel_id; // Initiate the MPP payment. - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(pay_secret); + nodes[0].node.send_payment_with_route(route, hash, onion, PaymentId(hash.0)).unwrap(); check_added_monitors!(nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); // Pass half of the payment along the first path. 
let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_1_msgs, false, None); + let path = &[&nodes[1], &nodes[3]]; + pass_along_path(&nodes[0], path, 200_000, hash, Some(pay_secret), node_1_msgs, false, None); if send_partial_mpp { // Time out the partial MPP @@ -348,35 +361,45 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { } // Failed HTLC from node 3 -> 1 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingFailureType::Receive { payment_hash }]); - let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], node_b_id); - assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1); - nodes[1].node.handle_update_fail_htlc(node_d_id, &htlc_fail_updates_3_1.update_fail_htlcs[0]); + let fail = HTLCHandlingFailureType::Receive { payment_hash: hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], [fail]); + + let htlc_fail_updates = get_htlc_update_msgs!(nodes[3], node_b_id); + assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); + nodes[1].node.handle_update_fail_htlc(node_d_id, &htlc_fail_updates.update_fail_htlcs[0]); check_added_monitors!(nodes[3], 1); - commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false); + + commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates.commitment_signed, false); // Failed HTLC from node 1 -> 0 - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_3_id }]); - let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], node_a_id); - assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1); - nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates_1_0.update_fail_htlcs[0]); + let fail_type = + HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_3_id }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + + let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + assert_eq!(htlc_fail_updates.update_fail_htlcs.len(), 1); + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]); check_added_monitors!(nodes[1], 1); - commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false); + commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, false); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(LocalHTLCFailureReason::MPPTimeout, &[][..])); + let mut conditions = PaymentFailedConditions::new() + .mpp_parts_remain() + .expected_htlc_error_data(LocalHTLCFailureReason::MPPTimeout, &[][..]); + expect_payment_failed_conditions(&nodes[0], hash, false, conditions); } else { // Pass half of the payment along the second path. 
let node_2_msgs = remove_first_msg_event_to_node(&node_c_id, &mut events); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_2_msgs, true, None); + let path = &[&nodes[2], &nodes[3]]; + let secret = Some(pay_secret); + pass_along_path(&nodes[0], path, 200_000, hash, secret, node_2_msgs, true, None); // Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts for _ in 0..MPP_TIMEOUT_TICKS { nodes[3].node.timer_tick_occurred(); } - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage) - ); + let full_path: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], full_path, payment_preimage)); } } @@ -406,17 +429,17 @@ fn do_test_keysend_payments(public_node: bool) { } else { create_chan_between_nodes(&nodes[0], &nodes[1]); } - let payee_pubkey = node_b_id; let route_params = RouteParameters::from_payment_params_and_value( - PaymentParameters::for_keysend(payee_pubkey, 40, false), 10000); + PaymentParameters::for_keysend(node_b_id, 40, false), 10000); { - let test_preimage = PaymentPreimage([42; 32]); - nodes[0].node.send_spontaneous_payment( - Some(test_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0), - route_params, Retry::Attempts(1) - ).unwrap(); + let preimage = Some(PaymentPreimage([42; 32])); + let onion = RecipientOnionFields::spontaneous_empty(); + let retry = Retry::Attempts(1); + let id = PaymentId([42; 32]); + nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); } + check_added_monitors!(nodes[0], 1); let send_event = SendEvent::from_node(&nodes[0]); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); @@ -428,8 +451,10 @@ fn do_test_keysend_payments(public_node: bool) { // extracting it from the onion nodes[1] received. let event = nodes[1].node.get_and_clear_pending_events(); assert_eq!(event.len(), 1); - if let Event::PaymentClaimable { purpose: PaymentPurpose::SpontaneousPayment(preimage), .. } = event[0] { - claim_payment(&nodes[0], &[&nodes[1]], preimage); + if let Event::PaymentClaimable { purpose, .. 
} = &event[0] { + if let PaymentPurpose::SpontaneousPayment(preimage) = purpose { + claim_payment(&nodes[0], &[&nodes[1]], *preimage); + } } else { panic!(); } } @@ -449,33 +474,31 @@ fn test_mpp_keysend() { create_announced_chan_between_nodes(&nodes, 1, 3); create_announced_chan_between_nodes(&nodes, 2, 3); - let payee_pubkey = node_d_id; let recv_value = 15_000_000; let route_params = RouteParameters::from_payment_params_and_value( - PaymentParameters::for_keysend(payee_pubkey, 40, true), recv_value); + PaymentParameters::for_keysend(node_d_id, 40, true), + recv_value, + ); - let payment_preimage = PaymentPreimage([42; 32]); - let payment_secret = PaymentSecret(payment_preimage.0); - let payment_hash = nodes[0].node.send_spontaneous_payment( - Some(payment_preimage), RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_preimage.0), route_params, Retry::Attempts(0) - ).unwrap(); + let preimage = Some(PaymentPreimage([42; 32])); + let secret = PaymentSecret([42; 32]); + let onion = RecipientOnionFields::secret_only(secret); + let retry = Retry::Attempts(0); + let id = PaymentId([42; 32]); + let hash = + nodes[0].node.send_spontaneous_payment(preimage, onion, id, route_params, retry).unwrap(); check_added_monitors!(nodes[0], 2); - let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); let ev = remove_first_msg_event_to_node(&node_b_id, &mut events); - pass_along_path(&nodes[0], expected_route[0], recv_value, payment_hash.clone(), - Some(payment_secret), ev.clone(), false, Some(payment_preimage)); + pass_along_path(&nodes[0], route[0], recv_value, hash, Some(secret), ev, false, preimage); let ev = remove_first_msg_event_to_node(&node_c_id, &mut events); - pass_along_path(&nodes[0], expected_route[1], recv_value, payment_hash.clone(), - Some(payment_secret), ev.clone(), true, Some(payment_preimage)); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], expected_route, payment_preimage) - ); + pass_along_path(&nodes[0], route[1], recv_value, hash, Some(secret), ev, true, preimage); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, preimage.unwrap())); } #[test] @@ -497,10 +520,11 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; - let (update_a, _, chan_4_channel_id, _) = create_announced_chan_between_nodes(&nodes, 2, 3); + let (update_a, _, chan_4_chan_id, _) = create_announced_chan_between_nodes(&nodes, 2, 3); let chan_4_id = update_a.contents.short_channel_id; let amount = 40_000; let (mut route, payment_hash, payment_preimage, _) = get_route_and_payment_hash!(nodes[0], nodes[3], amount); + let preimage = Some(payment_preimage); // Pay along nodes[1] route.paths[0].hops[0].pubkey = node_b_id; @@ -509,10 +533,10 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let payment_id_0 = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); nodes[0].router.expect_find_route(route.route_params.clone().unwrap(), Ok(route.clone())); - nodes[0].node.send_spontaneous_payment( - Some(payment_preimage), 
RecipientOnionFields::spontaneous_empty(), payment_id_0, - route.route_params.clone().unwrap(), Retry::Attempts(0) - ).unwrap(); + let params = route.route_params.clone().unwrap(); + let onion = RecipientOnionFields::spontaneous_empty(); + let retry = Retry::Attempts(0); + nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_0, params, retry).unwrap(); check_added_monitors!(nodes[0], 1); let update_0 = get_htlc_update_msgs!(nodes[0], node_b_id); @@ -557,10 +581,11 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let payment_id_1 = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); nodes[0].router.expect_find_route(route.route_params.clone().unwrap(), Ok(route.clone())); - nodes[0].node.send_spontaneous_payment( - Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1, - route.route_params.clone().unwrap(), Retry::Attempts(0) - ).unwrap(); + + let onion = RecipientOnionFields::spontaneous_empty(); + let params = route.route_params.clone().unwrap(); + let retry = Retry::Attempts(0); + nodes[0].node.send_spontaneous_payment(preimage, onion, payment_id_1, params, retry).unwrap(); check_added_monitors!(nodes[0], 1); let update_2 = get_htlc_update_msgs!(nodes[0], node_c_id); @@ -597,14 +622,18 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { } } nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCHandlingFailureType::Receive { payment_hash }]); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], [fail_type]); check_added_monitors!(nodes[3], 1); // Fail back along nodes[2] let update_fail_0 = get_htlc_update_msgs!(&nodes[3], &node_c_id); nodes[2].node.handle_update_fail_htlc(node_d_id, &update_fail_0.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], update_fail_0.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_channel_id }]); + + let fail_type = + HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_4_chan_id }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail_type]); check_added_monitors!(nodes[2], 1); let update_fail_1 = get_htlc_update_msgs!(nodes[2], node_a_id); @@ -638,9 +667,10 @@ fn no_pending_leak_on_initial_send_failure() { nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, + let onion = RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, payment_hash, onion, payment_id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Peer for first hop currently disconnected")); assert!(!nodes[0].node.has_pending_payments()); @@ -663,7 +693,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let persister; let new_chain_monitor; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let nodes_0_deserialized; + let node_a_reload; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = 
nodes[0].node.get_our_node_id(); @@ -674,16 +704,18 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2); // Serialize the ChannelManager prior to sending payments - let nodes_0_serialized = nodes[0].node.encode(); + let node_a_ser = nodes[0].node.encode(); // Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time // out and retry. let amt_msat = 1_000_000; - let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + let (route, payment_hash, payment_preimage, secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000); + let route_params = route.route_params.unwrap().clone(); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -699,11 +731,13 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2}] + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2}] ); + check_added_monitors(&nodes[1], 1); // nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected let _ = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -719,7 +753,9 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // The ChannelMonitor should always be the latest version, as we're required to persist it // during the `commitment_signed_dance!()`. let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); - reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized); + let config = test_default_channel_config(); + let mons: &[_] = &[&chan_0_monitor_serialized[..]]; + reload_node!(nodes[0], config, &node_a_ser, mons, persister, new_chain_monitor, node_a_reload); // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and // force-close the channel. 
@@ -737,16 +773,18 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { check_added_monitors!(nodes[0], 1); nodes[1].node.peer_disconnected(node_a_id); - nodes[0].node.peer_connected(node_b_id, &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an // error, as the channel has hit the chain. - nodes[1].node.peer_connected(node_a_id, &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); let as_err = nodes[0].node.get_and_clear_pending_msg_events(); @@ -758,7 +796,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) }, [node_a_id], 100000); check_added_monitors!(nodes[1], 1); - assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); + assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); + nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); }, _ => panic!("Unexpected event"), } @@ -770,10 +809,10 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); - nodes[1].node.handle_update_fulfill_htlc(node_c_id, &htlc_fulfill_updates.update_fulfill_htlcs[0]); + let htlc_fulfill = get_htlc_update_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &htlc_fulfill.update_fulfill_htlcs[0]); check_added_monitors!(nodes[1], 1); - commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); + commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill.commitment_signed, false); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false); if confirm_before_reload { @@ -818,7 +857,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { confirm_transaction(&nodes[0], &first_htlc_timeout_tx); } nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear(); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, conditions); // Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was // reloaded) via a route over the new channel, which work without issue and eventually be @@ -844,17 +884,24 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { nodes[1].node.timer_tick_occurred(); } - assert!(nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, // Shouldn't be allowed to retry a fulfilled payment - 
RecipientOnionFields::secret_only(payment_secret), payment_id_1).is_err()); - nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + // Check that we cannot retry a fulfilled payment + nodes[0] + .node + .send_payment_with_route(new_route.clone(), payment_hash, onion, payment_id_1) + .unwrap_err(); + // ...but if we send with a different PaymentId the payment should fly + let id = PaymentId(payment_hash.0); + let onion = RecipientOnionFields::secret_only(secret); + nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None); - do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - ); + let event = events.pop().unwrap(); + let path = &[&nodes[1], &nodes[2]]; + pass_along_path(&nodes[0], path, 1_000_000, payment_hash, Some(secret), event, true, None); + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path], payment_preimage)); expect_payment_sent!(nodes[0], payment_preimage, Some(new_route.paths[0].hops[0].fee_msat)); } @@ -876,17 +923,17 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let mut manually_accept_config = test_default_channel_config(); manually_accept_config.manually_accept_inbound_channels = true; - let first_persister; - let first_new_chain_monitor; - let second_persister; - let second_new_chain_monitor; - let third_persister; - let third_new_chain_monitor; + let persist_1; + let chain_1; + let persist_2; + let chain_2; + let persist_3; + let chain_3; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]); - let first_nodes_0_deserialized; - let second_nodes_0_deserialized; - let third_nodes_0_deserialized; + let node_a_reload_1; + let node_a_reload_2; + let node_a_reload_3; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -904,16 +951,19 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; // Serialize the ChannelManager prior to sending payments - let mut nodes_0_serialized = nodes[0].node.encode(); + let mut node_a_ser = nodes[0].node.encode(); - let route = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }).0; - let (payment_preimage, payment_hash, payment_secret, payment_id) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], if use_dust { 1_000 } else { 1_000_000 }); + let amt = if use_dust { 1_000 } else { 1_000_000 }; + let route = get_route_and_payment_hash!(nodes[0], nodes[2], amt).0; + let (payment_preimage, hash, payment_secret, payment_id) = + send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], amt); // The ChannelMonitor should always be the latest version, as we're required to persist it // during the `commitment_signed_dance!()`. 
- let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); + let mon_ser = get_monitor!(nodes[0], chan_id).encode(); - reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized], first_persister, first_new_chain_monitor, first_nodes_0_deserialized); + let config = test_default_channel_config(); + reload_node!(nodes[0], config, node_a_ser, &[&mon_ser], persist_1, chain_1, node_a_reload_1); nodes[1].node.peer_disconnected(node_a_id); // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and @@ -925,16 +975,17 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); check_added_monitors!(nodes[0], 1); - nodes[0].node.peer_connected(node_b_id, &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an // error, as the channel has hit the chain. - nodes[1].node.peer_connected(node_a_id, &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); let as_err = nodes[0].node.get_and_clear_pending_msg_events(); @@ -944,8 +995,12 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => { assert_eq!(node_id, node_b_id); nodes[1].node.handle_error(node_a_id, msg); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", &node_b_id)) } - , [node_a_id], 100000); + let msg = format!( + "Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", + &node_b_id + ); + let reason = ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(msg) }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); check_added_monitors!(nodes[1], 1); bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); }, @@ -956,8 +1011,9 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // Now fail back the payment from nodes[2] to nodes[1]. This doesn't really matter as the // previous hop channel is already on-chain, but it makes nodes[2] willing to see additional // incoming HTLCs with the same payment hash later. 
- nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [HTLCHandlingFailureType::Receive { payment_hash }]); + nodes[2].node.fail_htlc_backwards(&hash); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail_type]); check_added_monitors!(nodes[2], 1); let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); @@ -997,8 +1053,9 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // If we attempt to retry prior to the HTLC-Timeout (or commitment transaction, for dust HTLCs) // confirming, we will fail as it's considered still-pending... - let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }); - match nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { + let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], amt); + let onion = RecipientOnionFields::secret_only(payment_secret); + match nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id) { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } @@ -1009,18 +1066,20 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // (which should also still work). connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); + expect_payment_failed_conditions(&nodes[0], hash, false, PaymentFailedConditions::new()); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode(); - nodes_0_serialized = nodes[0].node.encode(); + node_a_ser = nodes[0].node.encode(); // After the payment failed, we're free to send it again. - assert!(nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), payment_id).is_ok()); + let onion = RecipientOnionFields::secret_only(payment_secret); + nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id).unwrap(); assert!(!nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], second_persister, second_new_chain_monitor, second_nodes_0_deserialized); + let config = test_default_channel_config(); + let monitors = &[&chan_0_monitor_serialized[..], &chan_1_monitor_serialized[..]]; + reload_node!(nodes[0], config, node_a_ser, monitors, persist_2, chain_2, node_a_reload_2); nodes[1].node.peer_disconnected(node_a_id); nodes[0].node.test_process_background_events(); @@ -1031,13 +1090,14 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures // the payment is not (spuriously) listed as still pending. 
- assert!(nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), payment_id).is_ok()); + let onion = RecipientOnionFields::secret_only(payment_secret); + nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id).unwrap(); check_added_monitors!(nodes[0], 1); - pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], if use_dust { 1_000 } else { 1_000_000 }, payment_hash, payment_secret); + pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], amt, hash, payment_secret); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); - match nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { + let onion = RecipientOnionFields::secret_only(payment_secret); + match nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id) { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } @@ -1045,18 +1105,21 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_3).encode(); - nodes_0_serialized = nodes[0].node.encode(); + node_a_ser = nodes[0].node.encode(); // Check that after reload we can send the payment again (though we shouldn't, since it was // claimed previously). - reload_node!(nodes[0], test_default_channel_config(), nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], third_persister, third_new_chain_monitor, third_nodes_0_deserialized); + let config = test_default_channel_config(); + let monitors = &[&chan_0_monitor_serialized[..], &chan_1_monitor_serialized[..]]; + reload_node!(nodes[0], config, node_a_ser, monitors, persist_3, chain_3, node_a_reload_3); nodes[1].node.peer_disconnected(node_a_id); nodes[0].node.test_process_background_events(); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - match nodes[0].node.send_payment_with_route(new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { + let onion = RecipientOnionFields::secret_only(payment_secret); + match nodes[0].node.send_payment_with_route(new_route, hash, onion, payment_id) { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } @@ -1081,24 +1144,25 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let persister; - let new_chain_monitor; + let chain; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes_0_deserialized; + let node_a_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); - let error_message = "Channel force-closed"; + let error_message = "Channel force-closed".to_string(); // Route a payment, but force-close the channel before the HTLC fulfill message arrives at // nodes[0]. let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 10_000_000); - nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &node_b_id, error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &node_b_id, error_message).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_b_id], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); @@ -1134,7 +1198,8 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - 1); } - let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, if payment_timeout { vec![htlc_timeout_tx] } else { vec![htlc_success_tx] }); + let txn = if payment_timeout { vec![htlc_timeout_tx] } else { vec![htlc_success_tx] }; + let claim_block = create_dummy_block(nodes[0].best_block_hash(), 42, txn); if payment_timeout { assert!(confirm_commitment_tx); // Otherwise we're spending below our CSV! @@ -1164,7 +1229,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo chan_manager_serialized = nodes[0].node.encode(); } - let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); + let mon_ser = get_monitor!(nodes[0], chan_id).encode(); if payment_timeout { expect_payment_failed!(nodes[0], payment_hash, false); } else { @@ -1178,7 +1243,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo } // Now reload nodes[0]... - reload_node!(nodes[0], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized); + reload_node!(nodes[0], &chan_manager_serialized, &[&mon_ser], persister, chain, node_a_reload); if persist_manager_post_event { assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); @@ -1217,9 +1282,9 @@ fn test_fulfill_restart_failure() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let persister; - let new_chain_monitor; + let chain; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes_1_deserialized; + let node_b_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1231,25 +1296,27 @@ fn test_fulfill_restart_failure() { // The simplest way to get a failure after a fulfill is to reload nodes[1] from a state // pre-fulfill, which we do by serializing it here. let chan_manager_serialized = nodes[1].node.encode(); - let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id).encode(); + let mon_ser = get_monitor!(nodes[1], chan_id).encode(); nodes[1].node.claim_funds(payment_preimage); check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 100_000); - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &htlc_fulfill_updates.update_fulfill_htlcs[0]); + let htlc_fulfill = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &htlc_fulfill.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); // Now reload nodes[1]... 
- reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
+ reload_node!(nodes[1], &chan_manager_serialized, &[&mon_ser], persister, chain, node_b_reload);
 nodes[0].node.peer_disconnected(node_b_id);
 reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 nodes[1].node.fail_htlc_backwards(&payment_hash);
- expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]);
+ let fail_type = HTLCHandlingFailureType::Receive { payment_hash };
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]);
 check_added_monitors!(nodes[1], 1);
+
 let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id);
 nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail_updates.update_fail_htlcs[0]);
 commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates.commitment_signed, false);
@@ -1281,22 +1348,24 @@ fn get_ldk_payment_preimage() {
 let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
 let random_seed_bytes = keys_manager.get_secure_random_bytes();
 let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
- let route = get_route( &node_a_id, &route_params,
+ let route = get_route(&node_a_id, &route_params,
 &nodes[0].network_graph.read_only(), Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()), nodes[0].logger,
- &scorer, &Default::default(), &random_seed_bytes).unwrap();
- nodes[0].node.send_payment_with_route(route, payment_hash,
- RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ &scorer, &Default::default(), &random_seed_bytes);
+ let onion = RecipientOnionFields::secret_only(payment_secret);
+ let id = PaymentId(payment_hash.0);
+ nodes[0].node.send_payment_with_route(route.unwrap(), payment_hash, onion, id).unwrap();
 check_added_monitors!(nodes[0], 1);
 // Make sure to use `get_payment_preimage`
- let payment_preimage = nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap();
+ let preimage = nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap();
 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
- pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), events.pop().unwrap(), true, Some(payment_preimage));
- claim_payment_along_route(
- ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], payment_preimage)
- );
+ let event = events.pop().unwrap();
+ let secret = Some(payment_secret);
+ let path = &[&nodes[1]];
+ pass_along_path(&nodes[0], path, amt_msat, payment_hash, secret, event, true, Some(preimage));
+ claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path], preimage));
 }
 #[test]
@@ -1370,9 +1439,8 @@ fn failed_probe_yields_event() {
 let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 90000000);
- let payment_params = PaymentParameters::from_node_id(node_c_id, 42);
-
- let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], payment_params, 9_998_000);
+ let params = PaymentParameters::from_node_id(node_c_id, 42);
+ let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], params, 9_998_000);
 let (payment_hash, payment_id) = nodes[0].node.send_probe(route.paths[0].clone()).unwrap();
@@ -1471,11 +1539,12 @@ fn preflight_probes_yield_event_skip_private_hop() {
 let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
 // We alleviate the HTLC max-in-flight limit, as otherwise we'd always be limited through that.
- let mut no_htlc_limit_config = test_default_channel_config();
- no_htlc_limit_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+ let mut config = test_default_channel_config();
+ config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+ let config = Some(config);
- let user_configs = std::iter::repeat(no_htlc_limit_config).take(5).map(|c| Some(c)).collect::<Vec<Option<UserConfig>>>();
- let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &user_configs);
+ let configs = [config.clone(), config.clone(), config.clone(), config.clone(), config];
+ let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &configs[..]);
 let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
 let node_d_id = nodes[3].node.get_our_node_id();
@@ -1515,11 +1584,12 @@ fn preflight_probes_yield_event() {
 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
 // We alleviate the HTLC max-in-flight limit, as otherwise we'd always be limited through that.
- let mut no_htlc_limit_config = test_default_channel_config();
- no_htlc_limit_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+ let mut config = test_default_channel_config();
+ config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+ let config = Some(config);
- let user_configs = std::iter::repeat(no_htlc_limit_config).take(4).map(|c| Some(c)).collect::<Vec<Option<UserConfig>>>();
- let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &user_configs);
+ let configs = [config.clone(), config.clone(), config.clone(), config];
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &configs[..]);
 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 let node_d_id = nodes[3].node.get_our_node_id();
@@ -1563,11 +1633,13 @@ fn preflight_probes_yield_event_and_skip() {
 let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
 // We alleviate the HTLC max-in-flight limit, as otherwise we'd always be limited through that.
- let mut no_htlc_limit_config = test_default_channel_config();
- no_htlc_limit_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+ let mut config = test_default_channel_config();
+ config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+ let config = Some(config);
- let user_configs = std::iter::repeat(no_htlc_limit_config).take(5).map(|c| Some(c)).collect::<Vec<Option<UserConfig>>>();
- let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &user_configs);
+ let configs =
+ [config.clone(), config.clone(), config.clone(), config.clone(), config.clone(), config];
+ let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &configs[..]);
 let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
 let node_e_id = nodes[4].node.get_our_node_id();
@@ -1617,15 +1689,16 @@ fn claimed_send_payment_idempotent() {
 create_announced_chan_between_nodes(&nodes, 0, 1).2;
- let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
- let (first_payment_preimage, _, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000);
+ let (route, hash_b, preimage_b, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
+ let (preimage_a, _, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000);
 macro_rules! check_send_rejected {
check_send_rejected { () => { // If we try to resend a new payment with a different payment_hash but with the same // payment_id, it should be rejected. - let send_result = nodes[0].node.send_payment_with_route(route.clone(), second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), payment_id); + let onion = RecipientOnionFields::secret_only(second_payment_secret); + let send_result = + nodes[0].node.send_payment_with_route(route.clone(), hash_b, onion, payment_id); match send_result { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), @@ -1650,7 +1723,7 @@ fn claimed_send_payment_idempotent() { // not been seen by the user. At this point, from the user perspective nothing has changed, so // we must remain just as idempotent as we were before. do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], first_payment_preimage) + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage_a) ); for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS { @@ -1664,7 +1737,7 @@ fn claimed_send_payment_idempotent() { // the payment complete. However, they could have called `send_payment` while the event was // being processed, leading to a race in our idempotency guarantees. Thus, even immediately // after the event is handled a duplicate payment should sitll be rejected. - expect_payment_sent!(&nodes[0], first_payment_preimage, Some(0)); + expect_payment_sent!(&nodes[0], preimage_a, Some(0)); check_send_rejected!(); // If relatively little time has passed, a duplicate payment should still fail. @@ -1678,11 +1751,11 @@ fn claimed_send_payment_idempotent() { nodes[0].node.timer_tick_occurred(); } - nodes[0].node.send_payment_with_route(route, second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), payment_id).unwrap(); + let onion = RecipientOnionFields::secret_only(second_payment_secret); + nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap(); check_added_monitors!(nodes[0], 1); - pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret); - claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage); + pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret); + claim_payment(&nodes[0], &[&nodes[1]], preimage_b); } #[test] @@ -1696,15 +1769,16 @@ fn abandoned_send_payment_idempotent() { create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (route, second_payment_hash, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); + let (route, hash_b, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); let (_, first_payment_hash, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000); macro_rules! check_send_rejected { () => { // If we try to resend a new payment with a different payment_hash but with the same // payment_id, it should be rejected. 
- let send_result = nodes[0].node.send_payment_with_route(route.clone(), second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), payment_id); + let onion = RecipientOnionFields::secret_only(second_payment_secret); + let send_result = + nodes[0].node.send_payment_with_route(route.clone(), hash_b, onion, payment_id); match send_result { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), @@ -1726,7 +1800,8 @@ fn abandoned_send_payment_idempotent() { check_send_rejected!(); nodes[1].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }]); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); // Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the // PaymentId. @@ -1735,14 +1810,15 @@ fn abandoned_send_payment_idempotent() { } check_send_rejected!(); - pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash, PaymentFailureReason::RecipientRejected); + let reason = PaymentFailureReason::RecipientRejected; + pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, first_payment_hash, reason); // However, we can reuse the PaymentId immediately after we `abandon_payment` upon passing the // failed payment back. - nodes[0].node.send_payment_with_route(route, second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), payment_id).unwrap(); + let onion = RecipientOnionFields::secret_only(second_payment_secret); + nodes[0].node.send_payment_with_route(route, hash_b, onion, payment_id).unwrap(); check_added_monitors!(nodes[0], 1); - pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, second_payment_hash, second_payment_secret); + pass_along_route(&nodes[0], &[&[&nodes[1]]], 100_000, hash_b, second_payment_secret); claim_payment(&nodes[0], &[&nodes[1]], second_payment_preimage); } @@ -1775,21 +1851,23 @@ fn test_trivial_inflight_htlc_tracking(){ let (_, payment_hash, _, payment_id) = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000); let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_1 = + get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1_id); let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&node_a_id) , + &NodeId::from_pubkey(&node_a_id), &NodeId::from_pubkey(&node_b_id), channel_1.context().get_short_channel_id().unwrap() ); assert_eq!(chan_1_used_liquidity, None); } { - let mut node_1_per_peer_lock; - let mut node_1_peer_state_lock; - let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_2 = + get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_b_id) , @@ -1801,7 +1879,8 @@ fn test_trivial_inflight_htlc_tracking(){ } let pending_payments = nodes[0].node.list_recent_payments(); 
assert_eq!(pending_payments.len(), 1); - assert_eq!(pending_payments[0], RecentPaymentDetails::Fulfilled { payment_hash: Some(payment_hash), payment_id }); + let details = RecentPaymentDetails::Fulfilled { payment_hash: Some(payment_hash), payment_id }; + assert_eq!(pending_payments[0], details); // Remove fulfilled payment for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS { @@ -1812,9 +1891,10 @@ fn test_trivial_inflight_htlc_tracking(){ let (payment_preimage, payment_hash, _, payment_id) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000); let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_1 = + get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1_id); let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_a_id) , @@ -1825,9 +1905,10 @@ fn test_trivial_inflight_htlc_tracking(){ assert_eq!(chan_1_used_liquidity, Some(501000)); } { - let mut node_1_per_peer_lock; - let mut node_1_peer_state_lock; - let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_2 = + get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_b_id) , @@ -1839,7 +1920,8 @@ fn test_trivial_inflight_htlc_tracking(){ } let pending_payments = nodes[0].node.list_recent_payments(); assert_eq!(pending_payments.len(), 1); - assert_eq!(pending_payments[0], RecentPaymentDetails::Pending { payment_id, payment_hash, total_msat: 500000 }); + let details = RecentPaymentDetails::Pending { payment_id, payment_hash, total_msat: 500000 }; + assert_eq!(pending_payments[0], details); // Now, let's claim the payment. This should result in the used liquidity to return `None`. claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); @@ -1851,9 +1933,10 @@ fn test_trivial_inflight_htlc_tracking(){ let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - let channel_1 = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_1 = + get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1_id); let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_a_id) , @@ -1863,9 +1946,10 @@ fn test_trivial_inflight_htlc_tracking(){ assert_eq!(chan_1_used_liquidity, None); } { - let mut node_1_per_peer_lock; - let mut node_1_peer_state_lock; - let channel_2 = get_channel_ref!(&nodes[1], nodes[2], node_1_per_peer_lock, node_1_peer_state_lock, chan_2_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel_2 = + get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_b_id) , @@ -1897,20 +1981,24 @@ fn test_holding_cell_inflight_htlcs() { // Queue up two payments - one will be delivered right away, one immediately goes into the // holding cell as nodes[0] is AwaitingRAA. 
{ - nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); check_added_monitors!(nodes[0], 0); } let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - let channel = get_channel_ref!(&nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel = + get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, channel_id); let used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_a_id) , @@ -1944,7 +2032,9 @@ fn do_test_intercepted_payment(test: InterceptTest) { zero_conf_chan_config.manually_accept_inbound_channels = true; let mut intercept_forwards_config = test_default_channel_config(); intercept_forwards_config.accept_intercept_htlcs = true; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]); + + let configs = [None, Some(intercept_forwards_config), Some(zero_conf_chan_config)]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -1980,9 +2070,10 @@ fn do_test_intercepted_payment(test: InterceptTest) { nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes ).unwrap(); - let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap(); - nodes[0].node.send_payment_with_route(route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let (hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment_with_route(route.clone(), hash, onion, id).unwrap(); let payment_event = { { let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); @@ -2000,11 +2091,11 @@ fn do_test_intercepted_payment(test: InterceptTest) { // Check that we generate the PaymentIntercepted event when an intercept forward is detected. 
let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); - let (intercept_id, expected_outbound_amount_msat) = match events[0] { + let (intercept_id, outbound_amt) = match events[0] { crate::events::Event::HTLCIntercepted { - intercept_id, expected_outbound_amount_msat, payment_hash: pmt_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id + intercept_id, expected_outbound_amount_msat, payment_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id } => { - assert_eq!(pmt_hash, payment_hash); + assert_eq!(payment_hash, hash); assert_eq!(inbound_amount_msat, route.get_total_amount() + route.get_total_fees()); assert_eq!(short_channel_id, intercept_scid); (intercept_id, expected_outbound_amount_msat) @@ -2013,15 +2104,22 @@ fn do_test_intercepted_payment(test: InterceptTest) { }; // Check for unknown channel id error. - let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), node_c_id, expected_outbound_amount_msat).unwrap_err(); - assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", - log_bytes!([42; 32]), node_c_id) }); + let chan_id = ChannelId::from_bytes([42; 32]); + let unknown_chan_id_err = + nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt); + let err = format!( + "Channel with id {} not found for the passed counterparty node_id {}", + log_bytes!([42; 32]), + node_c_id, + ); + assert_eq!(unknown_chan_id_err, Err(APIError::ChannelUnavailable { err })); if test == InterceptTest::Fail { // Ensure we can fail the intercepted payment back. nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); + let fail = + HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], [fail]); nodes[1].node.process_pending_htlc_forwards(); let update_fail = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(&nodes[1], 1); @@ -2035,21 +2133,24 @@ fn do_test_intercepted_payment(test: InterceptTest) { .blamed_scid(intercept_scid) .blamed_chan_closed(true) .expected_htlc_error_data(LocalHTLCFailureReason::UnknownNextPeer, &[]); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, fail_conditions); + expect_payment_failed_conditions(&nodes[0], hash, false, fail_conditions); } else if test == InterceptTest::Forward { // Check that we'll fail as expected when sending to a channel that isn't in `ChannelReady` yet. 
- let temp_chan_id = nodes[1].node.create_channel(node_c_id, 100_000, 0, 42, None, None).unwrap(); - let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, node_c_id, expected_outbound_amount_msat).unwrap_err(); - assert_eq!(unusable_chan_err , APIError::ChannelUnavailable { - err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.", - temp_chan_id, node_c_id) }); + let temp_id = nodes[1].node.create_channel(node_c_id, 100_000, 0, 42, None, None).unwrap(); + let unusable_chan_err = + nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_id, node_c_id, outbound_amt); + let err = format!( + "Channel with id {} for the passed counterparty node_id {} is still opening.", + temp_id, node_c_id, + ); + assert_eq!(unusable_chan_err, Err(APIError::ChannelUnavailable { err })); assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1); // Open the just-in-time channel so the payment can then be forwarded. - let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None); + let (_, chan_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None); // Finally, forward the intercepted payment through and claim it. - nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, node_c_id, expected_outbound_amount_msat).unwrap(); + nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt).unwrap(); expect_pending_htlcs_forwardable!(nodes[1]); let payment_event = { @@ -2066,24 +2167,26 @@ fn do_test_intercepted_payment(test: InterceptTest) { commitment_signed_dance!(nodes[2], nodes[1], &payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); - let payment_preimage = nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); - expect_payment_claimable!(&nodes[2], payment_hash, payment_secret, amt_msat, Some(payment_preimage), node_c_id); - do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - ); + let preimage = + Some(nodes[2].node.get_payment_preimage(hash, payment_secret).unwrap()); + expect_payment_claimable!(&nodes[2], hash, payment_secret, amt_msat, preimage, node_c_id); + + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, preimage.unwrap())); + let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentSent { payment_preimage: ref ev_preimage, payment_hash: ref ev_hash, ref fee_paid_msat, .. } => { - assert_eq!(payment_preimage, *ev_preimage); - assert_eq!(payment_hash, *ev_hash); + Event::PaymentSent { payment_preimage, payment_hash, ref fee_paid_msat, .. } => { + assert_eq!(preimage.unwrap(), payment_preimage); + assert_eq!(hash, payment_hash); assert_eq!(fee_paid_msat, &Some(1000)); }, _ => panic!("Unexpected event") } match events[1] { - Event::PaymentPathSuccessful { payment_hash: hash, .. } => { - assert_eq!(hash, Some(payment_hash)); + Event::PaymentPathSuccessful { payment_hash, .. 
} => { + assert_eq!(payment_hash, Some(hash)); }, _ => panic!("Unexpected event") } @@ -2097,24 +2200,32 @@ fn do_test_intercepted_payment(test: InterceptTest) { connect_block(&nodes[0], &block); connect_block(&nodes[1], &block); } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }]); + let fail_type = + HTLCHandlingFailureType::InvalidForward { requested_forward_scid: intercept_scid }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); check_added_monitors!(nodes[1], 1); - let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], node_a_id); - assert!(htlc_timeout_updates.update_add_htlcs.is_empty()); - assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1); - assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty()); - assert!(htlc_timeout_updates.update_fee.is_none()); - nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_timeout_updates.update_fail_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false); - expect_payment_failed!(nodes[0], payment_hash, false, LocalHTLCFailureReason::TemporaryNodeFailure, []); + let htlc_fail = get_htlc_update_msgs!(nodes[1], node_a_id); + assert!(htlc_fail.update_add_htlcs.is_empty()); + assert_eq!(htlc_fail.update_fail_htlcs.len(), 1); + assert!(htlc_fail.update_fail_malformed_htlcs.is_empty()); + assert!(htlc_fail.update_fee.is_none()); + + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_fail.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], htlc_fail.commitment_signed, false); + let reason = LocalHTLCFailureReason::TemporaryNodeFailure; + expect_payment_failed!(nodes[0], hash, false, reason, []); // Check for unknown intercept id error. 
- let (_, channel_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None);
- let unknown_intercept_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &channel_id, node_c_id, expected_outbound_amount_msat).unwrap_err();
- assert_eq!(unknown_intercept_id_err , APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) });
+ let (_, chan_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None);
+ let unknown_intercept_id_err =
+ nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt);
+ let err = format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0));
+ assert_eq!(unknown_intercept_id_err, Err(APIError::APIMisuseError { err }));
+
 let unknown_intercept_id_err = nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap_err();
- assert_eq!(unknown_intercept_id_err , APIError::APIMisuseError { err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) });
+ let err = format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0));
+ assert_eq!(unknown_intercept_id_err, APIError::APIMisuseError { err });
 }
}
@@ -2128,6 +2239,7 @@ fn accept_underpaying_htlcs_config() {
 fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
 let chanmon_cfgs = create_chanmon_cfgs(3);
 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let max_in_flight_percent = 10;
 let mut intercept_forwards_config = test_default_channel_config();
 intercept_forwards_config.accept_intercept_htlcs = true;
@@ -2135,7 +2247,9 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
 let mut underpay_config = test_default_channel_config();
 underpay_config.channel_config.accept_underpaying_htlcs = true;
 underpay_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent;
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(underpay_config)]);
+
+ let configs = [None, Some(intercept_forwards_config), Some(underpay_config)];
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs);
 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 let node_a_id = nodes[0].node.get_our_node_id();
@@ -2149,8 +2263,8 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
 // We choose the channel size so that there can be at most one part pending on each channel.
 let channel_size = amt_msat / 1000 / num_mpp_parts as u64 * 100 / max_in_flight_percent as u64 + 100;
 let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_size, 0);
- let channel_id = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, channel_size, 0).0.channel_id;
- chan_ids.push(channel_id);
+ let chan = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, channel_size, 0);
+ chan_ids.push(chan.0.channel_id);
 }
 // Send the initial payment.
@@ -2174,14 +2288,18 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
 .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap();
 let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap();
- nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
- PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+
+ let onion = RecipientOnionFields::secret_only(payment_secret);
+ let id = PaymentId(payment_hash.0);
+ nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(0)).unwrap();
+
 check_added_monitors!(nodes[0], num_mpp_parts); // one monitor per path
- let mut events: Vec<SendEvent> = nodes[0].node.get_and_clear_pending_msg_events().into_iter().map(|e| SendEvent::from_event(e)).collect();
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), num_mpp_parts);
 // Forward the intercepted payments.
 for (idx, ev) in events.into_iter().enumerate() {
+ let ev = SendEvent::from_event(ev);
 nodes[1].node.handle_update_add_htlc(node_a_id, &ev.msgs[0]);
 do_commitment_signed_dance(&nodes[1], &nodes[0], &ev.commitment_msg, false, true);
 expect_pending_htlcs_forwardable!(nodes[1]);
@@ -2197,10 +2315,10 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
 },
 _ => panic!()
 };
- nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_ids[idx],
- node_c_id, expected_outbound_amt_msat - skimmed_fee_msat).unwrap();
+ let amt = expected_outbound_amt_msat - skimmed_fee_msat;
+ nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_ids[idx], node_c_id, amt).unwrap();
 expect_pending_htlcs_forwardable!(nodes[1]);
- let payment_event = {
+ let pay_event = {
 {
 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 assert_eq!(added_monitors.len(), 1);
 added_monitors.clear();
 }
 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
 SendEvent::from_event(events.remove(0))
 };
- nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]);
- do_commitment_signed_dance(&nodes[2], &nodes[1], &payment_event.commitment_msg, false, true);
+ nodes[2].node.handle_update_add_htlc(node_b_id, &pay_event.msgs[0]);
+ do_commitment_signed_dance(&nodes[2], &nodes[1], &pay_event.commitment_msg, false, true);
 if idx == num_mpp_parts - 1 {
 expect_pending_htlcs_forwardable!(nodes[2]);
 }
@@ -2252,8 +2370,8 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
 .with_expected_extra_fees(vec![skimmed_fee_msat as u32; num_mpp_parts]);
 let total_fee_msat = pass_claimed_payment_along_route(args);
 // The sender doesn't know that the penultimate hop took an extra fee.
- expect_payment_sent(&nodes[0], payment_preimage, - Some(Some(total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64)), true, true); + let amt = total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64; + expect_payment_sent(&nodes[0], payment_preimage, Some(Some(amt)), true, true); } #[derive(PartialEq)] @@ -2281,10 +2399,10 @@ fn do_automatic_retries(test: AutoRetry) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let persister; - let new_chain_monitor; + let chain_monitor; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let node_0_deserialized; + let node_a_reload; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -2309,7 +2427,7 @@ fn do_automatic_retries(test: AutoRetry) { .with_expiry_time(payment_expiry_secs as u64) .with_bolt11_features(invoice_features).unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); - let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + let (_, hash, preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); macro_rules! pass_failed_attempt_with_retry_along_path { ($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => { @@ -2322,7 +2440,7 @@ fn do_automatic_retries(test: AutoRetry) { expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], - vec![HTLCHandlingFailureType::Forward { + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: $failing_channel_id, }]); @@ -2338,8 +2456,8 @@ fn do_automatic_retries(test: AutoRetry) { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => { - assert_eq!(payment_hash, ev_payment_hash); + Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } => { + assert_eq!(hash, payment_hash); assert_eq!(payment_failed_permanently, false); }, _ => panic!("Unexpected event"), @@ -2351,8 +2469,8 @@ fn do_automatic_retries(test: AutoRetry) { } } else { match events[1] { - Event::PaymentFailed { payment_hash: ev_payment_hash, .. } => { - assert_eq!(Some(payment_hash), ev_payment_hash); + Event::PaymentFailed { payment_hash, .. } => { + assert_eq!(Some(hash), payment_hash); }, _ => panic!("Unexpected event"), } @@ -2362,8 +2480,10 @@ fn do_automatic_retries(test: AutoRetry) { if test == AutoRetry::Success { // Test that we can succeed on the first retry. 
- nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + let retry = Retry::Attempts(1); + nodes[0].node.send_payment(hash, onion, id, route_params, retry).unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // Open a new channel with liquidity on the second hop so we can find a route for the retry @@ -2373,16 +2493,23 @@ fn do_automatic_retries(test: AutoRetry) { // We retry payments in `process_pending_htlc_forwards` nodes[0].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[0], 1); + let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, Some(payment_secret), msg_events.pop().unwrap(), true, None); + let event = msg_events.pop().unwrap(); + + let path = &[&nodes[1], &nodes[2]]; + pass_along_path(&nodes[0], path, amt_msat, hash, Some(payment_secret), event, true, None); claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], preimage) ); } else if test == AutoRetry::Spontaneous { - nodes[0].node.send_spontaneous_payment(Some(payment_preimage), - RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params.clone(), - Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::spontaneous_empty(); + let id = PaymentId(hash.0); + nodes[0] + .node + .send_spontaneous_payment(Some(preimage), onion, id, route_params, Retry::Attempts(1)) + .unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // Open a new channel with liquidity on the second hop so we can find a route for the retry @@ -2392,16 +2519,21 @@ fn do_automatic_retries(test: AutoRetry) { // We retry payments in `process_pending_htlc_forwards` nodes[0].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[0], 1); + let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], amt_msat, payment_hash, None, msg_events.pop().unwrap(), true, Some(payment_preimage)); + let event = msg_events.pop().unwrap(); + + let path = &[&nodes[1], &nodes[2]]; + pass_along_path(&nodes[0], path, amt_msat, hash, None, event, true, Some(preimage)); claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) + ClaimAlongRouteArgs::new(&nodes[0], &[path], preimage) ); } else if test == AutoRetry::FailAttempts { // Ensure ChannelManager will not retry a payment if it has run out of payment attempts. 
- nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment(hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // Open a new channel with no liquidity on the second hop so we can find a (bad) route for @@ -2419,8 +2551,10 @@ fn do_automatic_retries(test: AutoRetry) { } else if test == AutoRetry::FailTimeout { #[cfg(feature = "std")] { // Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout. - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Timeout(Duration::from_secs(60))).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + let retry = Retry::Timeout(Duration::from_secs(60)); + nodes[0].node.send_payment(hash, onion, id, route_params, retry).unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // Advance the time so the second attempt fails due to timeout. @@ -2434,10 +2568,10 @@ fn do_automatic_retries(test: AutoRetry) { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => { - assert_eq!(Some(payment_hash), *ev_payment_hash); - assert_eq!(PaymentId(payment_hash.0), *ev_payment_id); - assert_eq!(PaymentFailureReason::RetriesExhausted, ev_reason.unwrap()); + Event::PaymentFailed { payment_hash, payment_id, reason } => { + assert_eq!(Some(hash), payment_hash); + assert_eq!(PaymentId(hash.0), payment_id); + assert_eq!(PaymentFailureReason::RetriesExhausted, reason.unwrap()); }, _ => panic!("Unexpected event"), } @@ -2445,8 +2579,9 @@ fn do_automatic_retries(test: AutoRetry) { } else if test == AutoRetry::FailOnRestart { // Ensure ChannelManager will not retry a payment after restart, even if there were retry // attempts remaining prior to restart. 
- nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(2)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment(hash, onion, id, route_params, Retry::Attempts(2)).unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // Open a new channel with no liquidity on the second hop so we can find a (bad) route for @@ -2459,8 +2594,8 @@ fn do_automatic_retries(test: AutoRetry) { // Restart the node and ensure that ChannelManager does not use its remaining retry attempt let node_encoded = nodes[0].node.encode(); - let chan_1_monitor_serialized = get_monitor!(nodes[0], channel_id_1).encode(); - reload_node!(nodes[0], node_encoded, &[&chan_1_monitor_serialized], persister, new_chain_monitor, node_0_deserialized); + let mon_ser = get_monitor!(nodes[0], channel_id_1).encode(); + reload_node!(nodes[0], node_encoded, &[&mon_ser], persister, chain_monitor, node_a_reload); let mut events = nodes[0].node.get_and_clear_pending_events(); expect_pending_htlcs_forwardable_from_events!(nodes[0], events, true); @@ -2471,16 +2606,17 @@ fn do_automatic_retries(test: AutoRetry) { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => { - assert_eq!(Some(payment_hash), *ev_payment_hash); - assert_eq!(PaymentId(payment_hash.0), *ev_payment_id); - assert_eq!(PaymentFailureReason::RetriesExhausted, ev_reason.unwrap()); + Event::PaymentFailed { payment_hash, payment_id, reason } => { + assert_eq!(Some(hash), payment_hash); + assert_eq!(PaymentId(hash.0), payment_id); + assert_eq!(PaymentFailureReason::RetriesExhausted, reason.unwrap()); }, _ => panic!("Unexpected event"), } } else if test == AutoRetry::FailOnRetry { - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment(hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); pass_failed_attempt_with_retry_along_path!(channel_id_2, true); // We retry payments in `process_pending_htlc_forwards`. Since our channel closed, we should @@ -2492,10 +2628,10 @@ fn do_automatic_retries(test: AutoRetry) { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => { - assert_eq!(Some(payment_hash), *ev_payment_hash); - assert_eq!(PaymentId(payment_hash.0), *ev_payment_id); - assert_eq!(PaymentFailureReason::RouteNotFound, ev_reason.unwrap()); + Event::PaymentFailed { payment_hash, payment_id, reason } => { + assert_eq!(Some(hash), payment_hash); + assert_eq!(PaymentId(hash.0), payment_id); + assert_eq!(PaymentFailureReason::RouteNotFound, reason.unwrap()); }, _ => panic!("Unexpected event"), } @@ -2516,8 +2652,10 @@ fn auto_retry_partial_failure() { // Open three channels, the first has plenty of liquidity, the second and third have ~no // available liquidity, causing any outbound payments routed over it to fail immediately. 
let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; - let chan_2_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id; - let chan_3_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id; + let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000); + let chan_2_id = chan_2.0.contents.short_channel_id; + let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000); + let chan_3_id = chan_3.0.contents.short_channel_id; // Marshall data to send the payment let amt_msat = 10_000_000; @@ -2617,8 +2755,10 @@ fn auto_retry_partial_failure() { nodes[0].router.expect_find_route(retry_2_params, Ok(retry_2_route)); // Send a payment that will partially fail on send, then partially fail on retry, then succeed. - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(3)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(3)).unwrap(); + let payment_failed_events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(payment_failed_events.len(), 2); match payment_failed_events[0] { @@ -2645,7 +2785,7 @@ fn auto_retry_partial_failure() { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); - let as_second_htlc_updates = SendEvent::from_node(&nodes[0]); + let as_2nd_htlcs = SendEvent::from_node(&nodes[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); check_added_monitors!(nodes[0], 1); @@ -2654,9 +2794,9 @@ fn auto_retry_partial_failure() { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_update_add_htlc(node_a_id, &as_second_htlc_updates.msgs[0]); - nodes[1].node.handle_update_add_htlc(node_a_id, &as_second_htlc_updates.msgs[1]); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_second_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &as_2nd_htlcs.msgs[1]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_2nd_htlcs.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); @@ -2675,18 +2815,18 @@ fn auto_retry_partial_failure() { expect_payment_claimable!(nodes[1], payment_hash, payment_secret, amt_msat); nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, amt_msat); - let bs_claim_update = get_htlc_update_msgs!(nodes[1], node_a_id); - assert_eq!(bs_claim_update.update_fulfill_htlcs.len(), 1); + let bs_claim = get_htlc_update_msgs!(nodes[1], node_a_id); + assert_eq!(bs_claim.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_claim_update.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_claim.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_claim_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, 
&bs_claim.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_third_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_third_raa); check_added_monitors!(nodes[1], 4); - let bs_second_claim_update = get_htlc_update_msgs!(nodes[1], node_a_id); + let bs_2nd_claim = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_third_cs); check_added_monitors!(nodes[1], 1); @@ -2696,9 +2836,9 @@ fn auto_retry_partial_failure() { check_added_monitors!(nodes[0], 1); expect_payment_path_successful!(nodes[0]); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_second_claim_update.update_fulfill_htlcs[0]); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_second_claim_update.update_fulfill_htlcs[1]); - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_claim_update.commitment_signed); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_2nd_claim.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_2nd_claim.update_fulfill_htlcs[1]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_2nd_claim.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_fourth_raa, as_fourth_cs) = get_revoke_commit_msgs!(nodes[0], node_b_id); @@ -2728,7 +2868,8 @@ fn auto_retry_zero_attempts_send_error() { // Open a single channel that does not have sufficient liquidity for the payment we want to // send. - let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000).0.contents.short_channel_id; + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000); + let chan_id = chan.0.contents.short_channel_id; // Marshall data to send the payment let amt_msat = 10_000_000; @@ -2763,8 +2904,10 @@ fn auto_retry_zero_attempts_send_error() { }; nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route)); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(0)).unwrap(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -2801,8 +2944,9 @@ fn fails_paying_after_rejected_by_payee() { .with_bolt11_features(invoice_features).unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2814,8 +2958,10 @@ fn fails_paying_after_rejected_by_payee() { expect_payment_claimable!(&nodes[1], payment_hash, payment_secret, amt_msat); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCHandlingFailureType::Receive { payment_hash 
}]); - pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash, PaymentFailureReason::RecipientRejected); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + let reason = PaymentFailureReason::RecipientRejected; + pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, payment_hash, reason); } #[test] @@ -2888,15 +3034,21 @@ fn retry_multi_path_single_failed_payment() { { let scorer = chanmon_cfgs[0].scorer.read().unwrap(); // The initial send attempt, 2 paths - scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 10_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown }); - scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown }); + let effective_capacity = EffectiveCapacity::Unknown; + let usage = ChannelUsage { amount_msat: 10_000, inflight_htlc_msat: 0, effective_capacity }; + scorer.expect_usage(chans[0].short_channel_id.unwrap(), usage); + let usage = ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity }; + scorer.expect_usage(chans[1].short_channel_id.unwrap(), usage); // The retry, 2 paths. Ensure that the in-flight HTLC amount is factored in. - scorer.expect_usage(chans[0].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_001, inflight_htlc_msat: 10_000, effective_capacity: EffectiveCapacity::Unknown }); - scorer.expect_usage(chans[1].short_channel_id.unwrap(), ChannelUsage { amount_msat: 50_000_000, inflight_htlc_msat: 0, effective_capacity: EffectiveCapacity::Unknown }); + let usage = ChannelUsage { amount_msat: 50_000_001, inflight_htlc_msat: 10_000, effective_capacity }; + scorer.expect_usage(chans[0].short_channel_id.unwrap(), usage); + let usage = ChannelUsage { amount_msat: 50_000_000, inflight_htlc_msat: 0, effective_capacity }; + scorer.expect_usage(chans[1].short_channel_id.unwrap(), usage); } - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { @@ -2969,8 +3121,9 @@ fn immediate_retry_on_failure() { route.route_params = Some(retry_params.clone()); nodes[0].router.expect_find_route(retry_params, Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { @@ -3089,23 +3242,24 @@ fn no_extra_retries_on_back_to_back_fail() { // We can't use the commitment_signed_dance macro helper because in this test we'll be sending // two HTLCs back-to-back on the same channel, and the macro only expects to handle one at a // time. 
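The dance the macro normally hides is a fixed five-message exchange; with two HTLCs queued back-to-back the exchanges interleave, which is why the test below drives each step by hand. A self-contained sketch of the single-update sequence (message names only, no harness types):

    // One commitment_signed dance for a single update sent from A to B.
    const DANCE: &[&str] = &[
        "A -> B: update_add_htlc",
        "A -> B: commitment_signed",
        "B -> A: revoke_and_ack",
        "B -> A: commitment_signed",
        "A -> B: revoke_and_ack",
    ];

    fn main() {
        for step in DANCE {
            println!("{step}");
        }
    }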
- nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); - let first_htlc_updates = SendEvent::from_node(&nodes[0]); + let first_htlc = SendEvent::from_node(&nodes[0]); check_added_monitors!(nodes[0], 1); - assert_eq!(first_htlc_updates.msgs.len(), 1); + assert_eq!(first_htlc.msgs.len(), 1); - nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); - let second_htlc_updates = SendEvent::from_node(&nodes[0]); - assert_eq!(second_htlc_updates.msgs.len(), 1); + let second_htlc = SendEvent::from_node(&nodes[0]); + assert_eq!(second_htlc.msgs.len(), 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); check_added_monitors!(nodes[0], 1); @@ -3114,8 +3268,8 @@ fn no_extra_retries_on_back_to_back_fail() { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &second_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &second_htlc.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &second_htlc.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_second_raa, bs_second_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); @@ -3296,14 +3450,15 @@ fn test_simple_partial_retry() { // We can't use the commitment_signed_dance macro helper because in this test we'll be sending // two HTLCs back-to-back on the same channel, and the macro only expects to handle one at a // time. 
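One detail worth pulling out of this test: when only one part of an MPP payment fails and a retry is permitted, the consumer sees a non-permanent `PaymentPathFailed` for the failed part while the other part stays in flight; no terminal `PaymentFailed` fires. A small sketch of that check, assuming recent LDK event field names:

    use lightning::events::Event;

    // True when a path failed but the payment as a whole may still succeed
    // (LDK will retry just the failed part, as this test exercises).
    fn is_retryable_path_failure(event: &Event) -> bool {
        matches!(
            event,
            Event::PaymentPathFailed { payment_failed_permanently: false, .. }
        )
    }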
- nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap(); - let first_htlc_updates = SendEvent::from_node(&nodes[0]); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); + let first_htlc = SendEvent::from_node(&nodes[0]); check_added_monitors!(nodes[0], 1); - assert_eq!(first_htlc_updates.msgs.len(), 1); + assert_eq!(first_htlc.msgs.len(), 1); - nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc_updates.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc_updates.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &first_htlc.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &first_htlc.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); @@ -3376,9 +3531,9 @@ fn test_simple_partial_retry() { expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); - let bs_second_forward_update = get_htlc_update_msgs!(nodes[1], node_c_id); - nodes[2].node.handle_update_add_htlc(node_b_id, &bs_second_forward_update.update_add_htlcs[0]); - commitment_signed_dance!(nodes[2], nodes[1], &bs_second_forward_update.commitment_signed, false); + let bs_second_forward = get_htlc_update_msgs!(nodes[1], node_c_id); + nodes[2].node.handle_update_add_htlc(node_b_id, &bs_second_forward.update_add_htlcs[0]); + commitment_signed_dance!(nodes[2], nodes[1], &bs_second_forward.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[2]); expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat); @@ -3407,10 +3562,13 @@ fn test_threaded_payment_retries() { // keep things simple, we route one HTLC for 0.1% of the payment over channel 1 and the rest // out over channel 3+4. This will let us ignore 99% of the payment value and deal with only // our channel. 
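The 0.1%/99.9% split described in the comment is plain arithmetic on the msat amount; a tiny sketch with the test's numbers:

    // Route a 0.1% sliver over the channel under test and the remainder
    // elsewhere, so retries only ever touch the sliver.
    fn split_for_test(amt_msat: u64) -> (u64, u64) {
        let sliver = amt_msat / 1_000; // 0.1%
        (sliver, amt_msat - sliver)
    }

    fn main() {
        let (sliver, rest) = split_for_test(100_000_000);
        assert_eq!(sliver, 100_000);
        assert_eq!(rest, 99_900_000);
    }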
- let chan_1_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0).0.contents.short_channel_id; + let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0); + let chan_1_scid = chan_1.0.contents.short_channel_id; create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 10_000_000, 0); - let chan_3_scid = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 10_000_000, 0).0.contents.short_channel_id; - let chan_4_scid = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 0).0.contents.short_channel_id; + let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 10_000_000, 0); + let chan_3_scid = chan_3.0.contents.short_channel_id; + let chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 10_000_000, 0); + let chan_4_scid = chan_4.0.contents.short_channel_id; let amt_msat = 100_000_000; let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat); @@ -3470,8 +3628,10 @@ fn test_threaded_payment_retries() { }; nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0xdeadbeef)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + let retry = Retry::Attempts(0xdeadbeef); + nodes[0].node.send_payment(payment_hash, onion, id, route_params.clone(), retry).unwrap(); check_added_monitors!(nodes[0], 2); let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(send_msg_events.len(), 2); @@ -3545,7 +3705,8 @@ fn test_threaded_payment_retries() { // This races with our other threads which may generate an add-HTLCs commitment update via // `process_pending_htlc_forwards`. Instead, we defer the monitor update check until after // *we've* called `process_pending_htlc_forwards` when its guaranteed to have two updates. - let last_raa = commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true, false, true); + let cs = bs_fail_updates.commitment_signed; + let last_raa = commitment_signed_dance!(nodes[0], nodes[1], cs, false, true, false, true); nodes[0].node.handle_revoke_and_ack(node_b_id, &last_raa); let cur_time = Instant::now(); @@ -3571,10 +3732,10 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: // it was last persisted. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let (persister_a, persister_b, persister_c); + let (persist_a, persist_b, persist_c); let (chain_monitor_a, chain_monitor_b, chain_monitor_c); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let (nodes_0_deserialized, nodes_0_deserialized_b, nodes_0_deserialized_c); + let (node_a_1, node_a_2, node_a_3); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3582,15 +3743,15 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; - let mut nodes_0_serialized = Vec::new(); + let mut node_a_ser = Vec::new(); if !persist_manager_with_payment { - nodes_0_serialized = nodes[0].node.encode(); + node_a_ser = nodes[0].node.encode(); } let (our_payment_preimage, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 1_000_000); if persist_manager_with_payment { - nodes_0_serialized = nodes[0].node.encode(); + node_a_ser = nodes[0].node.encode(); } nodes[1].node.claim_funds(our_payment_preimage); @@ -3603,9 +3764,9 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); check_added_monitors!(nodes[0], 1); } else { - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &htlc_fulfill_updates.update_fulfill_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], htlc_fulfill_updates.commitment_signed, false); + let htlc_fulfill = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &htlc_fulfill.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], htlc_fulfill.commitment_signed, false); // Ignore the PaymentSent event which is now pending on nodes[0] - if we were to handle it we'd // be expected to ignore the eventual conflicting PaymentFailed, but by not looking at it we // expect to get the PaymentSent again later. @@ -3614,8 +3775,9 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: // The ChannelMonitor should always be the latest version, as we're required to persist it // during the commitment signed handling. - let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); - reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister_a, chain_monitor_a, nodes_0_deserialized); + let mon_ser = get_monitor!(nodes[0], chan_id).encode(); + let config = test_default_channel_config(); + reload_node!(nodes[0], config, &node_a_ser, &[&mon_ser], persist_a, chain_monitor_a, node_a_1); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -3634,8 +3796,10 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: // failure event when we restart. 
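The invariant these reloads rely on — monitors always current, the manager allowed to lag — is the same one an application persister must uphold. A sketch against a hypothetical `Store` trait (not an LDK API; names are illustrative):

    // Hypothetical key-value persister; stands in for whatever durable
    // storage the application uses. Not part of LDK.
    trait Store {
        fn write(&mut self, key: &str, bytes: &[u8]);
    }

    // ChannelMonitor bytes must be durable before a commitment update is
    // considered complete; ChannelManager bytes may be stale, since on
    // reload LDK replays the missing PaymentSent/PaymentFailed events
    // (exactly what this test demonstrates).
    fn persist_snapshot<S: Store>(store: &mut S, monitor_ser: &[u8], manager_ser: &[u8]) {
        store.write("monitors/chan_0", monitor_ser);
        store.write("manager", manager_ser);
    }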
for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); } - let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); - reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_b, chain_monitor_b, nodes_0_deserialized_b); + let mon_ser = get_monitor!(nodes[0], chan_id).encode(); + let node_ser = nodes[0].node.encode(); + let config = test_default_channel_config(); + reload_node!(nodes[0], config, &node_ser, &[&mon_ser], persist_b, chain_monitor_b, node_a_2); let events = nodes[0].node.get_and_clear_pending_events(); assert!(events.is_empty()); @@ -3647,8 +3811,10 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: let events = nodes[0].node.get_and_clear_pending_events(); assert!(events.is_empty()); - let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); - reload_node!(nodes[0], test_default_channel_config(), &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister_c, chain_monitor_c, nodes_0_deserialized_c); + let mon_ser = get_monitor!(nodes[0], chan_id).encode(); + let config = test_default_channel_config(); + let node_ser = nodes[0].node.encode(); + reload_node!(nodes[0], config, &node_ser, &[&mon_ser], persist_c, chain_monitor_c, node_a_3); let events = nodes[0].node.get_and_clear_pending_events(); assert!(events.is_empty()); } @@ -3696,15 +3862,17 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let chan_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2; create_announced_chan_between_nodes(&nodes, 2, 3); - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3]); + let (payment_preimage, hash, secret) = get_payment_preimage_hash!(nodes[3]); let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000_000); - let mut route = nodes[0].router.find_route(&node_a_id, &route_params, - None, nodes[0].node.compute_inflight_htlcs()).unwrap(); + let inflight = nodes[0].node.compute_inflight_htlcs(); + let mut route = nodes[0].router.find_route(&node_a_id, &route_params, None, inflight).unwrap(); + // Make sure the route is ordered as the B->D path before C->D - route.paths.sort_by(|a, _| if a.hops[0].pubkey == node_b_id { - std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater }); + route.paths.sort_by(|a, _| + if a.hops[0].pubkey == node_b_id { Ordering::Less } else { Ordering::Greater }); // Note that we add an extra 1 in the send pipeline to compensate for any blocks found while // the HTLC is being relayed. 
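The expiry arithmetic the next hunk pins down can be written out directly; a sketch mirroring the test's own constants (the 8-block grace and the extra 1 compensating for blocks mined while the HTLC is relayed are the test's numbers, not protocol constants):

    // CLTV expiry the sender sets on the final hop, per the hunk below.
    fn final_cltv(best_block_height: u32, final_cltv_delta: u32) -> u32 {
        best_block_height + final_cltv_delta + 8 + 1
    }

    fn main() {
        // e.g. at height 100 with a 72-block final delta:
        assert_eq!(final_cltv(100, 72), 181);
    }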
@@ -3713,22 +3881,26 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let final_cltv = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 8 + 1; nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment(hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); + check_added_monitors(&nodes[0], 2); let mut send_msgs = nodes[0].node.get_and_clear_pending_msg_events(); send_msgs.sort_by(|a, _| { let a_node_id = if let MessageSendEvent::UpdateHTLCs { node_id, .. } = a { node_id } else { panic!() }; let node_b_id = node_b_id; - if *a_node_id == node_b_id { std::cmp::Ordering::Less } else { std::cmp::Ordering::Greater } + if *a_node_id == node_b_id { Ordering::Less } else { Ordering::Greater } }); assert_eq!(send_msgs.len(), 2); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 10_000_000, - payment_hash, Some(payment_secret), send_msgs.remove(0), false, None); - let receive_event = pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 10_000_000, - payment_hash, Some(payment_secret), send_msgs.remove(0), true, None); + let (msg_a, msg_b) = (send_msgs.remove(0), send_msgs.remove(0)); + let (path_a, path_b) = (&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]); + + pass_along_path(&nodes[0], path_a, 10_000_000, hash, Some(secret), msg_a, false, None); + let receive_event = + pass_along_path(&nodes[0], path_b, 10_000_000, hash, Some(secret), msg_b, true, None); match receive_event.unwrap() { Event::PaymentClaimable { claim_deadline, .. } => { @@ -3739,21 +3911,26 @@ fn do_claim_from_closed_chan(fail_payment: bool) { // Ensure that the claim_deadline is correct, with the payment failing at exactly the given // height. - connect_blocks(&nodes[3], final_cltv - HTLC_FAIL_BACK_BUFFER - nodes[3].best_block_info().1 - - if fail_payment { 0 } else { 2 }); - let error_message = "Channel force-closed"; + let blocks = final_cltv + - HTLC_FAIL_BACK_BUFFER + - nodes[3].best_block_info().1 + - if fail_payment { 0 } else { 2 }; + connect_blocks(&nodes[3], blocks); if fail_payment { // We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead // and expire both immediately, though, by connecting another 4 blocks. 
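The height at which the recipient starts failing HTLCs back is derived the same way the `blocks` computation above does it; a sketch of that relationship (the buffer value used in `main` is illustrative, not LDK's actual constant):

    // A recipient fails an unclaimed HTLC back once the chain gets within
    // HTLC_FAIL_BACK_BUFFER blocks of its CLTV expiry, leaving room for the
    // failure to propagate before anyone must go on-chain.
    fn fail_back_height(htlc_cltv_expiry: u32, fail_back_buffer: u32) -> u32 {
        htlc_cltv_expiry - fail_back_buffer
    }

    fn main() {
        assert_eq!(fail_back_height(181, 6), 175); // 6 is an example buffer
    }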
- let reason = HTLCHandlingFailureType::Receive { payment_hash }; + let reason = HTLCHandlingFailureType::Receive { payment_hash: hash }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason.clone()]); connect_blocks(&nodes[3], 4); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[3], [reason]); - pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected); + + let reason = PaymentFailureReason::RecipientRejected; + pass_failed_payment_back(&nodes[0], &[path_a, path_b], false, hash, reason); } else { - nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &node_d_id, error_message.to_string()).unwrap(); - check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, - [node_d_id], 1000000); + let err = "Channel force-closed".to_string(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan_bd, &node_d_id, err).unwrap(); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(&nodes[1], 1, reason, false, [node_d_id], 1000000); check_closed_broadcast(&nodes[1], 1, true); let bs_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_tx.len(), 1); @@ -3761,12 +3938,12 @@ fn do_claim_from_closed_chan(fail_payment: bool) { mine_transaction(&nodes[3], &bs_tx[0]); check_added_monitors(&nodes[3], 1); check_closed_broadcast(&nodes[3], 1, true); - check_closed_event!(&nodes[3], 1, ClosureReason::CommitmentTxConfirmed, false, - [node_b_id], 1000000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event!(&nodes[3], 1, reason, false, [node_b_id], 1000000); nodes[3].node.claim_funds(payment_preimage); check_added_monitors(&nodes[3], 2); - expect_payment_claimed!(nodes[3], payment_hash, 10_000_000); + expect_payment_claimed!(nodes[3], hash, 10_000_000); let ds_tx = nodes[3].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(ds_tx.len(), 1); @@ -3839,24 +4016,23 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 100_000; - let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat); - let payment_id = PaymentId(our_payment_hash.0); + let (mut route, hash, preimage, secret) = get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat); + let id = PaymentId(hash.0); let custom_tlvs = vec![ (if even_tlvs { 5482373482 } else { 5482373483 }, vec![1, 2, 3, 4]), (5482373487, vec![0x42u8; 16]), ]; - let onion_fields = RecipientOnionFields { - payment_secret: if spontaneous { None } else { Some(our_payment_secret) }, + let onion = RecipientOnionFields { + payment_secret: if spontaneous { None } else { Some(secret) }, payment_metadata: None, custom_tlvs: custom_tlvs.clone() }; if spontaneous { - nodes[0].node.send_spontaneous_payment( - Some(our_payment_preimage), onion_fields, payment_id, route.route_params.unwrap(), - Retry::Attempts(0) - ).unwrap(); + let params = route.route_params.unwrap(); + let retry = Retry::Attempts(0); + nodes[0].node.send_spontaneous_payment(Some(preimage), onion, id, params, retry).unwrap(); } else { - nodes[0].node.send_payment_with_route(route, our_payment_hash, onion_fields, payment_id).unwrap(); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); } 
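The even/odd rule the `even_tlvs` flag toggles applies to the custom TLV type numbers: even types must be understood by the recipient (hence `claim_funds_with_known_custom_tlvs` below), while odd types may be silently accepted. A sketch of building such an onion, assuming `RecipientOnionFields` and `PaymentSecret` are importable as in this file (module paths vary across LDK versions):

    use lightning::ln::channelmanager::RecipientOnionFields;
    use lightning::ln::PaymentSecret;

    // Odd type => recipient may ignore it; even type => recipient must know
    // it or the payment is failed back. Low type numbers are reserved.
    fn tlv_onion(payment_secret: PaymentSecret) -> RecipientOnionFields {
        let custom_tlvs = vec![(5482373483u64, vec![1, 2, 3, 4])];
        RecipientOnionFields::secret_only(payment_secret)
            .with_custom_tlvs(custom_tlvs)
            .expect("rejects reserved or duplicate TLV types")
    }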
check_added_monitors(&nodes[0], 1); @@ -3880,24 +4056,25 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { match (known_tlvs, even_tlvs) { (true, _) => { - nodes[1].node.claim_funds_with_known_custom_tlvs(our_payment_preimage); + nodes[1].node.claim_funds_with_known_custom_tlvs(preimage); let expected_total_fee_msat = pass_claimed_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], our_payment_preimage) + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage) .with_custom_tlvs(custom_tlvs) ); - expect_payment_sent!(&nodes[0], our_payment_preimage, Some(expected_total_fee_msat)); + expect_payment_sent!(&nodes[0], preimage, Some(expected_total_fee_msat)); }, (false, false) => { claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], our_payment_preimage) + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage) .with_custom_tlvs(custom_tlvs) ); }, (false, true) => { - nodes[1].node.claim_funds(our_payment_preimage); - let expected_destinations = vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], expected_destinations); - pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, our_payment_hash, PaymentFailureReason::RecipientRejected); + nodes[1].node.claim_funds(preimage); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); + let reason = PaymentFailureReason::RecipientRejected; + pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, hash, reason); } } } @@ -3918,23 +4095,22 @@ fn test_retry_custom_tlvs() { let (chan_2_update, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 2, 1); // Rebalance - send_payment(&nodes[2], &vec!(&nodes[1])[..], 1_500_000); + send_payment(&nodes[2], &[&nodes[1]], 1_500_000); let amt_msat = 1_000_000; - let (mut route, payment_hash, payment_preimage, payment_secret) = + let (mut route, hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); // Initiate the payment - let payment_id = PaymentId(payment_hash.0); + let id = PaymentId(hash.0); let mut route_params = route.route_params.clone().unwrap(); let custom_tlvs = vec![((1 << 16) + 1, vec![0x42u8; 16])]; - let onion_fields = RecipientOnionFields::secret_only(payment_secret); - let onion_fields = onion_fields.with_custom_tlvs(custom_tlvs.clone()).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let onion = onion.with_custom_tlvs(custom_tlvs.clone()).unwrap(); nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment(payment_hash, onion_fields, - payment_id, route_params.clone(), Retry::Attempts(1)).unwrap(); + nodes[0].node.send_payment(hash, onion, id, route_params.clone(), Retry::Attempts(1)).unwrap(); check_added_monitors!(nodes[0], 1); // one monitor per path // Add the HTLC along the first hop. @@ -3946,11 +4122,8 @@ fn test_retry_custom_tlvs() { // Attempt to forward the payment and complete the path's failure. 
expect_pending_htlcs_forwardable!(&nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], - vec![HTLCHandlingFailureType::Forward { - node_id: Some(node_c_id), - channel_id: chan_2_id - }]); + let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2_id }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], [fail]); check_added_monitors!(nodes[1], 1); let htlc_updates = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -3965,14 +4138,15 @@ fn test_retry_custom_tlvs() { _ => panic!("Unexpected event") } events.remove(1); - expect_payment_failed_conditions_event(events, payment_hash, false, - PaymentFailedConditions::new().mpp_parts_remain()); + let conditions = PaymentFailedConditions::new().mpp_parts_remain(); + expect_payment_failed_conditions_event(events, hash, false, conditions); // Rebalance the channel so the retry of the payment can succeed. - send_payment(&nodes[2], &vec!(&nodes[1])[..], 1_500_000); + send_payment(&nodes[2], &[&nodes[1]], 1_500_000); // Retry the payment and make sure it succeeds - route_params.payment_params.previously_failed_channels.push(chan_2_update.contents.short_channel_id); + let chan_2_scid = chan_2_update.contents.short_channel_id; + route_params.payment_params.previously_failed_channels.push(chan_2_scid); route.route_params = Some(route_params.clone()); nodes[0].router.expect_find_route(route_params, Ok(route)); nodes[0].node.process_pending_htlc_forwards(); @@ -3980,7 +4154,7 @@ fn test_retry_custom_tlvs() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1], &nodes[2]]; - let args = PassAlongPathArgs::new(&nodes[0], path, 1_000_000, payment_hash, events.pop().unwrap()) + let args = PassAlongPathArgs::new(&nodes[0], path, 1_000_000, hash, events.pop().unwrap()) .with_payment_secret(payment_secret) .with_custom_tlvs(custom_tlvs.clone()); do_pass_along_path(args); @@ -4050,44 +4224,50 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first - if path_a.hops[0].pubkey == node_b_id { - core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } + if path_a.hops[0].pubkey == node_b_id { Ordering::Less } else { Ordering::Greater } }); - let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]); - let payment_id = PaymentId([42; 32]); + let (preimage, hash, secret) = get_payment_preimage_hash!(&nodes[3]); + let id = PaymentId([42; 32]); let amt_msat = 15_000_000; // Send first part - let onion_fields = RecipientOnionFields { - payment_secret: Some(our_payment_secret), + let onion = RecipientOnionFields { + payment_secret: Some(secret), payment_metadata: None, custom_tlvs: first_tlvs }; - let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, - onion_fields.clone(), payment_id, &route).unwrap(); + let session_privs = + nodes[0].node.test_add_new_pending_payment(hash, onion.clone(), id, &route).unwrap(); let cur_height = nodes[0].best_block_info().1; - nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, - onion_fields.clone(), amt_msat, cur_height, payment_id, - &None, session_privs[0]).unwrap(); + let path_a = &route.paths[0]; + let priv_a = session_privs[0]; + nodes[0] + .node + .test_send_payment_along_path(path_a, &hash, onion, amt_msat, cur_height, id, &None, 
priv_a) + .unwrap(); check_added_monitors!(nodes[0], 1); - { - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], amt_msat, our_payment_hash, - Some(our_payment_secret), events.pop().unwrap(), false, None); - } + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + let path_a = &[&nodes[1], &nodes[3]]; + pass_along_path(&nodes[0], path_a, amt_msat, hash, Some(secret), event, false, None); + assert!(nodes[3].node.get_and_clear_pending_events().is_empty()); // Send second part - let onion_fields = RecipientOnionFields { - payment_secret: Some(our_payment_secret), + let onion = RecipientOnionFields { + payment_secret: Some(secret), payment_metadata: None, custom_tlvs: second_tlvs }; - nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, - onion_fields.clone(), amt_msat, cur_height, payment_id, &None, session_privs[1]).unwrap(); + let path_b = &route.paths[1]; + let priv_b = session_privs[1]; + nodes[0] + .node + .test_send_payment_along_path(path_b, &hash, onion, amt_msat, cur_height, id, &None, priv_b) + .unwrap(); check_added_monitors!(nodes[0], 1); { @@ -4124,13 +4304,13 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: } do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], our_payment_preimage) + ClaimAlongRouteArgs::new(&nodes[0], &[path_a, &[&nodes[2], &nodes[3]]], preimage) .with_custom_tlvs(expected_tlvs) ); - expect_payment_sent(&nodes[0], our_payment_preimage, Some(Some(2000)), true, true); + expect_payment_sent(&nodes[0], preimage, Some(Some(2000)), true, true); } else { // Expect fail back - let expected_destinations = vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]; + let expected_destinations = [HTLCHandlingFailureType::Receive { payment_hash: hash }]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], expected_destinations); check_added_monitors!(nodes[3], 1); @@ -4138,19 +4318,17 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![ - HTLCHandlingFailureType::Forward { - node_id: Some(node_d_id), - channel_id: chan_2_3.2 - }]); + let fail = + HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], [fail]); check_added_monitors!(nodes[2], 1); let fail_updates_2 = get_htlc_update_msgs!(nodes[2], node_a_id); nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false); - expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, - PaymentFailedConditions::new().mpp_parts_remain()); + let conditions = PaymentFailedConditions::new().mpp_parts_remain(); + expect_payment_failed_conditions(&nodes[0], hash, true, conditions); } } @@ -4164,12 +4342,13 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); let persister; - let 
new_chain_monitor; + let chain_mon; let mut config = test_default_channel_config(); config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50; - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, Some(config.clone()), Some(config.clone()), Some(config.clone())]); - let nodes_0_deserialized; + let configs = [None, Some(config.clone()), Some(config.clone()), Some(config.clone())]; + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &configs); + let node_d_reload; let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); @@ -4194,9 +4373,13 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); // Send the MPP payment, delivering the updated commitment state to nodes[1]. - nodes[0].node.send_payment(payment_hash, RecipientOnionFields { - payment_secret: Some(payment_secret), payment_metadata: Some(payment_metadata), custom_tlvs: vec![], - }, payment_id, route_params.clone(), Retry::Attempts(1)).unwrap(); + let onion = RecipientOnionFields { + payment_secret: Some(payment_secret), + payment_metadata: Some(payment_metadata), + custom_tlvs: vec![], + }; + let retry = Retry::Attempts(1); + nodes[0].node.send_payment(payment_hash, onion, payment_id, route_params, retry).unwrap(); check_added_monitors!(nodes[0], 2); let mut send_events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -4254,8 +4437,9 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { if do_reload { let mon_bd = get_monitor!(nodes[3], chan_id_bd).encode(); let mon_cd = get_monitor!(nodes[3], chan_id_cd).encode(); - reload_node!(nodes[3], config, &nodes[3].node.encode(), &[&mon_bd, &mon_cd], - persister, new_chain_monitor, nodes_0_deserialized); + let mons = [&mon_bd[..], &mon_cd[..]]; + let node_d_ser = nodes[3].node.encode(); + reload_node!(nodes[3], config, &node_d_ser, &mons[..], persister, chain_mon, node_d_reload); nodes[1].node.peer_disconnected(node_d_id); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[3])); } @@ -4277,7 +4461,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors(&nodes[2], 1); let cs_forward = SendEvent::from_node(&nodes[2]); - let cd_channel_used = cs_forward.msgs[0].channel_id; + let cd_chan_id = cs_forward.msgs[0].channel_id; nodes[3].node.handle_update_add_htlc(node_c_id, &cs_forward.msgs[0]); commitment_signed_dance!(nodes[3], nodes[2], cs_forward.commitment_msg, false, true); @@ -4296,14 +4480,15 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[2].node.handle_update_fail_htlc(node_d_id, &ds_fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true); - expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: cd_channel_used }]); + let events = nodes[2].node.get_and_clear_pending_events(); + let fail_type = + HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: cd_chan_id }; + expect_pending_htlcs_forwardable_conditions(events, &[fail_type]); } else { expect_pending_htlcs_forwardable!(nodes[3]); expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], 
&nodes[3]]], payment_preimage) - ); + let route: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, payment_preimage)); } } @@ -4334,7 +4519,9 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // discovery of this bug. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); + + let configs = [Some(config.clone()), Some(config.clone()), Some(config.clone())]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4364,9 +4551,9 @@ fn test_htlc_forward_considers_anchor_outputs_value() { let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!( nodes[0], nodes[2], sendable_balance_msat + anchor_outpus_value_msat ); - nodes[0].node.send_payment_with_route( - route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -4382,10 +4569,8 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // The forwarding node should reject forwarding it as expected. expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], vec![HTLCHandlingFailureType::Forward { - node_id: Some(node_c_id), - channel_id: chan_id_2 - }]); + let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], [fail]); check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -4404,9 +4589,10 @@ fn test_htlc_forward_considers_anchor_outputs_value() { // invalid update and closes the channel. 
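The value being accounted for in this test is fixed by the anchors spec: two 330 sat anchor outputs per commitment transaction, which must be payable on top of in-flight HTLCs. A sketch of the deduction a forwarder has to make (a simplification of the real reserve accounting):

    // Per-commitment anchor cost with option_anchors: two 330 sat outputs.
    const ANCHOR_OUTPUT_VALUE_SAT: u64 = 330;

    fn forwardable_msat(sendable_balance_msat: u64) -> u64 {
        sendable_balance_msat.saturating_sub(2 * ANCHOR_OUTPUT_VALUE_SAT * 1_000)
    }

    fn main() {
        // With exactly the anchor value available, nothing is forwardable.
        assert_eq!(forwardable_msat(660_000), 0);
    }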
update_add_htlc.channel_id = chan_id_2; nodes[2].node.handle_update_add_htlc(node_b_id, &update_add_htlc); - check_closed_event(&nodes[2], 1, ClosureReason::ProcessingError { - err: "Remote HTLC add would put them under remote reserve value".to_owned() - }, false, &[node_b_id], 1_000_000); + + let err = "Remote HTLC add would put them under remote reserve value".to_owned(); + let reason = ClosureReason::ProcessingError { err }; + check_closed_event(&nodes[2], 1, reason, false, &[node_b_id], 1_000_000); check_closed_broadcast(&nodes[2], 1, true); check_added_monitors(&nodes[2], 1); } @@ -4474,9 +4660,12 @@ fn peel_payment_onion_custom_tlvs() { fn test_non_strict_forwarding() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let mut config = test_default_channel_config(); config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config)]); + let configs = [Some(config.clone()), Some(config.clone()), Some(config)]; + + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4499,8 +4688,9 @@ fn test_non_strict_forwarding() { // Send 4 payments over the same route. for i in 0..4 { let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None); - nodes[0].node.send_payment_with_route(route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); @@ -4517,11 +4707,8 @@ fn test_non_strict_forwarding() { // applying non-strict forwarding. // The channel with the least amount of outbound liquidity will be used to maximize the // probability of being able to successfully forward a subsequent HTLC. - assert_eq!(send_event.msgs[0].channel_id, if i < 2 { - channel_id_1 - } else { - channel_id_2 - }); + let exp_id = if i < 2 { channel_id_1 } else { channel_id_2 }; + assert_eq!(send_event.msgs[0].channel_id, exp_id); nodes[2].node.handle_update_add_htlc(node_b_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[2], nodes[1], &send_event.commitment_msg, false); @@ -4537,8 +4724,10 @@ fn test_non_strict_forwarding() { // Send a 5th payment which will fail. 
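Non-strict forwarding means the SCID in the onion only identifies the next peer; the forwarder may use any of its channels with that peer, and as the assertions above note, preferring the channel with the least outbound liquidity that still fits keeps larger channels free for later HTLCs. A self-contained sketch of that selection rule (channel IDs here are stand-in `u64`s, not LDK types):

    // candidates: (channel_id_stand_in, outbound_liquidity_msat)
    fn pick_channel(mut candidates: Vec<(u64, u64)>, amt_msat: u64) -> Option<u64> {
        candidates.retain(|&(_, liq)| liq >= amt_msat);
        candidates.sort_by_key(|&(_, liq)| liq);
        candidates.first().map(|&(id, _)| id)
    }

    fn main() {
        // Two channels to the same peer: 5_000 and 9_000 msat outbound.
        assert_eq!(pick_channel(vec![(1, 5_000), (2, 9_000)], 4_000), Some(1));
        assert_eq!(pick_channel(vec![(1, 5_000), (2, 9_000)], 6_000), Some(2));
    }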
let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None); - nodes[0].node.send_payment_with_route(route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); @@ -4549,20 +4738,23 @@ fn test_non_strict_forwarding() { expect_pending_htlcs_forwardable!(nodes[1]); check_added_monitors!(nodes[1], 1); let routed_scid = route.paths[0].hops[1].short_channel_id; - let routed_channel_id = match routed_scid { + let routed_chan_id = match routed_scid { scid if scid == chan_update_1.contents.short_channel_id => channel_id_1, scid if scid == chan_update_2.contents.short_channel_id => channel_id_2, _ => panic!("Unexpected short channel id in route"), }; // The failure to forward will refer to the channel given in the onion. - expect_pending_htlcs_forwardable_conditions(nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: routed_channel_id }]); + let events = nodes[1].node.get_and_clear_pending_events(); + let fail = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: routed_chan_id }; + expect_pending_htlcs_forwardable_conditions(events, &[fail]); let updates = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); let events = nodes[0].node.get_and_clear_pending_events(); - expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().blamed_scid(routed_scid)); + let conditions = PaymentFailedConditions::new().blamed_scid(routed_scid); + expect_payment_failed_conditions_event(events, payment_hash, false, conditions); } #[test] @@ -4590,17 +4782,16 @@ fn remove_pending_outbounds_on_buggy_router() { let route_params = route.route_params.clone().unwrap(); nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - nodes[0].node.send_payment( - payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id, route_params, - Retry::Attempts(1) // Even though another attempt is allowed, the payment should fail - ).unwrap(); + // Send the payment with one retry allowed, but the payment should still fail + let onion = RecipientOnionFields::secret_only(payment_secret); + let retry = Retry::Attempts(1); + nodes[0].node.send_payment(payment_hash, onion, payment_id, route_params, retry).unwrap(); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match &events[0] { Event::PaymentPathFailed { failure, payment_failed_permanently, .. 
} => { - assert_eq!(failure, &PathFailure::InitialSend { - err: APIError::InvalidRoute { err: "Path went through the same channel twice".to_string() } - }); + let err = "Path went through the same channel twice".to_string(); + assert_eq!(failure, &PathFailure::InitialSend { err: APIError::InvalidRoute { err } }); assert!(!payment_failed_permanently); }, _ => panic!() @@ -4661,19 +4852,20 @@ fn pay_route_without_params() { let amt_msat = 10_000; let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + let (mut route, hash, preimage, secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); route.route_params.take(); - nodes[0].node.send_payment_with_route( - route, payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0) - ).unwrap(); + + let onion = RecipientOnionFields::secret_only(secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); - pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), node_1_msgs, true, None); + pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, hash, Some(secret), node_1_msgs, true, None); claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], payment_preimage) + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage) ); } @@ -4704,9 +4896,8 @@ fn max_out_mpp_path() { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs( - 3, &node_cfgs, &[Some(user_cfg.clone()), Some(lsp_cfg.clone()), Some(user_cfg.clone())] - ); + let configs = [Some(user_cfg.clone()), Some(lsp_cfg), Some(user_cfg)]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 200_000, 0); @@ -4721,7 +4912,11 @@ fn max_out_mpp_path() { let invoice = nodes[2].node.create_bolt11_invoice(invoice_params).unwrap(); let route_params_cfg = crate::routing::router::RouteParametersConfig::default(); - nodes[0].node.pay_for_bolt11_invoice(&invoice, PaymentId([42; 32]), None, route_params_cfg, Retry::Attempts(0)).unwrap(); + let id = PaymentId([42; 32]); + nodes[0] + .node + .pay_for_bolt11_invoice(&invoice, id, None, route_params_cfg, Retry::Attempts(0)) + .unwrap(); assert!(nodes[0].node.list_recent_payments().len() == 1); check_added_monitors(&nodes[0], 2); // one monitor update per MPP part From 6ec9a47193307a4bae1b759aa93262c2a14eef82 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 5 May 2025 00:25:37 +0000 Subject: [PATCH 03/25] f follow pattern more rigorously --- lightning/src/ln/payment_tests.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index c7e690fed38..eb783e60e1e 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -4913,10 +4913,8 @@ fn max_out_mpp_path() { let route_params_cfg = 
crate::routing::router::RouteParametersConfig::default(); let id = PaymentId([42; 32]); - nodes[0] - .node - .pay_for_bolt11_invoice(&invoice, id, None, route_params_cfg, Retry::Attempts(0)) - .unwrap(); + let retry = Retry::Attempts(0); + nodes[0].node.pay_for_bolt11_invoice(&invoice, id, None, route_params_cfg, retry).unwrap(); assert!(nodes[0].node.list_recent_payments().len() == 1); check_added_monitors(&nodes[0], 2); // one monitor update per MPP part From f180cae9d73028b6a821aaa6d9e2ca09621452da Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 12 May 2025 20:31:50 +0000 Subject: [PATCH 04/25] f restore the payment_ in payment_secret (incl places it wasn't) --- lightning/src/ln/payment_tests.rs | 70 ++++++++++++++++--------------- 1 file changed, 37 insertions(+), 33 deletions(-) diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index eb783e60e1e..e61138cddcc 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -332,7 +332,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3); let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3); - let (mut route, hash, payment_preimage, pay_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000); + let (mut route, hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000); let path = route.paths[0].clone(); route.paths.push(path); route.paths[0].hops[0].pubkey = node_b_id; @@ -343,7 +343,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { route.paths[1].hops[1].short_channel_id = chan_4_update.contents.short_channel_id; // Initiate the MPP payment. - let onion = RecipientOnionFields::secret_only(pay_secret); + let onion = RecipientOnionFields::secret_only(payment_secret); nodes[0].node.send_payment_with_route(route, hash, onion, PaymentId(hash.0)).unwrap(); check_added_monitors!(nodes[0], 2); // one monitor per path let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -352,7 +352,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { // Pass half of the payment along the first path. let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); let path = &[&nodes[1], &nodes[3]]; - pass_along_path(&nodes[0], path, 200_000, hash, Some(pay_secret), node_1_msgs, false, None); + pass_along_path(&nodes[0], path, 200_000, hash, Some(payment_secret), node_1_msgs, false, None); if send_partial_mpp { // Time out the partial MPP @@ -390,8 +390,8 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { // Pass half of the payment along the second path. 
let node_2_msgs = remove_first_msg_event_to_node(&node_c_id, &mut events); let path = &[&nodes[2], &nodes[3]]; - let secret = Some(pay_secret); - pass_along_path(&nodes[0], path, 200_000, hash, secret, node_2_msgs, true, None); + let payment_secret = Some(payment_secret); + pass_along_path(&nodes[0], path, 200_000, hash, payment_secret, node_2_msgs, true, None); // Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts for _ in 0..MPP_TIMEOUT_TICKS { @@ -481,8 +481,8 @@ fn test_mpp_keysend() { ); let preimage = Some(PaymentPreimage([42; 32])); - let secret = PaymentSecret([42; 32]); - let onion = RecipientOnionFields::secret_only(secret); + let payment_secret = PaymentSecret([42; 32]); + let onion = RecipientOnionFields::secret_only(payment_secret); let retry = Retry::Attempts(0); let id = PaymentId([42; 32]); let hash = @@ -494,10 +494,11 @@ fn test_mpp_keysend() { assert_eq!(events.len(), 2); let ev = remove_first_msg_event_to_node(&node_b_id, &mut events); - pass_along_path(&nodes[0], route[0], recv_value, hash, Some(secret), ev, false, preimage); + let payment_secret = Some(payment_secret); + pass_along_path(&nodes[0], route[0], recv_value, hash, payment_secret, ev, false, preimage); let ev = remove_first_msg_event_to_node(&node_c_id, &mut events); - pass_along_path(&nodes[0], route[1], recv_value, hash, Some(secret), ev, true, preimage); + pass_along_path(&nodes[0], route[1], recv_value, hash, payment_secret, ev, true, preimage); claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, preimage.unwrap())); } @@ -709,11 +710,11 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time // out and retry. let amt_msat = 1_000_000; - let (route, payment_hash, payment_preimage, secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000); let route_params = route.route_params.unwrap().clone(); - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment(payment_hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); check_added_monitors!(nodes[0], 1); @@ -884,7 +885,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { nodes[1].node.timer_tick_occurred(); } - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); // Check that we cannot retry a fulfilled payment nodes[0] .node @@ -892,7 +893,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { .unwrap_err(); // ...but if we send with a different PaymentId the payment should fly let id = PaymentId(payment_hash.0); - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); @@ -900,7 +901,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { assert_eq!(events.len(), 1); let event = events.pop().unwrap(); let path = &[&nodes[1], &nodes[2]]; - pass_along_path(&nodes[0], path, 1_000_000, payment_hash, Some(secret), event, true, None); 
+ let payment_secret = Some(payment_secret); + pass_along_path(&nodes[0], path, 1_000_000, payment_hash, payment_secret, event, true, None); do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path], payment_preimage)); expect_payment_sent!(nodes[0], payment_preimage, Some(new_route.paths[0].hops[0].fee_msat)); } @@ -1358,14 +1360,14 @@ fn get_ldk_payment_preimage() { check_added_monitors!(nodes[0], 1); // Make sure to use `get_payment_preimage` - let preimage = nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); + let preimage = Some(nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap()); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let event = events.pop().unwrap(); - let secret = Some(payment_secret); + let payment_secret = Some(payment_secret); let path = &[&nodes[1]]; - pass_along_path(&nodes[0], path, amt_msat, payment_hash, secret, event, true, Some(preimage)); - claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path], preimage)); + pass_along_path(&nodes[0], path, amt_msat, payment_hash, payment_secret, event, true, preimage); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path], preimage.unwrap())); } #[test] @@ -3862,11 +3864,12 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let chan_bd = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 1_000_000, 0).2; create_announced_chan_between_nodes(&nodes, 2, 3); - let (payment_preimage, hash, secret) = get_payment_preimage_hash!(nodes[3]); + let (payment_preimage, hash, payment_secret) = get_payment_preimage_hash!(nodes[3]); let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000_000); + let amt_msat = 10_000_000; + let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); let inflight = nodes[0].node.compute_inflight_htlcs(); let mut route = nodes[0].router.find_route(&node_a_id, &route_params, None, inflight).unwrap(); @@ -3881,7 +3884,7 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let final_cltv = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 8 + 1; nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(hash.0); nodes[0].node.send_payment(hash, onion, id, route_params, Retry::Attempts(1)).unwrap(); @@ -3898,9 +3901,9 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let (msg_a, msg_b) = (send_msgs.remove(0), send_msgs.remove(0)); let (path_a, path_b) = (&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]); - pass_along_path(&nodes[0], path_a, 10_000_000, hash, Some(secret), msg_a, false, None); + pass_along_path(&nodes[0], path_a, amt_msat, hash, Some(payment_secret), msg_a, false, None); let receive_event = - pass_along_path(&nodes[0], path_b, 10_000_000, hash, Some(secret), msg_b, true, None); + pass_along_path(&nodes[0], path_b, amt_msat, hash, Some(payment_secret), msg_b, true, None); match receive_event.unwrap() { Event::PaymentClaimable { claim_deadline, .. 
} => { @@ -4016,14 +4019,14 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 100_000; - let (mut route, hash, preimage, secret) = get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat); + let (mut route, hash, preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat); let id = PaymentId(hash.0); let custom_tlvs = vec![ (if even_tlvs { 5482373482 } else { 5482373483 }, vec![1, 2, 3, 4]), (5482373487, vec![0x42u8; 16]), ]; let onion = RecipientOnionFields { - payment_secret: if spontaneous { None } else { Some(secret) }, + payment_secret: if spontaneous { None } else { Some(payment_secret) }, payment_metadata: None, custom_tlvs: custom_tlvs.clone() }; @@ -4227,13 +4230,13 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec<u8>)>, second_tlvs: if path_a.hops[0].pubkey == node_b_id { Ordering::Less } else { Ordering::Greater } }); - let (preimage, hash, secret) = get_payment_preimage_hash!(&nodes[3]); + let (preimage, hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]); let id = PaymentId([42; 32]); let amt_msat = 15_000_000; // Send first part let onion = RecipientOnionFields { - payment_secret: Some(secret), + payment_secret: Some(payment_secret), payment_metadata: None, custom_tlvs: first_tlvs }; @@ -4252,13 +4255,13 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec<u8>)>, second_tlvs: assert_eq!(events.len(), 1); let event = events.pop().unwrap(); let path_a = &[&nodes[1], &nodes[3]]; - pass_along_path(&nodes[0], path_a, amt_msat, hash, Some(secret), event, false, None); + pass_along_path(&nodes[0], path_a, amt_msat, hash, Some(payment_secret), event, false, None); assert!(nodes[3].node.get_and_clear_pending_events().is_empty()); // Send second part let onion = RecipientOnionFields { - payment_secret: Some(secret), + payment_secret: Some(payment_secret), payment_metadata: None, custom_tlvs: second_tlvs }; @@ -4852,10 +4855,10 @@ fn pay_route_without_params() { let amt_msat = 10_000; let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, hash, preimage, secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + let (mut route, hash, preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); route.route_params.take(); - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(hash.0); nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); @@ -4863,9 +4866,10 @@ fn pay_route_without_params() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); - pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, hash, Some(secret), node_1_msgs, true, None); + let path = &[&nodes[1]]; + pass_along_path(&nodes[0], path, amt_msat, hash, Some(payment_secret), node_1_msgs, true, None); claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage) + ClaimAlongRouteArgs::new(&nodes[0], &[path], preimage) ); } From 8757b6566979b6db94d89d6867d428ae05016223 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 12 May 2025 22:34:06 +0000 Subject: [PATCH 05/25] f more _monitor in chain_monitor --- lightning/src/ln/payment_tests.rs
| 34 +++++++++++++++---------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index e61138cddcc..8fa55c64648 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -926,16 +926,16 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { manually_accept_config.manually_accept_inbound_channels = true; let persist_1; - let chain_1; + let chain_monitor_1; let persist_2; - let chain_2; + let chain_monitor_2; let persist_3; - let chain_3; + let chain_monitor_3; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]); - let node_a_reload_1; - let node_a_reload_2; - let node_a_reload_3; + let node_a_1; + let node_a_2; + let node_a_3; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -965,7 +965,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let mon_ser = get_monitor!(nodes[0], chan_id).encode(); let config = test_default_channel_config(); - reload_node!(nodes[0], config, node_a_ser, &[&mon_ser], persist_1, chain_1, node_a_reload_1); + reload_node!(nodes[0], config, node_a_ser, &[&mon_ser], persist_1, chain_monitor_1, node_a_1); nodes[1].node.peer_disconnected(node_a_id); // On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and @@ -1081,7 +1081,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let config = test_default_channel_config(); let monitors = &[&chan_0_monitor_serialized[..], &chan_1_monitor_serialized[..]]; - reload_node!(nodes[0], config, node_a_ser, monitors, persist_2, chain_2, node_a_reload_2); + reload_node!(nodes[0], config, node_a_ser, monitors, persist_2, chain_monitor_2, node_a_2); nodes[1].node.peer_disconnected(node_a_id); nodes[0].node.test_process_background_events(); @@ -1113,7 +1113,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // claimed previously). let config = test_default_channel_config(); let monitors = &[&chan_0_monitor_serialized[..], &chan_1_monitor_serialized[..]]; - reload_node!(nodes[0], config, node_a_ser, monitors, persist_3, chain_3, node_a_reload_3); + reload_node!(nodes[0], config, node_a_ser, monitors, persist_3, chain_monitor_3, node_a_3); nodes[1].node.peer_disconnected(node_a_id); nodes[0].node.test_process_background_events(); @@ -1146,7 +1146,7 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let persister; - let chain; + let chain_monitor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let node_a_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1226,9 +1226,9 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo // If we persist the ChannelManager here, we should get the PaymentSent event after // deserialization. - let mut chan_manager_serialized = Vec::new(); + let mut node_a_ser = Vec::new(); if !persist_manager_post_event { - chan_manager_serialized = nodes[0].node.encode(); + node_a_ser = nodes[0].node.encode(); } let mon_ser = get_monitor!(nodes[0], chan_id).encode(); @@ -1241,11 +1241,11 @@ fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bo // If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it // twice. 
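The renames above all touch the reload harness this file uses throughout: serialize the ChannelManager and the channel's ChannelMonitor, then rebuild the node with `reload_node!`. A minimal sketch of the sequence, using only helpers visible in these hunks (surrounding assertions elided):

    // Snapshot the manager either before or after the PaymentSent event fires,
    // depending on which deserialization path the test exercises.
    let node_a_ser = nodes[0].node.encode();
    let mon_ser = get_monitor!(nodes[0], chan_id).encode();

    // Rebuild nodes[0] from the snapshots; persister, chain_monitor and
    // node_a_reload are fresh bindings declared before node creation.
    reload_node!(nodes[0], &node_a_ser, &[&mon_ser], persister, chain_monitor, node_a_reload);

A manager serialized before the event replays it on reload; one serialized after it must not emit the event a second time, which is exactly what the `persist_manager_post_event` branches below assert.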
if persist_manager_post_event { - chan_manager_serialized = nodes[0].node.encode(); + node_a_ser = nodes[0].node.encode(); } // Now reload nodes[0]... - reload_node!(nodes[0], &chan_manager_serialized, &[&mon_ser], persister, chain, node_a_reload); + reload_node!(nodes[0], &node_a_ser, &[&mon_ser], persister, chain_monitor, node_a_reload); if persist_manager_post_event { assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); @@ -1284,7 +1284,7 @@ fn test_fulfill_restart_failure() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let persister; - let chain; + let chain_monitor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let node_b_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1297,7 +1297,7 @@ fn test_fulfill_restart_failure() { // The simplest way to get a failure after a fulfill is to reload nodes[1] from a state // pre-fulfill, which we do by serializing it here. - let chan_manager_serialized = nodes[1].node.encode(); + let node_b_ser = nodes[1].node.encode(); let mon_ser = get_monitor!(nodes[1], chan_id).encode(); nodes[1].node.claim_funds(payment_preimage); @@ -1309,7 +1309,7 @@ fn test_fulfill_restart_failure() { expect_payment_sent(&nodes[0], payment_preimage, None, false, false); // Now reload nodes[1]... - reload_node!(nodes[1], &chan_manager_serialized, &[&mon_ser], persister, chain, node_b_reload); + reload_node!(nodes[1], &node_b_ser, &[&mon_ser], persister, chain_monitor, node_b_reload); nodes[0].node.peer_disconnected(node_b_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); From 5a0ced5b6f1a19f9d7ff4349f70d473c25d42cd3 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 1 May 2025 14:04:02 +0000 Subject: [PATCH 06/25] Run `rustfmt` on `payment_tests.rs` --- lightning/src/ln/payment_tests.rs | 1262 ++++++++++++++++++----------- rustfmt_excluded_files | 1 - 2 files changed, 799 insertions(+), 464 deletions(-) diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 8fa55c64648..389d0f15fb9 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -11,30 +11,46 @@ //! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry //! payments thereafter. 
+use crate::chain::channelmonitor::{ + ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, +}; use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen}; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; -use crate::sign::EntropySource; -use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PathFailure, PaymentFailureReason, PaymentPurpose}; -use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI}; -use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; -use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; -use crate::ln::msgs; -use crate::ln::types::ChannelId; -use crate::types::payment::{PaymentHash, PaymentSecret, PaymentPreimage}; +use crate::events::{ + ClosureReason, Event, HTLCHandlingFailureType, PathFailure, PaymentFailureReason, + PaymentPurpose, +}; use crate::ln::chan_utils; +use crate::ln::channel::{ + get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI, + EXPIRE_PREV_CONFIG_TICKS, +}; +use crate::ln::channelmanager::{ + HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCRouting, RecentPaymentDetails, + RecipientOnionFields, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MPP_TIMEOUT_TICKS, +}; +use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; -use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, ProbeSendFailure, Retry, RetryableSendFailure}; +use crate::ln::outbound_payment::{ + ProbeSendFailure, Retry, RetryableSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, +}; +use crate::ln::types::ChannelId; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; -use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters}; +use crate::routing::router::{ + get_route, Path, PaymentParameters, Route, RouteHint, RouteHintHop, RouteHop, RouteParameters, + Router, +}; use crate::routing::scoring::ChannelUsage; -use crate::util::test_utils; +use crate::sign::EntropySource; +use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; +use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::string::UntrustedString; +use crate::util::test_utils; -use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; use bitcoin::network::Network; use bitcoin::secp256k1::{Secp256k1, SecretKey}; @@ -49,7 +65,7 @@ use core::cmp::Ordering; #[cfg(feature = "std")] use { crate::util::time::Instant as TestTime, - std::time::{SystemTime, Instant, Duration}, + std::time::{Duration, Instant, SystemTime}, }; #[test] @@ -67,7 +83,8 @@ fn mpp_failure() { let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id; - let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); + let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); let path = route.paths[0].clone(); route.paths.push(path); 
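The hunk above reflows the hand-rolled MPP splitting idiom these tests share: rather than asking the router for a multi-path route, they duplicate the single returned path and edit each copy's hops in place. Roughly (first-hop pubkeys as in the surrounding tests; the matching `short_channel_id` edits follow the same pattern):

    // Duplicate the router's single path...
    let path = route.paths[0].clone();
    route.paths.push(path);
    // ...then steer each copy through a different first hop so the two
    // shards take disjoint routes to the same recipient.
    route.paths[0].hops[0].pubkey = node_b_id;
    route.paths[1].hops[0].pubkey = node_c_id;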
route.paths[0].hops[0].pubkey = node_b_id; @@ -104,7 +121,8 @@ fn mpp_retry() { let amt_msat = 1_000_000; let max_fee = 50_000; let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); + .with_bolt11_features(nodes[3].node.bolt11_invoice_features()) + .unwrap(); let (mut route, hash, preimage, pay_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, amt_msat, Some(max_fee)); let path = route.paths[0].clone(); @@ -154,7 +172,7 @@ fn mpp_retry() { let mut events = nodes[0].node.get_and_clear_pending_events(); match events[1] { Event::PendingHTLCsForwardable { .. } => {}, - _ => panic!("Unexpected event") + _ => panic!("Unexpected event"), } events.remove(1); @@ -210,16 +228,21 @@ fn mpp_retry_overpay() { let node_c_id = nodes[2].node.get_our_node_id(); let node_d_id = nodes[3].node.get_our_node_id(); - let (chan_1_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 40_000, 0); - let (chan_2_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 40_000, 0); - let (_chan_3_update, _, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 40_000, 0); - let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes_with_value(&nodes, 3, 2, 40_000, 0); + let (chan_1_update, _, _, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 40_000, 0); + let (chan_2_update, _, _, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 40_000, 0); + let (_chan_3_update, _, _, _) = + create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 40_000, 0); + let (chan_4_update, _, chan_4_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 3, 2, 40_000, 0); let amt_msat = 70_000_000; let max_fee = Some(1_000_000); let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); + .with_bolt11_features(nodes[3].node.bolt11_invoice_features()) + .unwrap(); let (mut route, hash, payment_preimage, pay_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], payment_params, amt_msat, max_fee); @@ -267,13 +290,12 @@ fn mpp_retry_overpay() { assert!(htlc_updates.update_fulfill_htlcs.is_empty()); assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); check_added_monitors!(nodes[2], 1); - nodes[0].node.handle_update_fail_htlc(node_c_id, - &htlc_updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_c_id, &htlc_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false); let mut events = nodes[0].node.get_and_clear_pending_events(); match events[1] { Event::PendingHTLCsForwardable { .. 
} => {}, - _ => panic!("Unexpected event") + _ => panic!("Unexpected event"), } events.remove(1); let fail_conditions = PaymentFailedConditions::new().mpp_parts_remain(); @@ -332,7 +354,8 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3); let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3); - let (mut route, hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000); + let (mut route, hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[3], 100_000); let path = route.paths[0].clone(); route.paths.push(path); route.paths[0].hops[0].pubkey = node_b_id; @@ -430,7 +453,9 @@ fn do_test_keysend_payments(public_node: bool) { create_chan_between_nodes(&nodes[0], &nodes[1]); } let route_params = RouteParameters::from_payment_params_and_value( - PaymentParameters::for_keysend(node_b_id, 40, false), 10000); + PaymentParameters::for_keysend(node_b_id, 40, false), + 10000, + ); { let preimage = Some(PaymentPreimage([42; 32])); @@ -455,7 +480,9 @@ fn do_test_keysend_payments(public_node: bool) { if let PaymentPurpose::SpontaneousPayment(preimage) = purpose { claim_payment(&nodes[0], &[&nodes[1]], *preimage); } - } else { panic!(); } + } else { + panic!(); + } } #[test] @@ -524,7 +551,8 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { let (update_a, _, chan_4_chan_id, _) = create_announced_chan_between_nodes(&nodes, 2, 3); let chan_4_id = update_a.contents.short_channel_id; let amount = 40_000; - let (mut route, payment_hash, payment_preimage, _) = get_route_and_payment_hash!(nodes[0], nodes[3], amount); + let (mut route, payment_hash, payment_preimage, _) = + get_route_and_payment_hash!(nodes[0], nodes[3], amount); let preimage = Some(payment_preimage); // Pay along nodes[1] @@ -558,16 +586,16 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { for (_, pending_forwards) in nodes[3].node.forward_htlcs.lock().unwrap().iter_mut() { for f in pending_forwards.iter_mut() { match f { - &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) => { - match forward_info.routing { - PendingHTLCRouting::ReceiveKeysend { ref mut payment_data, .. } => { - *payment_data = Some(msgs::FinalOnionHopData { - payment_secret: PaymentSecret([42; 32]), - total_msat: amount * 2, - }); - }, - _ => panic!("Expected PendingHTLCRouting::ReceiveKeysend"), - } + &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + ref mut forward_info, .. + }) => match forward_info.routing { + PendingHTLCRouting::ReceiveKeysend { ref mut payment_data, .. } => { + *payment_data = Some(msgs::FinalOnionHopData { + payment_secret: PaymentSecret([42; 32]), + total_msat: amount * 2, + }); + }, + _ => panic!("Expected PendingHTLCRouting::ReceiveKeysend"), }, _ => {}, } @@ -607,7 +635,9 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { for (_, pending_forwards) in nodes[3].node.forward_htlcs.lock().unwrap().iter_mut() { for f in pending_forwards.iter_mut() { match f { - &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) => { + &mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + ref mut forward_info, .. + }) => { match forward_info.routing { PendingHTLCRouting::ReceiveKeysend { ref mut payment_data, .. 
} => { *payment_data = Some(msgs::FinalOnionHopData { @@ -644,7 +674,6 @@ fn test_reject_mpp_keysend_htlc_mismatching_secret() { expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new()); } - #[test] fn no_pending_leak_on_initial_send_failure() { // In an earlier version of our payment tracking, we'd have a retry entry even when the initial @@ -663,7 +692,8 @@ fn no_pending_leak_on_initial_send_failure() { create_announced_chan_between_nodes(&nodes, 0, 1); - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); @@ -710,8 +740,10 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time // out and retry. let amt_msat = 1_000_000; - let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); - let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000); + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + let (payment_preimage_1, payment_hash_1, _, payment_id_1) = + send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000); let route_params = route.route_params.unwrap().clone(); let onion = RecipientOnionFields::secret_only(payment_secret); @@ -736,7 +768,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2}] + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] ); check_added_monitors(&nodes[1], 1); @@ -765,7 +797,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { assert!(nodes[0].node.has_pending_payments()); nodes[0].node.timer_tick_occurred(); if !confirm_before_reload { - let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + let as_broadcasted_txn = + nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(as_broadcasted_txn.len(), 1); assert_eq!(as_broadcasted_txn[0].compute_txid(), as_commitment_tx.compute_txid()); } else { @@ -791,7 +824,10 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { let as_err = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_err.len(), 2); match as_err[1] { - MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => { + MessageSendEvent::HandleError { + node_id, + action: msgs::ErrorAction::SendErrorMessage { ref msg }, + } => { assert_eq!(node_id, node_b_id); nodes[1].node.handle_error(node_a_id, msg); check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", @@ -844,7 +880,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { } mine_transaction(&nodes[0], &bs_htlc_claim_txn); expect_payment_sent(&nodes[0], payment_preimage_1, None, true, false); - connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20); + connect_blocks(&nodes[0], TEST_FINAL_CLTV * 4 + 20); let (first_htlc_timeout_tx, second_htlc_timeout_tx) = { let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); assert_eq!(txn.len(), 2); @@ -852,7 +888,8 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { }; check_spends!(first_htlc_timeout_tx, as_commitment_tx); check_spends!(second_htlc_timeout_tx, as_commitment_tx); - if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn.input[0].previous_output { + if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn.input[0].previous_output + { confirm_transaction(&nodes[0], &second_htlc_timeout_tx); } else { confirm_transaction(&nodes[0], &first_htlc_timeout_tx); @@ -871,8 +908,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { // do_claim_payment_along_route expects us to never overpay. { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let mut peer_state = per_peer_state.get(&node_c_id) - .unwrap().lock().unwrap(); + let mut peer_state = per_peer_state.get(&node_c_id).unwrap().lock().unwrap(); let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap(); let mut new_config = channel.context().config(); new_config.forwarding_fee_base_msat += 100_000; @@ -932,7 +968,8 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let persist_3; let chain_monitor_3; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]); + let node_chanmgrs = + create_node_chanmgrs(3, &node_cfgs, &[None, Some(manually_accept_config), None]); let node_a_1; let node_a_2; let node_a_3; @@ -994,7 +1031,10 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { assert_eq!(as_err.len(), 2); let bs_commitment_tx; match as_err[1] { - MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => { + MessageSendEvent::HandleError { + node_id, + action: msgs::ErrorAction::SendErrorMessage { ref msg }, + } => { assert_eq!(node_id, node_b_id); nodes[1].node.handle_error(node_a_id, msg); let msg = format!( @@ -1021,8 +1061,10 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); nodes[1].node.handle_update_fail_htlc(node_c_id, &htlc_fulfill_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] + ); // Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming // the HTLC-Timeout transaction beyond 1 conf). 
For dust HTLCs, the HTLC is considered resolved @@ -1059,7 +1101,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let onion = RecipientOnionFields::secret_only(payment_secret); match nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id) { Err(RetryableSendFailure::DuplicatePayment) => {}, - _ => panic!("Unexpected error") + _ => panic!("Unexpected error"), } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1101,7 +1143,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let onion = RecipientOnionFields::secret_only(payment_secret); match nodes[0].node.send_payment_with_route(new_route.clone(), hash, onion, payment_id) { Err(RetryableSendFailure::DuplicatePayment) => {}, - _ => panic!("Unexpected error") + _ => panic!("Unexpected error"), } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1123,7 +1165,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { let onion = RecipientOnionFields::secret_only(payment_secret); match nodes[0].node.send_payment_with_route(new_route, hash, onion, payment_id) { Err(RetryableSendFailure::DuplicatePayment) => {}, - _ => panic!("Unexpected error") + _ => panic!("Unexpected error"), } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -1134,7 +1176,9 @@ fn test_completed_payment_not_retryable_on_reload() { do_test_completed_payment_not_retryable_on_reload(false); } -fn do_test_dup_htlc_onchain_doesnt_fail_on_reload(persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool) { +fn do_test_dup_htlc_onchain_doesnt_fail_on_reload( + persist_manager_post_event: bool, confirm_commitment_tx: bool, payment_timeout: bool, +) { // When a Channel is closed, any outbound HTLCs which were relayed through it are simply // dropped. 
From there, the ChannelManager relies on the ChannelMonitor having a copy of the // relevant fail-/claim-back data and processes the HTLC fail/claim when the ChannelMonitor tells // it to. @@ -1342,18 +1386,26 @@ fn get_ldk_payment_preimage() { let amt_msat = 60_000; let expiry_secs = 60 * 60; - let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(amt_msat), expiry_secs, None).unwrap(); + let (payment_hash, payment_secret) = + nodes[1].node.create_inbound_payment(Some(amt_msat), expiry_secs, None).unwrap(); let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let scorer = test_utils::TestScorer::new(); let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet); let random_seed_bytes = keys_manager.get_secure_random_bytes(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); - let route = get_route(&node_a_id, &route_params, + let route = get_route( + &node_a_id, + &route_params, &nodes[0].network_graph.read_only(), - Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()), nodes[0].logger, - &scorer, &Default::default(), &random_seed_bytes); + Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()), + nodes[0].logger, + &scorer, + &Default::default(), + &random_seed_bytes, + ); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.unwrap(), payment_hash, onion, id).unwrap(); @@ -1563,13 +1615,15 @@ fn preflight_probes_yield_event_skip_private_hop() { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let recv_value = 50_000_000; let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value); let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap(); - let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[2], &nodes[3]], res[0].0)]; + let expected_route: &[(&[&Node], PaymentHash)] = + &[(&[&nodes[1], &nodes[2], &nodes[3]], res[0].0)]; assert_eq!(res.len(), expected_route.len()); @@ -1612,13 +1666,15 @@ fn preflight_probes_yield_event() { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let recv_value = 50_000_000; let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value); let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap(); - let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[3]], res[0].0), (&[&nodes[2], &nodes[3]], res[1].0)]; + let expected_route: &[(&[&Node], PaymentHash)] = + &[(&[&nodes[1], &nodes[3]], res[0].0), (&[&nodes[2], &nodes[3]], res[1].0)]; assert_eq!(res.len(), expected_route.len()); @@ -1663,13 +1719,15 @@ fn preflight_probes_yield_event_and_skip() { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_e_id, TEST_FINAL_CLTV) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let recv_value = 80_000_000; let
route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value); let res = nodes[0].node.send_preflight_probes(route_params, None).unwrap(); - let expected_route: &[(&[&Node], PaymentHash)] = &[(&[&nodes[1], &nodes[2], &nodes[4]], res[0].0)]; + let expected_route: &[(&[&Node], PaymentHash)] = + &[(&[&nodes[1], &nodes[2], &nodes[4]], res[0].0)]; // We check that only one probe was sent, the other one was skipped due to limited liquidity. assert_eq!(res.len(), 1); @@ -1691,8 +1749,10 @@ fn claimed_send_payment_idempotent() { create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (route, hash_b, preimage_b, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - let (preimage_a, _, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000); + let (route, hash_b, preimage_b, second_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); + let (preimage_a, _, _, payment_id) = + send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000); macro_rules! check_send_rejected { () => { @@ -1709,14 +1769,17 @@ fn claimed_send_payment_idempotent() { // Further, if we try to send a spontaneous payment with the same payment_id it should // also be rejected. let send_result = nodes[0].node.send_spontaneous_payment( - None, RecipientOnionFields::spontaneous_empty(), payment_id, - route.route_params.clone().unwrap(), Retry::Attempts(0) + None, + RecipientOnionFields::spontaneous_empty(), + payment_id, + route.route_params.clone().unwrap(), + Retry::Attempts(0), ); match send_result { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), } - } + }; } check_send_rejected!(); @@ -1724,9 +1787,7 @@ fn claimed_send_payment_idempotent() { // Claim the payment backwards, but note that the PaymentSent event is still pending and has // not been seen by the user. At this point, from the user perspective nothing has changed, so // we must remain just as idempotent as we were before. - do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage_a) - ); + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage_a)); for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS { nodes[0].node.timer_tick_occurred(); @@ -1771,8 +1832,10 @@ fn abandoned_send_payment_idempotent() { create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (route, hash_b, second_payment_preimage, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - let (_, first_payment_hash, _, payment_id) = send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000); + let (route, hash_b, second_payment_preimage, second_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); + let (_, first_payment_hash, _, payment_id) = + send_along_route(&nodes[0], route.clone(), &[&nodes[1]], 100_000); macro_rules! check_send_rejected { () => { @@ -1784,19 +1847,22 @@ fn abandoned_send_payment_idempotent() { match send_result { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), - } + }; // Further, if we try to send a spontaneous payment with the same payment_id it should // also be rejected. 
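The comment above states the core idempotency contract: while a `PaymentId` is still tracked, any re-send with it must fail with `RetryableSendFailure::DuplicatePayment`, and the id is only released some timer ticks after the payment resolves. A condensed sketch using only calls from this hunk:

    // While payment_id is tracked, a spontaneous re-send with it is rejected...
    let send_result = nodes[0].node.send_spontaneous_payment(
        None,
        RecipientOnionFields::spontaneous_empty(),
        payment_id,
        route.route_params.clone().unwrap(),
        Retry::Attempts(0),
    );
    assert!(matches!(send_result, Err(RetryableSendFailure::DuplicatePayment)));

    // ...and only after resolution plus IDEMPOTENCY_TIMEOUT_TICKS timer ticks
    // is the id forgotten and safe to reuse.
    for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
        nodes[0].node.timer_tick_occurred();
    }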
let send_result = nodes[0].node.send_spontaneous_payment( - None, RecipientOnionFields::spontaneous_empty(), payment_id, - route.route_params.clone().unwrap(), Retry::Attempts(0) + None, + RecipientOnionFields::spontaneous_empty(), + payment_id, + route.route_params.clone().unwrap(), + Retry::Attempts(0), ); match send_result { Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), } - } + }; } check_send_rejected!(); @@ -1832,7 +1898,7 @@ enum InterceptTest { } #[test] -fn test_trivial_inflight_htlc_tracking(){ +fn test_trivial_inflight_htlc_tracking() { // In this test, we test three scenarios: // (1) Sending + claiming a payment successfully should return `None` when querying InFlightHtlcs // (2) Sending a payment without claiming it should return the payment's value (500000) when querying InFlightHtlcs @@ -1861,7 +1927,7 @@ fn test_trivial_inflight_htlc_tracking(){ let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( &NodeId::from_pubkey(&node_a_id), &NodeId::from_pubkey(&node_b_id), - channel_1.context().get_short_channel_id().unwrap() + channel_1.context().get_short_channel_id().unwrap(), ); assert_eq!(chan_1_used_liquidity, None); } @@ -1872,9 +1938,9 @@ fn test_trivial_inflight_htlc_tracking(){ get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&node_b_id) , + &NodeId::from_pubkey(&node_b_id), &NodeId::from_pubkey(&node_c_id), - channel_2.context().get_short_channel_id().unwrap() + channel_2.context().get_short_channel_id().unwrap(), ); assert_eq!(chan_2_used_liquidity, None); @@ -1890,7 +1956,8 @@ fn test_trivial_inflight_htlc_tracking(){ } // Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment. 
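The assertions this hunk reformats encode the fee accounting for in-flight liquidity: the first hop carries the payment amount plus nodes[1]'s 1,000 msat forwarding fee, while the second hop carries the amount alone. A condensed sketch for the unclaimed 500,000 msat payment (`chan_1_scid` and `chan_2_scid` are hypothetical stand-ins for the short channel ids the test reads out of each channel's context):

    let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();

    // First hop: 500_000 msat payment + 1_000 msat forwarding fee.
    let chan_1_used = inflight_htlcs.used_liquidity_msat(
        &NodeId::from_pubkey(&node_a_id),
        &NodeId::from_pubkey(&node_b_id),
        chan_1_scid,
    );
    assert_eq!(chan_1_used, Some(501_000));

    // Second hop: the fee stays with nodes[1], so only the amount is in flight.
    let chan_2_used = inflight_htlcs.used_liquidity_msat(
        &NodeId::from_pubkey(&node_b_id),
        &NodeId::from_pubkey(&node_c_id),
        chan_2_scid,
    );
    assert_eq!(chan_2_used, Some(500_000));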
- let (payment_preimage, payment_hash, _, payment_id) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000); + let (payment_preimage, payment_hash, _, payment_id) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000); let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs(); { let mut per_peer_lock; @@ -1899,9 +1966,9 @@ fn test_trivial_inflight_htlc_tracking(){ get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1_id); let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&node_a_id) , + &NodeId::from_pubkey(&node_a_id), &NodeId::from_pubkey(&node_b_id), - channel_1.context().get_short_channel_id().unwrap() + channel_1.context().get_short_channel_id().unwrap(), ); // First hop accounts for expected 1000 msat fee assert_eq!(chan_1_used_liquidity, Some(501000)); @@ -1913,9 +1980,9 @@ fn test_trivial_inflight_htlc_tracking(){ get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&node_b_id) , + &NodeId::from_pubkey(&node_b_id), &NodeId::from_pubkey(&node_c_id), - channel_2.context().get_short_channel_id().unwrap() + channel_2.context().get_short_channel_id().unwrap(), ); assert_eq!(chan_2_used_liquidity, Some(500000)); @@ -1941,9 +2008,9 @@ fn test_trivial_inflight_htlc_tracking(){ get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1_id); let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&node_a_id) , + &NodeId::from_pubkey(&node_a_id), &NodeId::from_pubkey(&node_b_id), - channel_1.context().get_short_channel_id().unwrap() + channel_1.context().get_short_channel_id().unwrap(), ); assert_eq!(chan_1_used_liquidity, None); } @@ -1954,9 +2021,9 @@ fn test_trivial_inflight_htlc_tracking(){ get_channel_ref!(&nodes[1], nodes[2], per_peer_lock, peer_state_lock, chan_2_id); let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&node_b_id) , + &NodeId::from_pubkey(&node_b_id), &NodeId::from_pubkey(&node_c_id), - channel_2.context().get_short_channel_id().unwrap() + channel_2.context().get_short_channel_id().unwrap(), ); assert_eq!(chan_2_used_liquidity, None); } @@ -1977,7 +2044,8 @@ fn test_holding_cell_inflight_htlcs() { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, payment_hash_1, _, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let (_, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); // Queue up two payments - one will be delivered right away, one immediately goes into the @@ -2003,9 +2071,9 @@ fn test_holding_cell_inflight_htlcs() { get_channel_ref!(&nodes[0], nodes[1], per_peer_lock, peer_state_lock, channel_id); let used_liquidity = inflight_htlcs.used_liquidity_msat( - &NodeId::from_pubkey(&node_a_id) , + &NodeId::from_pubkey(&node_a_id), &NodeId::from_pubkey(&node_b_id), - channel.context().get_short_channel_id().unwrap() + channel.context().get_short_channel_id().unwrap(), ); assert_eq!(used_liquidity, Some(2000000)); @@ -2052,27 +2120,32 @@ fn do_test_intercepted_payment(test: InterceptTest) { let amt_msat = 100_000; let intercept_scid = nodes[1].node.get_intercept_scid(); let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) - .with_route_hints(vec![ - RouteHint(vec![RouteHintHop 
{ - src_node_id: node_b_id, - short_channel_id: intercept_scid, - fees: RoutingFees { - base_msat: 1000, - proportional_millionths: 0, - }, - cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA, - htlc_minimum_msat: None, - htlc_maximum_msat: None, - }]) - ]).unwrap() - .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap(); + .with_route_hints(vec![RouteHint(vec![RouteHintHop { + src_node_id: node_b_id, + short_channel_id: intercept_scid, + fees: RoutingFees { base_msat: 1000, proportional_millionths: 0 }, + cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA, + htlc_minimum_msat: None, + htlc_maximum_msat: None, + }])]) + .unwrap() + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); let route = get_route( - &node_a_id, &route_params, &nodes[0].network_graph.read_only(), None, - nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes - ).unwrap(); - - let (hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap(); + &node_a_id, + &route_params, + &nodes[0].network_graph.read_only(), + None, + nodes[0].logger, + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); + + let (hash, payment_secret) = + nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap(); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(hash.0); nodes[0].node.send_payment_with_route(route.clone(), hash, onion, id).unwrap(); @@ -2095,14 +2168,18 @@ fn do_test_intercepted_payment(test: InterceptTest) { assert_eq!(events.len(), 1); let (intercept_id, outbound_amt) = match events[0] { crate::events::Event::HTLCIntercepted { - intercept_id, expected_outbound_amount_msat, payment_hash, inbound_amount_msat, requested_next_hop_scid: short_channel_id + intercept_id, + expected_outbound_amount_msat, + payment_hash, + inbound_amount_msat, + requested_next_hop_scid: short_channel_id, } => { assert_eq!(payment_hash, hash); assert_eq!(inbound_amount_msat, route.get_total_amount() + route.get_total_fees()); assert_eq!(short_channel_id, intercept_scid); (intercept_id, expected_outbound_amount_msat) }, - _ => panic!() + _ => panic!(), }; // Check for unknown channel id error. @@ -2152,7 +2229,10 @@ fn do_test_intercepted_payment(test: InterceptTest) { let (_, chan_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None); // Finally, forward the intercepted payment through and claim it. 
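For context, the interception flow being reformatted here has three steps: the sender routes over a fake `intercept_scid` route hint, nodes[1] raises `Event::HTLCIntercepted` instead of forwarding, and the test then releases the held HTLC into a real channel. A minimal sketch using only calls visible in these hunks:

    // 1) Pull the intercept id and the amount to forward out of the event.
    let (intercept_id, outbound_amt) = match events[0] {
        crate::events::Event::HTLCIntercepted {
            intercept_id, expected_outbound_amount_msat, ..
        } => (intercept_id, expected_outbound_amount_msat),
        _ => panic!(),
    };

    // 2) Open the real (here zero-conf) channel the HTLC should flow into.
    let (_, chan_id) = open_zero_conf_channel(&nodes[1], &nodes[2], None);

    // 3) Release the held HTLC (or drop it via fail_intercepted_htlc instead).
    nodes[1]
        .node
        .forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt)
        .unwrap();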
- nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt).unwrap(); + nodes[1] + .node + .forward_intercepted_htlc(intercept_id, &chan_id, node_c_id, outbound_amt) + .unwrap(); expect_pending_htlcs_forwardable!(nodes[1]); let payment_event = { @@ -2169,8 +2249,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { commitment_signed_dance!(nodes[2], nodes[1], &payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); - let preimage = - Some(nodes[2].node.get_payment_preimage(hash, payment_secret).unwrap()); + let preimage = Some(nodes[2].node.get_payment_preimage(hash, payment_secret).unwrap()); expect_payment_claimable!(&nodes[2], hash, payment_secret, amt_msat, preimage, node_c_id); let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; @@ -2184,13 +2263,13 @@ fn do_test_intercepted_payment(test: InterceptTest) { assert_eq!(hash, payment_hash); assert_eq!(fee_paid_msat, &Some(1000)); }, - _ => panic!("Unexpected event") + _ => panic!("Unexpected event"), } match events[1] { Event::PaymentPathSuccessful { payment_hash, .. } => { assert_eq!(payment_hash, Some(hash)); }, - _ => panic!("Unexpected event") + _ => panic!("Unexpected event"), } check_added_monitors(&nodes[0], 1); } else if test == InterceptTest::Timeout { @@ -2225,9 +2304,10 @@ fn do_test_intercepted_payment(test: InterceptTest) { let err = format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)); assert_eq!(unknown_intercept_id_err, Err(APIError::APIMisuseError { err })); - let unknown_intercept_id_err = nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap_err(); + let unknown_intercept_id_err = + nodes[1].node.fail_intercepted_htlc(intercept_id).unwrap_err(); let err = format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)); - assert_eq!(unknown_intercept_id_err , APIError::APIMisuseError { err }); + assert_eq!(unknown_intercept_id_err, APIError::APIMisuseError { err }); } } @@ -2245,10 +2325,13 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let max_in_flight_percent = 10; let mut intercept_forwards_config = test_default_channel_config(); intercept_forwards_config.accept_intercept_htlcs = true; - intercept_forwards_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent; + intercept_forwards_config + .channel_handshake_config + .max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent; let mut underpay_config = test_default_channel_config(); underpay_config.channel_config.accept_underpaying_htlcs = true; - underpay_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent; + underpay_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = + max_in_flight_percent; let configs = [None, Some(intercept_forwards_config), Some(underpay_config)]; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); @@ -2263,7 +2346,8 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let mut chan_ids = Vec::new(); for _ in 0..num_mpp_parts { // We choose the channel size so that there can be at most one part pending on each channel. 
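The sizing comment above is justified by the in-flight cap arithmetic in the expression on the next line: with max_in_flight_percent = 10, a channel admits at most 10% of its value in flight, so each channel is sized to fit exactly one MPP part. In satoshis:

    // part_size_sat       = amt_msat / 1000 / num_mpp_parts
    // in-flight cap       = channel_size * max_in_flight_percent / 100
    // choose channel_size = part_size_sat * 100 / max_in_flight_percent + 100
    //                       (+100 sat of slack: one part fits, two exceed the cap)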
- let channel_size = amt_msat / 1000 / num_mpp_parts as u64 * 100 / max_in_flight_percent as u64 + 100; + let channel_size = + amt_msat / 1000 / num_mpp_parts as u64 * 100 / max_in_flight_percent as u64 + 100; let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_size, 0); let chan = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, channel_size, 0); chan_ids.push(chan.0.channel_id); @@ -2276,20 +2360,20 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { route_hints.push(RouteHint(vec![RouteHintHop { src_node_id: node_b_id, short_channel_id: nodes[1].node.get_intercept_scid(), - fees: RoutingFees { - base_msat: 1000, - proportional_millionths: 0, - }, + fees: RoutingFees { base_msat: 1000, proportional_millionths: 0 }, cltv_expiry_delta: MIN_CLTV_EXPIRY_DELTA, htlc_minimum_msat: None, htlc_maximum_msat: Some(amt_msat / num_mpp_parts as u64 + 5), }])); } let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) - .with_route_hints(route_hints).unwrap() - .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap(); + .with_route_hints(route_hints) + .unwrap() + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); - let (payment_hash, payment_secret) = nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap(); + let (payment_hash, payment_secret) = + nodes[2].node.create_inbound_payment(Some(amt_msat), 60 * 60, None).unwrap(); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); @@ -2310,15 +2394,21 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { assert_eq!(events.len(), 1); let (intercept_id, expected_outbound_amt_msat) = match events[0] { crate::events::Event::HTLCIntercepted { - intercept_id, expected_outbound_amount_msat, payment_hash: pmt_hash, .. + intercept_id, + expected_outbound_amount_msat, + payment_hash: pmt_hash, + .. } => { assert_eq!(pmt_hash, payment_hash); (intercept_id, expected_outbound_amount_msat) }, - _ => panic!() + _ => panic!(), }; let amt = expected_outbound_amt_msat - skimmed_fee_msat; - nodes[1].node.forward_intercepted_htlc(intercept_id, &chan_ids[idx], node_c_id, amt).unwrap(); + nodes[1] + .node + .forward_intercepted_htlc(intercept_id, &chan_ids[idx], node_c_id, amt) + .unwrap(); expect_pending_htlcs_forwardable!(nodes[1]); let pay_event = { { @@ -2338,12 +2428,18 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { } // Claim the payment and check that the skimmed fee is as expected. - let payment_preimage = nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); + let payment_preimage = + nodes[2].node.get_payment_preimage(payment_hash, payment_secret).unwrap(); let events = nodes[2].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { crate::events::Event::PaymentClaimable { - ref payment_hash, ref purpose, amount_msat, counterparty_skimmed_fee_msat, receiver_node_id, .. + ref payment_hash, + ref purpose, + amount_msat, + counterparty_skimmed_fee_msat, + receiver_node_id, + .. 
} => { assert_eq!(payment_hash, payment_hash); assert_eq!(amt_msat - skimmed_fee_msat * num_mpp_parts as u64, amount_msat); @@ -2365,8 +2461,12 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { } let mut expected_paths_vecs = Vec::new(); let mut expected_paths = Vec::new(); - for _ in 0..num_mpp_parts { expected_paths_vecs.push(vec!(&nodes[1], &nodes[2])); } - for i in 0..num_mpp_parts { expected_paths.push(&expected_paths_vecs[i][..]); } + for _ in 0..num_mpp_parts { + expected_paths_vecs.push(vec![&nodes[1], &nodes[2]]); + } + for i in 0..num_mpp_parts { + expected_paths.push(&expected_paths_vecs[i][..]); + } expected_paths[0].last().unwrap().node.claim_funds(payment_preimage); let args = ClaimAlongRouteArgs::new(&nodes[0], &expected_paths[..], payment_preimage) .with_expected_extra_fees(vec![skimmed_fee_msat as u32; num_mpp_parts]); @@ -2427,9 +2527,11 @@ fn do_automatic_retries(test: AutoRetry) { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); - let (_, hash, preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + let (_, hash, preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); macro_rules! pass_failed_attempt_with_retry_along_path { ($failing_channel_id: expr, $expect_pending_htlcs_forwardable: expr) => { @@ -2502,9 +2604,11 @@ fn do_automatic_retries(test: AutoRetry) { let path = &[&nodes[1], &nodes[2]]; pass_along_path(&nodes[0], path, amt_msat, hash, Some(payment_secret), event, true, None); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new( + &nodes[0], + &[&[&nodes[1], &nodes[2]]], + preimage, + )); } else if test == AutoRetry::Spontaneous { let onion = RecipientOnionFields::spontaneous_empty(); let id = PaymentId(hash.0); @@ -2528,9 +2632,7 @@ fn do_automatic_retries(test: AutoRetry) { let path = &[&nodes[1], &nodes[2]]; pass_along_path(&nodes[0], path, amt_msat, hash, None, event, true, Some(preimage)); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[path], preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path], preimage)); } else if test == AutoRetry::FailAttempts { // Ensure ChannelManager will not retry a payment if it has run out of payment attempts. let onion = RecipientOnionFields::secret_only(payment_secret); @@ -2551,7 +2653,8 @@ fn do_automatic_retries(test: AutoRetry) { let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 0); } else if test == AutoRetry::FailTimeout { - #[cfg(feature = "std")] { + #[cfg(feature = "std")] + { // Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout. let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(hash.0); @@ -2654,14 +2757,17 @@ fn auto_retry_partial_failure() { // Open three channels, the first has plenty of liquidity, the second and third have ~no // available liquidity, causing any outbound payments routed over it to fail immediately. 
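The "~no available liquidity" claim follows from the channel parameters in the lines below. Roughly, assuming LDK's default holder-selected channel reserve of about 1% of channel value:

    // capacity:           1_000_000 sat  = 1_000_000_000 msat
    // pushed to nodes[1]:                    989_000_000 msat
    // left for nodes[0]:                      11_000_000 msat
    // ~1% reserve:           10_000 sat  =   10_000_000 msat
    // spendable:                            ~ 1_000_000 msat, minus commitment fees
    // => a 5_000_000 msat shard (amt_msat / 2) over chan_2 or chan_3 fails outright.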
let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; - let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000); + let chan_2 = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000); let chan_2_id = chan_2.0.contents.short_channel_id; - let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000); + let chan_3 = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 989_000_000); let chan_3_id = chan_3.0.contents.short_channel_id; // Marshall data to send the payment let amt_msat = 10_000_000; - let (_, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); + let (_, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -2672,7 +2778,8 @@ fn auto_retry_partial_failure() { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); // Configure the initial send path let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); @@ -2680,24 +2787,30 @@ fn auto_retry_partial_failure() { let send_route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: node_b_id, - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_id, - channel_features: nodes[1].node.channel_features(), - fee_msat: amt_msat / 2, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: node_b_id, - node_features: nodes[1].node.node_features(), - short_channel_id: chan_2_id, - channel_features: nodes[1].node.channel_features(), - fee_msat: amt_msat / 2, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_id, + channel_features: nodes[1].node.channel_features(), + fee_msat: amt_msat / 2, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_2_id, + channel_features: nodes[1].node.channel_features(), + fee_msat: amt_msat / 2, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, ], route_params: Some(route_params.clone()), }; @@ -2706,29 +2819,36 @@ fn auto_retry_partial_failure() { // Configure the retry1 paths let mut payment_params = route_params.payment_params.clone(); payment_params.previously_failed_channels.push(chan_2_id); - let mut retry_1_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 2); + let mut retry_1_params = + RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 2); retry_1_params.max_total_routing_fee_msat = None; let retry_1_route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: node_b_id, - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_id, - channel_features: 
nodes[1].node.channel_features(), - fee_msat: amt_msat / 4, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: node_b_id, - node_features: nodes[1].node.node_features(), - short_channel_id: chan_3_id, - channel_features: nodes[1].node.channel_features(), - fee_msat: amt_msat / 4, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_id, + channel_features: nodes[1].node.channel_features(), + fee_msat: amt_msat / 4, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_3_id, + channel_features: nodes[1].node.channel_features(), + fee_msat: amt_msat / 4, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, ], route_params: Some(retry_1_params.clone()), }; @@ -2737,12 +2857,13 @@ fn auto_retry_partial_failure() { // Configure the retry2 path let mut payment_params = retry_1_params.payment_params.clone(); payment_params.previously_failed_channels.push(chan_3_id); - let mut retry_2_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 4); + let mut retry_2_params = + RouteParameters::from_payment_params_and_value(payment_params, amt_msat / 4); retry_2_params.max_total_routing_fee_msat = None; let retry_2_route = Route { - paths: vec![ - Path { hops: vec![RouteHop { + paths: vec![Path { + hops: vec![RouteHop { pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_1_id, @@ -2750,8 +2871,9 @@ fn auto_retry_partial_failure() { fee_msat: amt_msat / 4, cltv_expiry_delta: 100, maybe_announced_channel: true, - }], blinded_tail: None }, - ], + }], + blinded_tail: None, + }], route_params: Some(retry_2_params.clone()), }; nodes[0].router.expect_find_route(retry_2_params, Ok(retry_2_route)); @@ -2855,8 +2977,14 @@ fn auto_retry_partial_failure() { check_added_monitors!(nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); - if let Event::PaymentPathSuccessful { .. } = events[0] {} else { panic!(); } - if let Event::PaymentPathSuccessful { .. } = events[1] {} else { panic!(); } + if let Event::PaymentPathSuccessful { .. } = events[0] { + } else { + panic!(); + } + if let Event::PaymentPathSuccessful { .. 
} = events[1] { + } else { + panic!(); + } } #[test] @@ -2875,7 +3003,8 @@ fn auto_retry_zero_attempts_send_error() { // Marshall data to send the payment let amt_msat = 10_000_000; - let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(amt_msat), None); + let (_, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[1], Some(amt_msat), None); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -2886,13 +3015,14 @@ fn auto_retry_zero_attempts_send_error() { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); // Override the route search to return a route, rather than failing at the route-finding step. let send_route = Route { - paths: vec![ - Path { hops: vec![RouteHop { + paths: vec![Path { + hops: vec![RouteHop { pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_id, @@ -2900,8 +3030,9 @@ fn auto_retry_zero_attempts_send_error() { fee_msat: amt_msat, cltv_expiry_delta: 100, maybe_announced_channel: true, - }], blinded_tail: None }, - ], + }], + blinded_tail: None, + }], route_params: Some(route_params.clone()), }; nodes[0].router.expect_find_route(route_params.clone(), Ok(send_route)); @@ -2913,8 +3044,14 @@ fn auto_retry_zero_attempts_send_error() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); - if let Event::PaymentPathFailed { .. } = events[0] { } else { panic!(); } - if let Event::PaymentFailed { .. } = events[1] { } else { panic!(); } + if let Event::PaymentPathFailed { .. } = events[0] { + } else { + panic!(); + } + if let Event::PaymentFailed { .. 
} = events[1] { + } else { + panic!(); + } check_added_monitors!(nodes[0], 0); } @@ -2932,7 +3069,8 @@ fn fails_paying_after_rejected_by_payee() { // Marshall data to send the payment let amt_msat = 20_000; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -2943,7 +3081,8 @@ fn fails_paying_after_rejected_by_payee() { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); let onion = RecipientOnionFields::secret_only(payment_secret); @@ -2981,7 +3120,8 @@ fn retry_multi_path_single_failed_payment() { let amt_msat = 100_010_000; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -2992,32 +3132,39 @@ fn retry_multi_path_single_failed_payment() { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); - let mut route_params = RouteParameters::from_payment_params_and_value( - payment_params.clone(), amt_msat); + .with_bolt11_features(invoice_features) + .unwrap(); + let mut route_params = + RouteParameters::from_payment_params_and_value(payment_params.clone(), amt_msat); route_params.max_total_routing_fee_msat = None; let chans = nodes[0].node.list_usable_channels(); let mut route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: node_b_id, - node_features: nodes[1].node.node_features(), - short_channel_id: chans[0].short_channel_id.unwrap(), - channel_features: nodes[1].node.channel_features(), - fee_msat: 10_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: node_b_id, - node_features: nodes[1].node.node_features(), - short_channel_id: chans[1].short_channel_id.unwrap(), - channel_features: nodes[1].node.channel_features(), - fee_msat: 100_000_001, // Our default max-HTLC-value is 10% of the channel value, which this is one more than - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chans[0].short_channel_id.unwrap(), + channel_features: nodes[1].node.channel_features(), + fee_msat: 10_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, + Path { + hops: vec![RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chans[1].short_channel_id.unwrap(), + channel_features: nodes[1].node.channel_features(), + fee_msat: 100_000_001, // Our default max-HTLC-value is 10% of the channel 
value, which this is one more than + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }], + blinded_tail: None, + }, ], route_params: Some(route_params.clone()), }; @@ -3039,12 +3186,18 @@ fn retry_multi_path_single_failed_payment() { let effective_capacity = EffectiveCapacity::Unknown; let usage = ChannelUsage { amount_msat: 10_000, inflight_htlc_msat: 0, effective_capacity }; scorer.expect_usage(chans[0].short_channel_id.unwrap(), usage); - let usage = ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity }; + let usage = + ChannelUsage { amount_msat: 100_000_001, inflight_htlc_msat: 0, effective_capacity }; scorer.expect_usage(chans[1].short_channel_id.unwrap(), usage); // The retry, 2 paths. Ensure that the in-flight HTLC amount is factored in. - let usage = ChannelUsage { amount_msat: 50_000_001, inflight_htlc_msat: 10_000, effective_capacity }; + let usage = ChannelUsage { + amount_msat: 50_000_001, + inflight_htlc_msat: 10_000, + effective_capacity, + }; scorer.expect_usage(chans[0].short_channel_id.unwrap(), usage); - let usage = ChannelUsage { amount_msat: 50_000_000, inflight_htlc_msat: 0, effective_capacity }; + let usage = + ChannelUsage { amount_msat: 50_000_000, inflight_htlc_msat: 0, effective_capacity }; scorer.expect_usage(chans[1].short_channel_id.unwrap(), usage); } @@ -3054,10 +3207,13 @@ fn retry_multi_path_single_failed_payment() { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false, - failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { .. }}, - short_channel_id: Some(expected_scid), .. } => - { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently: false, + failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { .. } }, + short_channel_id: Some(expected_scid), + .. 
+ } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(expected_scid, route.paths[1].hops[0].short_channel_id); }, @@ -3082,7 +3238,8 @@ fn immediate_retry_on_failure() { create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0); let amt_msat = 100_000_001; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -3093,13 +3250,14 @@ fn immediate_retry_on_failure() { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); let chans = nodes[0].node.list_usable_channels(); let mut route = Route { - paths: vec![ - Path { hops: vec![RouteHop { + paths: vec![Path { + hops: vec![RouteHop { pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chans[0].short_channel_id.unwrap(), @@ -3107,8 +3265,9 @@ fn immediate_retry_on_failure() { fee_msat: 100_000_001, // Our default max-HTLC-value is 10% of the channel value, which this is one more than cltv_expiry_delta: 100, maybe_announced_channel: true, - }], blinded_tail: None }, - ], + }], + blinded_tail: None, + }], route_params: Some(route_params.clone()), }; nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); @@ -3129,10 +3288,13 @@ fn immediate_retry_on_failure() { let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently: false, - failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { .. }}, - short_channel_id: Some(expected_scid), .. } => - { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently: false, + failure: PathFailure::InitialSend { err: APIError::ChannelUnavailable { .. } }, + short_channel_id: Some(expected_scid), + .. 
+ } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(expected_scid, route.paths[1].hops[0].short_channel_id); }, @@ -3175,7 +3337,8 @@ fn no_extra_retries_on_back_to_back_fail() { let chan_2_scid = chan_2.0.contents.short_channel_id; let amt_msat = 200_000_000; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -3186,46 +3349,59 @@ fn no_extra_retries_on_back_to_back_fail() { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); route_params.max_total_routing_fee_msat = None; let mut route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: node_b_id, - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_scid, - channel_features: nodes[1].node.channel_features(), - fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: node_c_id, - node_features: nodes[2].node.node_features(), - short_channel_id: chan_2_scid, - channel_features: nodes[2].node.channel_features(), - fee_msat: 100_000_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: node_b_id, - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_scid, - channel_features: nodes[1].node.channel_features(), - fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: node_c_id, - node_features: nodes[2].node.node_features(), - short_channel_id: chan_2_scid, - channel_features: nodes[2].node.channel_features(), - fee_msat: 100_000_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None } + Path { + hops: vec![ + RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_scid, + channel_features: nodes[1].node.channel_features(), + fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_c_id, + node_features: nodes[2].node.node_features(), + short_channel_id: chan_2_scid, + channel_features: nodes[2].node.channel_features(), + fee_msat: 100_000_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }, + Path { + hops: vec![ + RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_scid, + channel_features: nodes[1].node.channel_features(), + fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_c_id, + node_features: nodes[2].node.node_features(), + short_channel_id: chan_2_scid, + channel_features: nodes[2].node.channel_features(), + fee_msat: 100_000_000, + cltv_expiry_delta: 100, + maybe_announced_channel: 
true, + }, + ], + blinded_tail: None, + }, ], route_params: Some(route_params.clone()), }; @@ -3236,7 +3412,8 @@ fn no_extra_retries_on_back_to_back_fail() { // On retry, we'll only return one path route.paths.remove(1); route.paths[0].hops[1].fee_msat = amt_msat; - let mut retry_params = RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat); + let mut retry_params = + RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat); retry_params.max_total_routing_fee_msat = None; route.route_params = Some(retry_params.clone()); nodes[0].router.expect_find_route(retry_params, Ok(route.clone())); @@ -3285,8 +3462,12 @@ fn no_extra_retries_on_back_to_back_fail() { check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone(), next_hop_failure.clone()]); + let next_hop_failure = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[next_hop_failure.clone(), next_hop_failure.clone()] + ); check_added_monitors(&nodes[1], 1); let bs_fail_update = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -3310,7 +3491,11 @@ fn no_extra_retries_on_back_to_back_fail() { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 3); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently, + .. + } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(payment_failed_permanently, false); }, @@ -3321,7 +3506,11 @@ fn no_extra_retries_on_back_to_back_fail() { _ => panic!("Unexpected event"), } match events[2] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently, + .. + } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(payment_failed_permanently, false); }, @@ -3335,7 +3524,10 @@ fn no_extra_retries_on_back_to_back_fail() { nodes[1].node.handle_update_add_htlc(node_a_id, &retry_htlc_updates.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], &retry_htlc_updates.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[next_hop_failure.clone()] + ); check_added_monitors(&nodes[1], 1); let bs_fail_update = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -3345,14 +3537,22 @@ fn no_extra_retries_on_back_to_back_fail() { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently, + .. 
+ } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(payment_failed_permanently, false); }, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentFailed { payment_hash: ref ev_payment_hash, payment_id: ref ev_payment_id, reason: ref ev_reason } => { + Event::PaymentFailed { + payment_hash: ref ev_payment_hash, + payment_id: ref ev_payment_id, + reason: ref ev_reason, + } => { assert_eq!(Some(payment_hash), *ev_payment_hash); assert_eq!(PaymentId(payment_hash.0), *ev_payment_id); assert_eq!(PaymentFailureReason::RetriesExhausted, ev_reason.unwrap()); @@ -3383,7 +3583,8 @@ fn test_simple_partial_retry() { let chan_2_scid = chan_2.0.contents.short_channel_id; let amt_msat = 200_000_000; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -3394,46 +3595,59 @@ fn test_simple_partial_retry() { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); route_params.max_total_routing_fee_msat = None; let mut route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: node_b_id, - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_scid, - channel_features: nodes[1].node.channel_features(), - fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: node_c_id, - node_features: nodes[2].node.node_features(), - short_channel_id: chan_2_scid, - channel_features: nodes[2].node.channel_features(), - fee_msat: 100_000_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: node_b_id, - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_scid, - channel_features: nodes[1].node.channel_features(), - fee_msat: 100_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: node_c_id, - node_features: nodes[2].node.node_features(), - short_channel_id: chan_2_scid, - channel_features: nodes[2].node.channel_features(), - fee_msat: 100_000_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None } + Path { + hops: vec![ + RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_scid, + channel_features: nodes[1].node.channel_features(), + fee_msat: 0, // nodes[1] will fail the payment as we don't pay its fee + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_c_id, + node_features: nodes[2].node.node_features(), + short_channel_id: chan_2_scid, + channel_features: nodes[2].node.channel_features(), + fee_msat: 100_000_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }, + Path { + hops: vec![ + RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_scid, + channel_features: nodes[1].node.channel_features(), + 
fee_msat: 100_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_c_id, + node_features: nodes[2].node.node_features(), + short_channel_id: chan_2_scid, + channel_features: nodes[2].node.channel_features(), + fee_msat: 100_000_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }, ], route_params: Some(route_params.clone()), }; @@ -3444,7 +3658,8 @@ fn test_simple_partial_retry() { second_payment_params.previously_failed_channels = vec![chan_2_scid]; // On retry, we'll only be asked for one path (or 100k sats) route.paths.remove(0); - let mut retry_params = RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat / 2); + let mut retry_params = + RouteParameters::from_payment_params_and_value(second_payment_params, amt_msat / 2); retry_params.max_total_routing_fee_msat = None; route.route_params = Some(retry_params.clone()); nodes[0].router.expect_find_route(retry_params, Ok(route.clone())); @@ -3481,8 +3696,12 @@ fn test_simple_partial_retry() { commitment_signed_dance!(nodes[1], nodes[0], second_htlc_updates.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - let next_hop_failure = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[next_hop_failure.clone()]); + let next_hop_failure = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[next_hop_failure.clone()] + ); check_added_monitors(&nodes[1], 2); { @@ -3512,7 +3731,11 @@ fn test_simple_partial_retry() { let mut events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentPathFailed { payment_hash: ev_payment_hash, payment_failed_permanently, .. } => { + Event::PaymentPathFailed { + payment_hash: ev_payment_hash, + payment_failed_permanently, + .. 
+ } => { assert_eq!(payment_hash, ev_payment_hash); assert_eq!(payment_failed_permanently, false); }, @@ -3573,7 +3796,8 @@ fn test_threaded_payment_retries() { let chan_4_scid = chan_4.0.contents.short_channel_id; let amt_msat = 100_000_000; - let (_, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat); + let (_, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[2], amt_msat); #[cfg(feature = "std")] let payment_expiry_secs = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_secs() + 60 * 60; #[cfg(not(feature = "std"))] @@ -3584,47 +3808,62 @@ fn test_threaded_payment_retries() { invoice_features.set_basic_mpp_optional(); let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) .with_expiry_time(payment_expiry_secs as u64) - .with_bolt11_features(invoice_features).unwrap(); + .with_bolt11_features(invoice_features) + .unwrap(); let mut route_params = RouteParameters { - payment_params, final_value_msat: amt_msat, max_total_routing_fee_msat: Some(500_000), + payment_params, + final_value_msat: amt_msat, + max_total_routing_fee_msat: Some(500_000), }; let mut route = Route { paths: vec![ - Path { hops: vec![RouteHop { - pubkey: node_b_id, - node_features: nodes[1].node.node_features(), - short_channel_id: chan_1_scid, - channel_features: nodes[1].node.channel_features(), - fee_msat: 0, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: node_d_id, - node_features: nodes[2].node.node_features(), - short_channel_id: 42, // Set a random SCID which nodes[1] will fail as unknown - channel_features: nodes[2].node.channel_features(), - fee_msat: amt_msat / 1000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None }, - Path { hops: vec![RouteHop { - pubkey: node_c_id, - node_features: nodes[2].node.node_features(), - short_channel_id: chan_3_scid, - channel_features: nodes[2].node.channel_features(), - fee_msat: 100_000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }, RouteHop { - pubkey: node_d_id, - node_features: nodes[3].node.node_features(), - short_channel_id: chan_4_scid, - channel_features: nodes[3].node.channel_features(), - fee_msat: amt_msat - amt_msat / 1000, - cltv_expiry_delta: 100, - maybe_announced_channel: true, - }], blinded_tail: None } + Path { + hops: vec![ + RouteHop { + pubkey: node_b_id, + node_features: nodes[1].node.node_features(), + short_channel_id: chan_1_scid, + channel_features: nodes[1].node.channel_features(), + fee_msat: 0, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_d_id, + node_features: nodes[2].node.node_features(), + short_channel_id: 42, // Set a random SCID which nodes[1] will fail as unknown + channel_features: nodes[2].node.channel_features(), + fee_msat: amt_msat / 1000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }, + Path { + hops: vec![ + RouteHop { + pubkey: node_c_id, + node_features: nodes[2].node.node_features(), + short_channel_id: chan_3_scid, + channel_features: nodes[2].node.channel_features(), + fee_msat: 100_000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + RouteHop { + pubkey: node_d_id, + node_features: nodes[3].node.node_features(), + short_channel_id: chan_4_scid, + channel_features: nodes[3].node.channel_features(), + fee_msat: amt_msat - amt_msat / 1000, + cltv_expiry_delta: 100, + maybe_announced_channel: true, + }, + ], + blinded_tail: None, + }, 
], route_params: Some(route_params.clone()), }; @@ -3637,13 +3876,15 @@ fn test_threaded_payment_retries() { check_added_monitors!(nodes[0], 2); let mut send_msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(send_msg_events.len(), 2); - send_msg_events.retain(|msg| + send_msg_events.retain(|msg| { if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, .. } = msg { // Drop the commitment update for nodes[2], we can just let that one sit pending // forever. *node_id == node_b_id - } else { panic!(); } - ); + } else { + panic!(); + } + }); // from here on out, the retry `RouteParameters` amount will be amt/1000 route_params.final_value_msat /= 1000; @@ -3666,7 +3907,9 @@ fn test_threaded_payment_retries() { } } } } let mut threads = Vec::new(); - for _ in 0..16 { threads.push(std::thread::spawn(thread_body!())); } + for _ in 0..16 { + threads.push(std::thread::spawn(thread_body!())); + } // Back in the main thread, poll pending messages and make sure that we never have more than // one HTLC pending at a time. Note that the commitment_signed_dance will fail horribly if @@ -3684,7 +3927,9 @@ fn test_threaded_payment_retries() { nodes[1].node.process_pending_htlc_forwards(); expect_htlc_handling_failed_destinations!( nodes[1].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::InvalidForward { requested_forward_scid: route.paths[0].hops[1].short_channel_id }] + &[HTLCHandlingFailureType::InvalidForward { + requested_forward_scid: route.paths[0].hops[1].short_channel_id + }] ); check_added_monitors(&nodes[1], 1); @@ -3694,7 +3939,8 @@ fn test_threaded_payment_retries() { // many HTLCs at once. let mut new_route_params = route_params.clone(); previously_failed_channels.push(route.paths[0].hops[1].short_channel_id); - new_route_params.payment_params.previously_failed_channels = previously_failed_channels.clone(); + new_route_params.payment_params.previously_failed_channels = + previously_failed_channels.clone(); new_route_params.max_total_routing_fee_msat.as_mut().map(|m| *m -= 100_000); route.paths[0].hops[1].short_channel_id += 1; route.route_params = Some(new_route_params.clone()); @@ -3713,7 +3959,9 @@ fn test_threaded_payment_retries() { let cur_time = Instant::now(); if cur_time > end_time { - for thread in threads.drain(..) { thread.join().unwrap(); } + for thread in threads.drain(..) { + thread.join().unwrap(); + } } // Make sure we have some events to handle when we go around... @@ -3750,7 +3998,8 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: node_a_ser = nodes[0].node.encode(); } - let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (our_payment_preimage, our_payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); if persist_manager_with_payment { node_a_ser = nodes[0].node.encode(); @@ -3783,8 +4032,15 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); - if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[0] {} else { panic!(); } - if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); } + if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[0] { + } else { + panic!(); + } + if let Event::PaymentSent { payment_preimage, .. 
} = events[1] { + assert_eq!(payment_preimage, our_payment_preimage); + } else { + panic!(); + } // Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid // the double-claim that would otherwise appear at the end of this test. nodes[0].node.timer_tick_occurred(); @@ -3796,7 +4052,9 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: // payments have since been timed out thanks to `IDEMPOTENCY_TIMEOUT_TICKS`. // A naive implementation of the fix here would wipe the pending payments set, causing a // failure event when we restart. - for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); } + for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { + nodes[0].node.timer_tick_occurred(); + } let mon_ser = get_monitor!(nodes[0], chan_id).encode(); let node_ser = nodes[0].node.encode(); @@ -3808,7 +4066,9 @@ fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: // Ensure that we don't generate any further events even after the channel-closing commitment // transaction is confirmed on-chain. confirm_transaction(&nodes[0], &as_broadcasted_txn[0]); - for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { nodes[0].node.timer_tick_occurred(); } + for _ in 0..(IDEMPOTENCY_TIMEOUT_TICKS * 2) { + nodes[0].node.timer_tick_occurred(); + } let events = nodes[0].node.get_and_clear_pending_events(); assert!(events.is_empty()); @@ -3866,7 +4126,8 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let (payment_preimage, hash, payment_secret) = get_payment_preimage_hash!(nodes[3]); let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let amt_msat = 10_000_000; let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); @@ -3874,8 +4135,13 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let mut route = nodes[0].router.find_route(&node_a_id, &route_params, None, inflight).unwrap(); // Make sure the route is ordered as the B->D path before C->D - route.paths.sort_by(|a, _| - if a.hops[0].pubkey == node_b_id { Ordering::Less } else { Ordering::Greater }); + route.paths.sort_by(|a, _| { + if a.hops[0].pubkey == node_b_id { + Ordering::Less + } else { + Ordering::Greater + } + }); // Note that we add an extra 1 in the send pipeline to compensate for any blocks found while // the HTLC is being relayed. @@ -3894,7 +4160,11 @@ fn do_claim_from_closed_chan(fail_payment: bool) { let a_node_id = if let MessageSendEvent::UpdateHTLCs { node_id, .. } = a { node_id } else { panic!() }; let node_b_id = node_b_id; - if *a_node_id == node_b_id { Ordering::Less } else { Ordering::Greater } + if *a_node_id == node_b_id { + Ordering::Less + } else { + Ordering::Greater + } }); assert_eq!(send_msgs.len(), 2); @@ -3962,26 +4232,33 @@ fn do_claim_from_closed_chan(fail_payment: bool) { if let MessageSendEvent::UpdateHTLCs { updates, .. } = &bs_claims[0] { nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true); - } else { panic!(); } + } else { + panic!(); + } expect_payment_sent!(nodes[0], payment_preimage); let ds_claim_msgs = nodes[3].node.get_and_clear_pending_msg_events(); assert_eq!(ds_claim_msgs.len(), 1); - let cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. 
} = &ds_claim_msgs[0] { + let cs_claim_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = &ds_claim_msgs[0] + { nodes[2].node.handle_update_fulfill_htlc(node_d_id, &updates.update_fulfill_htlcs[0]); let cs_claim_msgs = nodes[2].node.get_and_clear_pending_msg_events(); check_added_monitors(&nodes[2], 1); commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true); expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false); cs_claim_msgs - } else { panic!(); }; + } else { + panic!(); + }; assert_eq!(cs_claim_msgs.len(), 1); if let MessageSendEvent::UpdateHTLCs { updates, .. } = &cs_claim_msgs[0] { nodes[0].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], updates.commitment_signed, false, true); - } else { panic!(); } + } else { + panic!(); + } expect_payment_path_successful!(nodes[0]); } @@ -4019,7 +4296,8 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { create_announced_chan_between_nodes(&nodes, 0, 1); let amt_msat = 100_000; - let (mut route, hash, preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat); + let (mut route, hash, preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat); let id = PaymentId(hash.0); let custom_tlvs = vec![ (if even_tlvs { 5482373482 } else { 5482373483 }, vec![1, 2, 3, 4]), @@ -4028,7 +4306,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { let onion = RecipientOnionFields { payment_secret: if spontaneous { None } else { Some(payment_secret) }, payment_metadata: None, - custom_tlvs: custom_tlvs.clone() + custom_tlvs: custom_tlvs.clone(), }; if spontaneous { let params = route.route_params.unwrap(); @@ -4062,14 +4340,14 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { nodes[1].node.claim_funds_with_known_custom_tlvs(preimage); let expected_total_fee_msat = pass_claimed_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage) - .with_custom_tlvs(custom_tlvs) + .with_custom_tlvs(custom_tlvs), ); expect_payment_sent!(&nodes[0], preimage, Some(expected_total_fee_msat)); }, (false, false) => { claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], preimage) - .with_custom_tlvs(custom_tlvs) + .with_custom_tlvs(custom_tlvs), ); }, (false, true) => { @@ -4078,7 +4356,7 @@ fn do_test_custom_tlvs(spontaneous: bool, even_tlvs: bool, known_tlvs: bool) { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); let reason = PaymentFailureReason::RecipientRejected; pass_failed_payment_back(&nodes[0], &[&[&nodes[1]]], false, hash, reason); - } + }, } } @@ -4138,7 +4416,7 @@ fn test_retry_custom_tlvs() { let mut events = nodes[0].node.get_and_clear_pending_events(); match events[1] { Event::PendingHTLCsForwardable { .. 
} => {}, - _ => panic!("Unexpected event") + _ => panic!("Unexpected event"), } events.remove(1); let conditions = PaymentFailedConditions::new().mpp_parts_remain(); @@ -4163,16 +4441,16 @@ fn test_retry_custom_tlvs() { do_pass_along_path(args); claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - .with_custom_tlvs(custom_tlvs) + .with_custom_tlvs(custom_tlvs), ); } #[test] fn test_custom_tlvs_consistency() { let even_type_1 = 1 << 16; - let odd_type_1 = (1 << 16)+ 1; + let odd_type_1 = (1 << 16) + 1; let even_type_2 = (1 << 16) + 2; - let odd_type_2 = (1 << 16) + 3; + let odd_type_2 = (1 << 16) + 3; let value_1 = || vec![1, 2, 3, 4]; let differing_value_1 = || vec![1, 2, 3, 5]; let value_2 = || vec![42u8; 16]; @@ -4203,9 +4481,10 @@ fn test_custom_tlvs_consistency() { ); } -fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: Vec<(u64, Vec)>, - expected_receive_tlvs: Option)>>) { - +fn do_test_custom_tlvs_consistency( + first_tlvs: Vec<(u64, Vec)>, second_tlvs: Vec<(u64, Vec)>, + expected_receive_tlvs: Option)>>, +) { let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); @@ -4222,12 +4501,17 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); + .with_bolt11_features(nodes[3].node.bolt11_invoice_features()) + .unwrap(); let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap(); assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first - if path_a.hops[0].pubkey == node_b_id { Ordering::Less } else { Ordering::Greater } + if path_a.hops[0].pubkey == node_b_id { + Ordering::Less + } else { + Ordering::Greater + } }); let (preimage, hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]); @@ -4238,7 +4522,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: let onion = RecipientOnionFields { payment_secret: Some(payment_secret), payment_metadata: None, - custom_tlvs: first_tlvs + custom_tlvs: first_tlvs, }; let session_privs = nodes[0].node.test_add_new_pending_payment(hash, onion.clone(), id, &route).unwrap(); @@ -4263,7 +4547,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: let onion = RecipientOnionFields { payment_secret: Some(payment_secret), payment_metadata: None, - custom_tlvs: second_tlvs + custom_tlvs: second_tlvs, }; let path_b = &route.paths[1]; let priv_b = session_privs[1]; @@ -4308,7 +4592,7 @@ fn do_test_custom_tlvs_consistency(first_tlvs: Vec<(u64, Vec)>, second_tlvs: do_claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[path_a, &[&nodes[2], &nodes[3]]], preimage) - .with_custom_tlvs(expected_tlvs) + .with_custom_tlvs(expected_tlvs), ); expect_payment_sent(&nodes[0], preimage, Some(Some(2000)), true, true); } else { @@ -4367,12 +4651,14 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { // Pay more than half of each channel's max, requiring MPP let amt_msat = 750_000_000; - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[3], Some(amt_msat)); + let (payment_preimage, 
payment_hash, payment_secret) = + get_payment_preimage_hash!(nodes[3], Some(amt_msat)); let payment_id = PaymentId(payment_hash.0); let payment_metadata = vec![44, 49, 52, 142]; let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat); // Send the MPP payment, delivering the updated commitment state to nodes[1]. @@ -4426,8 +4712,14 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { let payment_fail_retryable_evs = nodes[0].node.get_and_clear_pending_events(); assert_eq!(payment_fail_retryable_evs.len(), 2); - if let Event::PaymentPathFailed { .. } = payment_fail_retryable_evs[0] {} else { panic!(); } - if let Event::PendingHTLCsForwardable { .. } = payment_fail_retryable_evs[1] {} else { panic!(); } + if let Event::PaymentPathFailed { .. } = payment_fail_retryable_evs[0] { + } else { + panic!(); + } + if let Event::PendingHTLCsForwardable { .. } = payment_fail_retryable_evs[1] { + } else { + panic!(); + } // Before we allow the HTLC to be retried, optionally change the payment_metadata we have // stored for our payment. @@ -4474,8 +4766,10 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { if do_modify { expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_conditions(nodes[3].node.get_and_clear_pending_events(), - &[HTLCHandlingFailureType::Receive {payment_hash}]); + expect_pending_htlcs_forwardable_conditions( + nodes[3].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }], + ); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[3], 1); @@ -4504,7 +4798,7 @@ fn test_payment_metadata_consistency() { } #[test] -fn test_htlc_forward_considers_anchor_outputs_value() { +fn test_htlc_forward_considers_anchor_outputs_value() { // Tests that: // // 1) Forwarding nodes don't forward HTLCs that would cause their balance to dip below the @@ -4534,15 +4828,28 @@ fn test_htlc_forward_considers_anchor_outputs_value() { const CHAN_AMT: u64 = 1_000_000; const PUSH_MSAT: u64 = 900_000_000; create_announced_chan_between_nodes_with_value(&nodes, 0, 1, CHAN_AMT, 500_000_000); - let (_, _, chan_id_2, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, CHAN_AMT, PUSH_MSAT); + let (_, _, chan_id_2, _) = + create_announced_chan_between_nodes_with_value(&nodes, 1, 2, CHAN_AMT, PUSH_MSAT); - let channel_reserve_msat = get_holder_selected_channel_reserve_satoshis(CHAN_AMT, &config) * 1000; + let channel_reserve_msat = + get_holder_selected_channel_reserve_satoshis(CHAN_AMT, &config) * 1000; let commitment_fee_msat = chan_utils::commit_tx_fee_sat( - *nodes[1].fee_estimator.sat_per_kw.lock().unwrap(), 2, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() + *nodes[1].fee_estimator.sat_per_kw.lock().unwrap(), + 2, + &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), ) * 1000; let anchor_outpus_value_msat = ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000; - let sendable_balance_msat = CHAN_AMT * 1000 - PUSH_MSAT - channel_reserve_msat - commitment_fee_msat - anchor_outpus_value_msat; - let channel_details = nodes[1].node.list_channels().into_iter().find(|channel| channel.channel_id == chan_id_2).unwrap(); + let 
sendable_balance_msat = CHAN_AMT * 1000
+		- PUSH_MSAT
+		- channel_reserve_msat
+		- commitment_fee_msat
+		- anchor_outpus_value_msat;
+	let channel_details = nodes[1]
+		.node
+		.list_channels()
+		.into_iter()
+		.find(|channel| channel.channel_id == chan_id_2)
+		.unwrap();
 	assert!(sendable_balance_msat >= channel_details.next_outbound_htlc_minimum_msat);
 	assert!(sendable_balance_msat <= channel_details.next_outbound_htlc_limit_msat);
@@ -4552,7 +4859,9 @@ fn test_htlc_forward_considers_anchor_outputs_value() {
 	// Send out an HTLC that would cause the forwarding node to dip below its reserve when
 	// considering the value of anchor outputs.
 	let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(
-		nodes[0], nodes[2], sendable_balance_msat + anchor_outpus_value_msat
+		nodes[0],
+		nodes[2],
+		sendable_balance_msat + anchor_outpus_value_msat
 	);
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
@@ -4561,14 +4870,15 @@
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
-	let mut update_add_htlc = if let MessageSendEvent::UpdateHTLCs { updates, .. } = events.pop().unwrap() {
-		nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
-		check_added_monitors(&nodes[1], 0);
-		commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false);
-		updates.update_add_htlcs[0].clone()
-	} else {
-		panic!("Unexpected event");
-	};
+	let mut update_add_htlc =
+		if let MessageSendEvent::UpdateHTLCs { updates, .. } = events.pop().unwrap() {
+			nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
+			check_added_monitors(&nodes[1], 0);
+			commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false);
+			updates.update_add_htlcs[0].clone()
+		} else {
+			panic!("Unexpected event");
+		};
 
 	// The forwarding node should reject forwarding it as expected.
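 	// (Concretely, and assuming ANCHOR_OUTPUT_VALUE_SATOSHI is the standard 330 sats:
 	// `anchor_outpus_value_msat` above is 2 * 330 * 1000 = 660_000 msat, so this HTLC
 	// overshoots nodes[1]'s spendable balance by exactly the value of both anchor outputs.)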
expect_pending_htlcs_forwardable!(nodes[1]);
@@ -4613,21 +4923,30 @@ fn peel_payment_onion_custom_tlvs() {
 	let secp_ctx = Secp256k1::new();
 	let amt_msat = 1000;
-	let payment_params = PaymentParameters::for_keysend(node_b_id,
-		TEST_FINAL_CLTV, false);
+	let payment_params = PaymentParameters::for_keysend(node_b_id, TEST_FINAL_CLTV, false);
 	let route_params = RouteParameters::from_payment_params_and_value(payment_params, amt_msat);
 	let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap();
 	let mut recipient_onion = RecipientOnionFields::spontaneous_empty()
-		.with_custom_tlvs(vec![(414141, vec![42; 1200])]).unwrap();
+		.with_custom_tlvs(vec![(414141, vec![42; 1200])])
+		.unwrap();
 	let prng_seed = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
 	let session_priv = SecretKey::from_slice(&prng_seed[..]).expect("RNG is busted");
 	let keysend_preimage = PaymentPreimage([42; 32]);
 	let payment_hash = PaymentHash(Sha256::hash(&keysend_preimage.0).to_byte_array());
 
 	let (onion_routing_packet, first_hop_msat, cltv_expiry) = onion_utils::create_payment_onion(
-		&secp_ctx, &route.paths[0], &session_priv, amt_msat, &recipient_onion,
-		nodes[0].best_block_info().1, &payment_hash, &Some(keysend_preimage), None, prng_seed
-	).unwrap();
+		&secp_ctx,
+		&route.paths[0],
+		&session_priv,
+		amt_msat,
+		&recipient_onion,
+		nodes[0].best_block_info().1,
+		&payment_hash,
+		&Some(keysend_preimage),
+		None,
+		prng_seed,
+	)
+	.unwrap();
 
 	let update_add = msgs::UpdateAddHTLC {
 		channel_id: ChannelId([0; 32]),
@@ -4640,9 +4959,14 @@
 		blinding_point: None,
 	};
 	let peeled_onion = crate::ln::onion_payment::peel_payment_onion(
-		&update_add, &chanmon_cfgs[1].keys_manager, &chanmon_cfgs[1].logger, &secp_ctx,
-		nodes[1].best_block_info().1, false
-	).unwrap();
+		&update_add,
+		&chanmon_cfgs[1].keys_manager,
+		&chanmon_cfgs[1].logger,
+		&secp_ctx,
+		nodes[1].best_block_info().1,
+		false,
+	)
+	.unwrap();
 	assert_eq!(peeled_onion.incoming_amt_msat, Some(amt_msat));
 	match peeled_onion.routing {
 		PendingHTLCRouting::ReceiveKeysend {
@@ -4655,7 +4979,7 @@
 			assert!(payment_metadata.is_none());
 			assert!(payment_data.is_none());
 		},
-		_ => panic!()
+		_ => panic!(),
 	}
 }
 
@@ -4679,18 +5003,23 @@ fn test_non_strict_forwarding() {
 	// the given value.
 	let payment_value = 1_500_000;
 	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
-	let (chan_update_1, _, channel_id_1, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 4_950, 0);
-	let (chan_update_2, _, channel_id_2, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 5_000, 0);
+	let (chan_update_1, _, channel_id_1, _) =
+		create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 4_950, 0);
+	let (chan_update_2, _, channel_id_2, _) =
+		create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 5_000, 0);
 
 	// Create a route once.
 	let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV)
-		.with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap();
+		.with_bolt11_features(nodes[2].node.bolt11_invoice_features())
+		.unwrap();
-	let route_params = RouteParameters::from_payment_params_and_value(payment_params, payment_value);
+	let route_params =
+		RouteParameters::from_payment_params_and_value(payment_params, payment_value);
 	let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap();
 
 	// Send 4 payments over the same route.
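 	// (A rough sketch of why exactly four fit, assuming the usual 1_000-sat minimum reserve:
 	// each payment is 1_500_000 msat while the two nodes[1] -> nodes[2] channels hold only
 	// ~5_000 sats each, so no single channel can carry all four. Non-strict forwarding lets
 	// nodes[1] forward over whichever of its two channels to nodes[2] has the liquidity,
 	// regardless of the SCID named in the onion, which works out to roughly two payments per
 	// channel; the fifth payment below finds neither channel with 1_500_000 msat spendable
 	// and fails.)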
for i in 0..4 { - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None); + let (payment_preimage, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[2], Some(payment_value), None); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); @@ -4720,13 +5049,16 @@ fn test_non_strict_forwarding() { assert_eq!(events.len(), 1); assert!(matches!(events[0], Event::PaymentClaimable { .. })); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new( + &nodes[0], + &[&[&nodes[1], &nodes[2]]], + payment_preimage, + )); } // Send a 5th payment which will fail. - let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None); + let (_, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[2], Some(payment_value), None); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap(); @@ -4776,8 +5108,10 @@ fn remove_pending_outbounds_on_buggy_router() { let amt_msat = 10_000; let payment_id = PaymentId([42; 32]); let payment_params = PaymentParameters::from_node_id(node_b_id, 0) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); + let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); // Extend the path by itself, essentially simulating route going through same channel twice let cloned_hops = route.paths[0].hops.clone(); @@ -4797,13 +5131,13 @@ fn remove_pending_outbounds_on_buggy_router() { assert_eq!(failure, &PathFailure::InitialSend { err: APIError::InvalidRoute { err } }); assert!(!payment_failed_permanently); }, - _ => panic!() + _ => panic!(), } match events[1] { Event::PaymentFailed { reason, .. 
} => { assert_eq!(reason.unwrap(), PaymentFailureReason::UnexpectedError); }, - _ => panic!() + _ => panic!(), } assert!(nodes[0].node.list_recent_payments().is_empty()); } @@ -4823,8 +5157,10 @@ fn remove_pending_outbound_probe_on_buggy_path() { let amt_msat = 10_000; let payment_params = PaymentParameters::from_node_id(node_b_id, 0) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); + let (mut route, _, _, _) = + get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); // Extend the path by itself, essentially simulating route going through same channel twice let cloned_hops = route.paths[0].hops.clone(); @@ -4832,9 +5168,9 @@ fn remove_pending_outbound_probe_on_buggy_path() { assert_eq!( nodes[0].node.send_probe(route.paths.pop().unwrap()).unwrap_err(), - ProbeSendFailure::ParameterError( - APIError::InvalidRoute { err: "Path went through the same channel twice".to_string() } - ) + ProbeSendFailure::ParameterError(APIError::InvalidRoute { + err: "Path went through the same channel twice".to_string() + }) ); assert!(nodes[0].node.list_recent_payments().is_empty()); } @@ -4854,8 +5190,10 @@ fn pay_route_without_params() { let amt_msat = 10_000; let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, hash, preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); + let (mut route, hash, preimage, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); route.route_params.take(); let onion = RecipientOnionFields::secret_only(payment_secret); @@ -4868,9 +5206,7 @@ fn pay_route_without_params() { let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); let path = &[&nodes[1]]; pass_along_path(&nodes[0], path, amt_msat, hash, Some(payment_secret), node_1_msgs, true, None); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[path], preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path], preimage)); } #[test] diff --git a/rustfmt_excluded_files b/rustfmt_excluded_files index 5d8e47b2501..4f4804f2a30 100644 --- a/rustfmt_excluded_files +++ b/rustfmt_excluded_files @@ -21,7 +21,6 @@ lightning/src/ln/offers_tests.rs lightning/src/ln/onion_payment.rs lightning/src/ln/onion_route_tests.rs lightning/src/ln/outbound_payment.rs -lightning/src/ln/payment_tests.rs lightning/src/ln/peer_handler.rs lightning/src/ln/priv_short_conf_tests.rs lightning/src/ln/reload_tests.rs From c3fabc3aefc43ea8bb211b78a016ca04c234e997 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 27 Apr 2025 15:27:28 +0000 Subject: [PATCH 07/25] Use `node_id_*` rather than `get_our_node_id()` in functional_tests --- lightning/src/ln/functional_tests.rs | 2490 ++++++++++++++++---------- 1 file changed, 1497 insertions(+), 993 deletions(-) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 2f8a9b53a5d..ee8462ca10c 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -77,23 +77,26 @@ fn test_channel_resumption_fail_post_funding() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, 
&[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 0, 42, None, None).unwrap(); - let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan); - let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_chan); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0].node.create_channel(node_b_id, 1_000_000, 0, 42, None, None).unwrap(); + let open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_chan); + let accept_chan = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_chan); let (temp_chan_id, tx, funding_output) = - create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); let new_chan_id = ChannelId::v1_from_funding_outpoint(funding_output); - nodes[0].node.funding_transaction_generated(temp_chan_id, nodes[1].node.get_our_node_id(), tx).unwrap(); + nodes[0].node.funding_transaction_generated(temp_chan_id, node_b_id, tx).unwrap(); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(new_chan_id, true, ClosureReason::DisconnectedPeer)]); // After ddf75afd16 we'd panic on reconnection if we exchanged funding info, so test that // explicitly here. 
- nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { + nodes[0].node.peer_connected(node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, remote_network_address: None }, true).unwrap(); assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new()); @@ -110,6 +113,9 @@ pub fn test_insane_channel_opens() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(cfg.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Instantiate channel parameters where we push the maximum msats given our // funding satoshis let channel_value_sat = 31337; // same as funding satoshis @@ -117,15 +123,15 @@ pub fn test_insane_channel_opens() { let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000; // Have node0 initiate a channel to node1 with aforementioned parameters - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42, None, None).unwrap(); + nodes[0].node.create_channel(node_b_id, channel_value_sat, push_msat, 42, None, None).unwrap(); // Extract the channel open message from node0 to node1 - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); // Test helper that asserts we get the correct error string given a mutator // that supposedly makes the channel open message insane let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| { - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &message_mutator(open_channel_message.clone())); + nodes[1].node.handle_open_channel(node_a_id, &message_mutator(open_channel_message.clone())); let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let expected_regex = regex::Regex::new(expected_error_str).unwrap(); @@ -173,7 +179,9 @@ pub fn test_funding_exceeds_no_wumbo_limit() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - match nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) { + let node_b_id = nodes[1].node.get_our_node_id(); + + match nodes[0].node.create_channel(node_b_id, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) { Err(APIError::APIMisuseError { err }) => { assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err); }, @@ -190,6 +198,10 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let default_config = UserConfig::default(); // Have node0 initiate a channel to node1 with aforementioned parameters @@ -199,21 +211,21 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, 
&default_config) * 1000; - let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap(); - let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap(); + let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); if !send_from_initiator { open_channel_message.channel_reserve_satoshis = 0; open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; } - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_message); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); // Extract the channel accept message from node1 to node0 - let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); if send_from_initiator { accept_channel_message.channel_reserve_satoshis = 0; accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; } - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel_message); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); { let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] }; let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] }; @@ -255,6 +267,10 @@ pub fn test_async_inbound_update_fee() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); // balancing @@ -294,7 +310,7 @@ pub fn test_async_inbound_update_fee() { _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... 
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); @@ -307,24 +323,24 @@ pub fn test_async_inbound_update_fee() { assert_eq!(events_1.len(), 1); SendEvent::from_event(events_1.remove(0)) }; - assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(payment_event.node_id, node_a_id); assert_eq!(payment_event.msgs.len(), 1); // ...now when the messages get delivered everyone should be happy - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2) - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); // (2) + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); // deliver(1), generate (3): - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); // deliver (2) - let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); // deliver (2) + let bs_update = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(bs_update.update_add_htlcs.is_empty()); // (4) assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4) assert!(bs_update.update_fail_htlcs.is_empty()); // (4) @@ -332,8 +348,8 @@ pub fn test_async_inbound_update_fee() { assert!(bs_update.update_fee.is_none()); // (4) check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); // deliver (3) - let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); // deliver (3) + let as_update = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(as_update.update_add_htlcs.is_empty()); // (5) assert!(as_update.update_fulfill_htlcs.is_empty()); // (5) assert!(as_update.update_fail_htlcs.is_empty()); // (5) @@ -341,16 +357,16 @@ pub fn test_async_inbound_update_fee() { assert!(as_update.update_fee.is_none()); // (5) check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); // deliver (4) - let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_update.commitment_signed); // deliver (4) + let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // 
only (6) so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_update.commitment_signed); // deliver (5) - let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update.commitment_signed); // deliver (5) + let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_revoke); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke); check_added_monitors!(nodes[0], 1); let events_2 = nodes[0].node.get_and_clear_pending_events(); @@ -360,7 +376,7 @@ pub fn test_async_inbound_update_fee() { _ => panic!("Unexpected event"), } - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_revoke); // deliver (6) + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_revoke); // deliver (6) check_added_monitors!(nodes[1], 1); } @@ -372,6 +388,10 @@ pub fn test_update_fee_unordered_raa() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); // balancing @@ -394,7 +414,7 @@ pub fn test_update_fee_unordered_raa() { _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... 
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); @@ -407,17 +427,17 @@ pub fn test_update_fee_unordered_raa() { assert_eq!(events_1.len(), 1); SendEvent::from_event(events_1.remove(0)) }; - assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(payment_event.node_id, node_a_id); assert_eq!(payment_event.msgs.len(), 1); // ...now when the messages get delivered everyone should be happy - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); // (2) - let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); // (2) + let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_msg); // deliver (2) + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_msg); // deliver (2) check_added_monitors!(nodes[1], 1); // We can't continue, sadly, because our (1) now has a bogus signature @@ -429,6 +449,10 @@ pub fn test_multi_flight_update_fee() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); // A B @@ -470,9 +494,9 @@ pub fn test_multi_flight_update_fee() { }; // Deliver first update_fee/commitment_signed pair, generating (1) and (2): - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg_1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed_1); - let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_update_fee(node_a_id, update_msg_1); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed_1); + let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment @@ -491,15 +515,15 @@ pub fn test_multi_flight_update_fee() { feerate_per_kw: (initial_feerate + 30) as u32, }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), &update_msg_2); + nodes[1].node.handle_update_fee(node_a_id, &update_msg_2); update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32; // Deliver (3) - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), &update_msg_2); + nodes[1].node.handle_update_fee(node_a_id, &update_msg_2); // Deliver (1), generating (3) and (4) - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_msg); - let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_msg); + let as_second_update = get_htlc_update_msgs!(nodes[0], node_b_id); 
check_added_monitors!(nodes[0], 1); assert!(as_second_update.update_add_htlcs.is_empty()); assert!(as_second_update.update_fulfill_htlcs.is_empty()); @@ -510,30 +534,30 @@ pub fn test_multi_flight_update_fee() { assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw); // Deliver (2) commitment_signed - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment_signed); - let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); + let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); check_added_monitors!(nodes[0], 1); // No commitment_signed so get_event_msg's assert(len == 1) passes - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_msg); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); // Delever (4) - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed); - let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); + let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_revoke); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_commitment); - let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment); + let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_revoke); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_revoke); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); } @@ -549,6 +573,9 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + if steps & 0b1000_0000 != 0{ let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new()); connect_block(&nodes[0], &block); @@ -556,41 +583,41 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { } if steps & 0x0f == 0 { return; } - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap(); - let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let open_channel = 
get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); if steps & 0x0f == 1 { return; } - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel); - let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); if steps & 0x0f == 2 { return; } - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); - let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); + let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42); if steps & 0x0f == 3 { return; } - nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); + nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).unwrap(); check_added_monitors!(nodes[0], 0); - let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); let channel_id = ChannelId::v1_from_funding_txid( funding_created.funding_txid.as_byte_array(), funding_created.funding_output_index ); if steps & 0x0f == 4 { return; } - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created); + nodes[1].node.handle_funding_created(node_a_id, &funding_created); { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); assert_eq!(added_monitors[0].0, channel_id); added_monitors.clear(); } - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); if steps & 0x0f == 5 { return; } - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed); { let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); assert_eq!(added_monitors.len(), 1); @@ -598,7 +625,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { added_monitors.clear(); } - expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_pending_event(&nodes[0], &node_b_id); let events_4 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_4.len(), 0); @@ -609,7 +636,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { confirm_transaction_at(&nodes[0], &tx, 2); connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH); create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]); - expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_ready_event(&nodes[0], &node_b_id); } #[xtest(feature = "_externalize_tests")] @@ -640,6 +667,10 @@ pub fn test_update_fee_vanilla() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = 
nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); { @@ -657,22 +688,22 @@ pub fn test_update_fee_vanilla() { }, _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke_msg); + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); - let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); + let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke_msg); + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); } @@ -683,6 +714,10 @@ pub fn test_update_fee_that_funder_cannot_afford() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_value = 5000; let push_sats = 700; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000); @@ -705,9 +740,9 @@ pub fn test_update_fee_that_funder_cannot_afford() { } nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); - let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_msg = get_htlc_update_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap()); + nodes[1].node.handle_update_fee(node_a_id, &update_msg.update_fee.unwrap()); commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false); @@ -736,7 +771,7 @@ pub fn test_update_fee_that_funder_cannot_afford() { let remote_point = { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); let chan_signer = remote_chan.get_signer(); chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap() @@ -744,7 +779,7 @@ pub fn test_update_fee_that_funder_cannot_afford() { let res = { let 
per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); let local_chan_signer = local_chan.get_signer(); let nondust_htlcs: Vec = vec![]; @@ -778,17 +813,17 @@ pub fn test_update_fee_that_funder_cannot_afford() { feerate_per_kw: non_buffer_feerate + 4, }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), &update_fee); + nodes[1].node.handle_update_fee(node_a_id, &update_fee); //While producing the commitment_signed response after handling a received update_fee request the //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve) //Should produce and error. - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commit_signed_msg); + nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, - [nodes[0].node.get_our_node_id()], channel_value); + [node_a_id], channel_value); } #[xtest(feature = "_externalize_tests")] @@ -805,6 +840,9 @@ pub fn test_update_fee_that_saturates_subs() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_id = create_chan_between_nodes_with_value(&nodes[0], &nodes[1], 10_000, 8_500_000).3; const FEERATE: u32 = 250 * 10; // 10sat/vb @@ -821,7 +859,7 @@ pub fn test_update_fee_that_saturates_subs() { let remote_point = { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); let remote_chan = chan_lock.channel_by_id.get(&chan_id).and_then(Channel::as_funded).unwrap(); let chan_signer = remote_chan.get_signer(); chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).unwrap() @@ -829,7 +867,7 @@ pub fn test_update_fee_that_saturates_subs() { let res = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); let local_chan = local_chan_lock.channel_by_id.get(&chan_id).and_then(Channel::as_funded).unwrap(); let local_chan_signer = local_chan.get_signer(); let nondust_htlcs: Vec = vec![]; @@ -865,14 +903,14 @@ pub fn test_update_fee_that_saturates_subs() { feerate_per_kw: FEERATE, }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), &update_fee); + nodes[1].node.handle_update_fee(node_a_id, &update_fee); - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commit_signed_msg); + nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); 
nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, - [nodes[0].node.get_our_node_id()], 10_000); + [node_a_id], 10_000); } #[xtest(feature = "_externalize_tests")] @@ -881,6 +919,10 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); // balancing @@ -901,9 +943,9 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { }, _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000); @@ -920,40 +962,40 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // node[1] has nothing to do - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke_msg); + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); - let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); + let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke_msg); + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); check_added_monitors!(nodes[1], 1); // AwaitingRemoteRevoke ends here - let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(commitment_update.update_add_htlcs.len(), 1); assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0); assert_eq!(commitment_update.update_fail_htlcs.len(), 0); assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0); assert_eq!(commitment_update.update_fee.is_none(), true); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), 
&commitment_update.commitment_signed); + nodes[0].node.handle_update_add_htlc(node_b_id, &commitment_update.update_add_htlcs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); check_added_monitors!(nodes[0], 1); - let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke); + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed); check_added_monitors!(nodes[1], 1); - let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke); + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -971,8 +1013,8 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -981,6 +1023,10 @@ pub fn test_update_fee() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let channel_id = chan.2; @@ -1016,15 +1062,15 @@ pub fn test_update_fee() { }, _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); // Generate (2) and (3): - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); // Deliver (2): - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke_msg); + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); 
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); @@ -1044,27 +1090,27 @@ pub fn test_update_fee() { _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); check_added_monitors!(nodes[1], 1); // ... creating (5) - let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes // Handle (3), creating (6): - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed_0); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_0); check_added_monitors!(nodes[0], 1); - let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes // Deliver (5): - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke_msg); + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); // Deliver (6), creating (7): - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke_msg_0); - let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg_0); + let commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(commitment_update.update_add_htlcs.is_empty()); assert!(commitment_update.update_fulfill_htlcs.is_empty()); assert!(commitment_update.update_fail_htlcs.is_empty()); @@ -1073,20 +1119,20 @@ pub fn test_update_fee() { check_added_monitors!(nodes[1], 1); // Deliver (7) - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); check_added_monitors!(nodes[0], 1); - let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke_msg); + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + 
check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1098,6 +1144,11 @@ pub fn fake_network_test() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -1132,7 +1183,7 @@ pub fn fake_network_test() { // Do some rebalance loop payments, simultaneously let mut hops = Vec::with_capacity(3); hops.push(RouteHop { - pubkey: nodes[2].node.get_our_node_id(), + pubkey: node_c_id, node_features: NodeFeatures::empty(), short_channel_id: chan_2.0.contents.short_channel_id, channel_features: ChannelFeatures::empty(), @@ -1141,7 +1192,7 @@ pub fn fake_network_test() { maybe_announced_channel: true, }); hops.push(RouteHop { - pubkey: nodes[3].node.get_our_node_id(), + pubkey: node_d_id, node_features: NodeFeatures::empty(), short_channel_id: chan_3.0.contents.short_channel_id, channel_features: ChannelFeatures::empty(), @@ -1150,7 +1201,7 @@ pub fn fake_network_test() { maybe_announced_channel: true, }); hops.push(RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_4.0.contents.short_channel_id, channel_features: nodes[1].node.channel_features(), @@ -1161,7 +1212,7 @@ pub fn fake_network_test() { hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; let payment_params = PaymentParameters::from_node_id( - nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV + node_b_id, TEST_FINAL_CLTV ).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1000000); let payment_preimage_1 = send_along_route(&nodes[1], @@ -1170,7 +1221,7 @@ pub fn fake_network_test() { let mut hops = Vec::with_capacity(3); hops.push(RouteHop { - pubkey: nodes[3].node.get_our_node_id(), + pubkey: node_d_id, node_features: NodeFeatures::empty(), short_channel_id: chan_4.0.contents.short_channel_id, channel_features: ChannelFeatures::empty(), @@ -1179,7 +1230,7 @@ pub fn fake_network_test() { maybe_announced_channel: true, }); hops.push(RouteHop { - pubkey: nodes[2].node.get_our_node_id(), + pubkey: node_c_id, node_features: NodeFeatures::empty(), short_channel_id: chan_3.0.contents.short_channel_id, channel_features: ChannelFeatures::empty(), @@ -1188,7 +1239,7 @@ pub fn fake_network_test() { maybe_announced_channel: true, }); hops.push(RouteHop { - pubkey: nodes[1].node.get_our_node_id(), + pubkey: node_b_id, node_features: nodes[1].node.node_features(), short_channel_id: chan_2.0.contents.short_channel_id, channel_features: nodes[1].node.channel_features(), @@ -1208,17 +1259,17 @@ pub fn fake_network_test() { // Close down the channels... 
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); - check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_c_id], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true); - check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_d_id], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_c_id], 100000); close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_d_id], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1230,6 +1281,11 @@ pub fn holding_cell_htlc_counting() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -1248,7 +1304,7 @@ pub fn holding_cell_htlc_counting() { let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let initial_payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id()); + assert_eq!(initial_payment_event.node_id, node_c_id); // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in // the holding cell waiting on B's RAA to send. 
At this point we should not be able to add @@ -1271,52 +1327,52 @@ pub fn holding_cell_htlc_counting() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.node_id, node_b_id); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward. expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); - let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]); + let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true); expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false); // Now forward all the pending HTLCs and claim them back - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]); - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg); + nodes[2].node.handle_update_add_htlc(node_b_id, &initial_payment_event.msgs[0]); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &initial_payment_event.commitment_msg); check_added_monitors!(nodes[2], 1); - let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); + let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); check_added_monitors!(nodes[1], 1); - let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let as_updates = get_htlc_update_msgs!(nodes[1], node_c_id); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_commitment_signed); check_added_monitors!(nodes[1], 1); - let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); for ref update in as_updates.update_add_htlcs.iter() { - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), update); + nodes[2].node.handle_update_add_htlc(node_b_id, update); } - 
nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &as_updates.commitment_signed); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &as_updates.commitment_signed); check_added_monitors!(nodes[2], 1); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_raa); + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); check_added_monitors!(nodes[2], 1); - let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_commitment_signed); check_added_monitors!(nodes[1], 1); - let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_final_raa); + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_final_raa); check_added_monitors!(nodes[2], 1); expect_pending_htlcs_forwardable!(nodes[2]); @@ -1378,6 +1434,8 @@ pub fn test_duplicate_htlc_different_direction_onchain() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); // post-bump fee (288 satoshis) + dust threshold for output type (294 satoshis) = 582 @@ -1414,7 +1472,7 @@ pub fn test_duplicate_htlc_different_direction_onchain() { mine_transaction(&nodes[0], &remote_txn[0]); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); @@ -1445,7 +1503,7 @@ pub fn test_duplicate_htlc_different_direction_onchain() { match e { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(node_id, node_b_id); assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain."); }, MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. 
} } => { @@ -1453,7 +1511,7 @@ pub fn test_duplicate_htlc_different_direction_onchain() { assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_malformed_htlcs.is_empty()); - assert_eq!(nodes[1].node.get_our_node_id(), *node_id); + assert_eq!(node_b_id, *node_id); }, _ => panic!("Unexpected event"), } @@ -1466,6 +1524,7 @@ pub fn test_basic_channel_reserve() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); @@ -1491,6 +1550,10 @@ pub fn test_fee_spike_violation_fails_htlc() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let (mut route, payment_hash, _, payment_secret) = @@ -1518,7 +1581,7 @@ pub fn test_fee_spike_violation_fails_htlc() { blinding_point: None, }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); // Now manually create the commitment_signed message corresponding to the update_add // nodes[0] just sent. In the code for construction of this message, "local" refers @@ -1530,7 +1593,7 @@ pub fn test_fee_spike_violation_fails_htlc() { let (local_secret, next_local_point) = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); let chan_signer = local_chan.get_signer(); // Make the signer believe we validated another commitment, so we can release the secret @@ -1541,7 +1604,7 @@ pub fn test_fee_spike_violation_fails_htlc() { }; let remote_point = { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); let chan_signer = remote_chan.get_signer(); chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap() @@ -1563,7 +1626,7 @@ pub fn test_fee_spike_violation_fails_htlc() { let res = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); let local_chan_signer = local_chan.get_signer(); let commitment_tx = CommitmentTransaction::new( @@ -1592,7 +1655,7 @@ pub fn test_fee_spike_violation_fails_htlc() { }; // Send the commitment_signed message to the nodes[1]. 
-	nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commit_signed_msg);
+	nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg);
 	let _ = nodes[1].node.get_and_clear_pending_msg_events();
 
 	// Send the RAA to nodes[1].
@@ -1603,7 +1666,7 @@ pub fn test_fee_spike_violation_fails_htlc() {
 		#[cfg(taproot)]
 		next_local_nonce: None,
 	};
-	nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa_msg);
+	nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]);
@@ -1634,6 +1697,7 @@ pub fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
 	let default_config = UserConfig::default();
 	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
@@ -1665,6 +1729,9 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let default_config = UserConfig::default();
 	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
@@ -1704,7 +1771,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 		blinding_point: None,
 	};
 
-	nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &msg);
+	nodes[0].node.handle_update_add_htlc(node_b_id, &msg);
 
 	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
 	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3);
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
@@ -1712,7 +1779,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 	assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() },
-		[nodes[1].node.get_our_node_id()], 100000);
+		[node_b_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -1725,6 +1792,7 @@ pub fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
 	let default_config = UserConfig::default();
 	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
@@ -1766,6 +1834,10 @@ pub fn test_chan_init_feerate_unaffordability() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let default_config = UserConfig::default();
 	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
@@ -1773,16 +1845,16 @@ pub fn test_chan_init_feerate_unaffordability() {
 	// HTLC.
 	let mut push_amt = 100_000_000;
 	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
-	assert_eq!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt + 1, 42, None, None).unwrap_err(),
+	assert_eq!(nodes[0].node.create_channel(node_b_id, 100_000, push_amt + 1, 42, None, None).unwrap_err(),
 		APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
 
 	// During open, we don't have a "counterparty channel reserve" to check against, so that
 	// requirement only comes into play on the open_channel handling side.
 	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
-	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None, None).unwrap();
-	let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+	nodes[0].node.create_channel(node_b_id, 100_000, push_amt, 42, None, None).unwrap();
+	let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 	open_channel_msg.push_msat += 1;
-	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
+	nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg);
 
 	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(msg_events.len(), 1);
@@ -1802,6 +1874,7 @@ pub fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
 	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);
 
 	let payment_amt = 46000; // Dust amount
@@ -1830,6 +1903,9 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
 	let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
@@ -1855,7 +1931,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 		assert_eq!(events.len(), 1);
 		SendEvent::from_event(events.remove(0))
 	};
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event_1.msgs[0]);
 
 	// Attempt to trigger a channel reserve violation --> payment failure.
 	let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features);
@@ -1884,7 +1960,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 		blinding_point: None,
 	};
 
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &msg);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &msg);
 
 	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
 	nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3);
 	assert_eq!(nodes[1].node.list_channels().len(), 1);
@@ -1892,7 +1968,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 	assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() },
-		[nodes[0].node.get_our_node_id()], 100000);
+		[node_a_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -1901,6 +1977,7 @@ pub fn test_inbound_outbound_capacity_is_not_zero() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
 	let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
 	let channels0 = node_chanmgrs[0].list_channels();
 	let channels1 = node_chanmgrs[1].list_channels();
@@ -1930,6 +2007,11 @@ pub fn test_channel_reserve_holding_cell_htlcs() {
 	config.channel_config.forwarding_fee_base_msat = 239;
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]);
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
 	let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001);
 	let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001);
 	let chan_2_user_id = nodes[2].node.list_channels()[0].user_channel_id;
@@ -1959,7 +2041,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() {
 	// attempt to send amt_msat > their_max_htlc_value_in_flight_msat
 	{
-		let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
+		let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV)
 			.with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
 		let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0);
 		route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
@@ -1984,7 +2066,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() {
 			break;
 		}
 
-		let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
+		let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV)
 			.with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
 		let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
 		let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
@@ -2029,7 +2111,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() {
 		assert_eq!(events.len(), 1);
 		SendEvent::from_event(events.remove(0))
 	};
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event_1.msgs[0]);
 
 	// channel reserve test with htlc pending output > 0
 	let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
@@ -2082,28 +2164,28 @@ pub fn test_channel_reserve_holding_cell_htlcs() {
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
 	// flush the pending htlc
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg);
-	let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event_1.commitment_msg);
+	let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id);
 	check_added_monitors!(nodes[1], 1);
 
 	// the pending htlc should be promoted to committed
-	nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
+	nodes[0].node.handle_revoke_and_ack(node_b_id, &as_revoke_and_ack);
 	check_added_monitors!(nodes[0], 1);
-	let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	let commitment_update_2 = get_htlc_update_msgs!(nodes[0], node_b_id);
 
-	nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &as_commitment_signed);
-	let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &as_commitment_signed);
+	let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 	// No commitment_signed so get_event_msg's assert(len == 1) passes
 	check_added_monitors!(nodes[0], 1);
 
-	nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &bs_revoke_and_ack);
+	nodes[1].node.handle_revoke_and_ack(node_a_id, &bs_revoke_and_ack);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[1], 1);
 
 	expect_pending_htlcs_forwardable!(nodes[1]);
 
 	let ref payment_event_11 = expect_forward!(nodes[1]);
-	nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
+	nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_11.msgs[0]);
 	commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
 
 	expect_pending_htlcs_forwardable!(nodes[2]);
@@ -2111,15 +2193,15 @@ pub fn test_channel_reserve_holding_cell_htlcs() {
 
 	// flush the htlcs in the holding cell
 	assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &commitment_update_2.update_add_htlcs[0]);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &commitment_update_2.update_add_htlcs[1]);
 	commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 
 	let ref payment_event_3 = expect_forward!(nodes[1]);
 	assert_eq!(payment_event_3.msgs.len(), 2);
-	nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]);
-	nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
+	nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_3.msgs[0]);
+	nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_3.msgs[1]);
 	commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
 
 	expect_pending_htlcs_forwardable!(nodes[2]);
@@ -2130,7 +2212,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() {
 		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => {
 			assert_eq!(our_payment_hash_21, *payment_hash);
 			assert_eq!(recv_value_21, amount_msat);
-			assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
+			assert_eq!(node_c_id, receiver_node_id.unwrap());
 			assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]);
 			match &purpose {
 				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
@@ -2146,7 +2228,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() {
 		Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => {
 			assert_eq!(our_payment_hash_22, *payment_hash);
 			assert_eq!(recv_value_22, amount_msat);
-			assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
+			assert_eq!(node_c_id, receiver_node_id.unwrap());
 			assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]);
 			match &purpose {
 				PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
@@ -2205,6 +2287,10 @@ pub fn channel_reserve_in_flight_removes() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
@@ -2229,7 +2315,7 @@ pub fn channel_reserve_in_flight_removes() {
 	nodes[1].node.claim_funds(payment_preimage_1);
 	expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1);
 	check_added_monitors!(nodes[1], 1);
-	let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	let bs_removes = get_htlc_update_msgs!(nodes[1], node_a_id);
 
 	// This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
 	// remove the second HTLC when we send the HTLC back from B to A.
@@ -2238,42 +2324,42 @@ pub fn channel_reserve_in_flight_removes() {
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-	nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]);
-	nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed);
+	nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_removes.update_fulfill_htlcs[0]);
+	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_removes.commitment_signed);
 	check_added_monitors!(nodes[0], 1);
-	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 	expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
 
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_1.msgs[0]);
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_1.commitment_msg);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &send_1.msgs[0]);
+	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_1.commitment_msg);
 	check_added_monitors!(nodes[1], 1);
 	// B is already AwaitingRAA, so can't generate a CS here
-	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
 
-	nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa);
+	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa);
 	check_added_monitors!(nodes[1], 1);
-	let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	let bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id);
 
-	nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa);
+	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
 	check_added_monitors!(nodes[0], 1);
-	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id);
 
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
+	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed);
 	check_added_monitors!(nodes[1], 1);
-	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
 
 	// The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
 	// RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
 	// However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
 	// can no longer broadcast a commitment transaction with it and B has the preimage so can go
 	// on-chain as necessary).
-	nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]);
-	nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed);
+	nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_cs.update_fulfill_htlcs[0]);
+	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed);
 	check_added_monitors!(nodes[0], 1);
-	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 	expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
 
-	nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa);
+	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -2282,10 +2368,10 @@ pub fn channel_reserve_in_flight_removes() {
 
 	// Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
 	// resolve the second HTLC from A's point of view.
-	nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa);
+	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
 	check_added_monitors!(nodes[0], 1);
 	expect_payment_path_successful!(nodes[0]);
-	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id);
 
 	// Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
 	// to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
@@ -2299,30 +2385,30 @@ pub fn channel_reserve_in_flight_removes() {
 		SendEvent::from_event(events.remove(0))
 	};
 
-	nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_2.msgs[0]);
-	nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_2.commitment_msg);
+	nodes[0].node.handle_update_add_htlc(node_b_id, &send_2.msgs[0]);
+	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_2.commitment_msg);
 	check_added_monitors!(nodes[0], 1);
-	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
 
 	// Now just resolve all the outstanding messages/HTLCs for completeness...
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
+	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed);
 	check_added_monitors!(nodes[1], 1);
-	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
 
-	nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa);
+	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa);
 	check_added_monitors!(nodes[1], 1);
 
-	nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa);
+	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
 	check_added_monitors!(nodes[0], 1);
 	expect_payment_path_successful!(nodes[0]);
-	let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id);
 
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_cs.commitment_signed);
+	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed);
 	check_added_monitors!(nodes[1], 1);
-	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+	let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
 
-	nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa);
+	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
 	check_added_monitors!(nodes[0], 1);
 
 	expect_pending_htlcs_forwardable!(nodes[0]);
@@ -2354,10 +2440,13 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
 	for node in nodes.iter() {
 		*node.fee_estimator.sat_per_kw.lock().unwrap() = 2000;
 	}
+
+	let node_a_id = nodes[0].node.get_our_node_id();
 	let node_b_id = nodes[1].node.get_our_node_id();
 	let node_c_id = nodes[2].node.get_our_node_id();
@@ -2392,14 +2481,14 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
 	// Check that nodes[1] fails the HTLC upstream
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward {
-		node_id: Some(nodes[2].node.get_our_node_id()),
+		node_id: Some(node_c_id),
 		channel_id: chan_2.2
 	}]);
 	check_added_monitors!(nodes[1], 1);
-	let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+	let htlc_updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates;
-	nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+	nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false);
 	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().blamed_chan_closed(true));
@@ -2414,7 +2503,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
 
 			// Expect handling another fail back event, but the HTLC is already gone
 			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward {
-				node_id: Some(nodes[2].node.get_our_node_id()),
+				node_id: Some(node_c_id),
 				channel_id: chan_2.2
 			}]);
 			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -2423,7 +2512,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
 			nodes[2].node.claim_funds(payment_preimage);
 			expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
 			check_added_monitors!(nodes[2], 1);
-			get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+			get_htlc_update_msgs(&nodes[2], &node_b_id);
 
 			connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
 			let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS);
@@ -2441,11 +2530,11 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
 			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash }]);
 			check_added_monitors!(nodes[2], 1);
 
-			let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+			let commitment_update = get_htlc_update_msgs(&nodes[2], &node_b_id);
 			let update_fail = commitment_update.update_fail_htlcs[0].clone();
-			nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &update_fail);
-			let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
+			nodes[1].node.handle_update_fail_htlc(node_c_id, &update_fail);
+			let err_msg = get_err_msg(&nodes[1], &node_c_id);
 			assert_eq!(err_msg.channel_id, chan_2.2);
 			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 		},
@@ -2453,11 +2542,11 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac
 			nodes[2].node.claim_funds(payment_preimage);
 			expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
 			check_added_monitors!(nodes[2], 1);
-			let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+			let commitment_update = get_htlc_update_msgs(&nodes[2], &node_b_id);
 			let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone();
-			nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &update_fulfill);
-			let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
+			nodes[1].node.handle_update_fulfill_htlc(node_c_id, &update_fulfill);
+			let err_msg = get_err_msg(&nodes[1], &node_c_id);
 			assert_eq!(err_msg.channel_id, chan_2.2);
 			assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 		},
@@ -2473,6 +2562,12 @@ pub fn channel_monitor_network_test() {
 	let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, None, None, None, None]);
 	let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+	let node_d_id = nodes[3].node.get_our_node_id();
+	let node_e_id = nodes[4].node.get_our_node_id();
+
 	// Create some initial channels
 	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
@@ -2494,10 +2589,10 @@ pub fn channel_monitor_network_test() {
 	// Simple case with no pending HTLCs:
 	let error_message = "Channel force-closed";
-	nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
+	nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, error_message.to_string()).unwrap();
 	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_a_id], 100000);
 	{
 		let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
 		assert_eq!(node_txn.len(), 1);
@@ -2513,7 +2608,7 @@ pub fn channel_monitor_network_test() {
 	check_closed_broadcast!(nodes[0], true);
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
 	assert_eq!(nodes[1].node.list_channels().len(), 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 
 	// One pending HTLC is discarded by the force-close:
 	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
@@ -2521,7 +2616,7 @@ pub fn channel_monitor_network_test() {
 	// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
 	// broadcasted until we reach the timelock time).
 	let error_message = "Channel force-closed";
-	nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
+	nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &node_c_id, error_message.to_string()).unwrap();
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	{
@@ -2535,8 +2630,8 @@ pub fn channel_monitor_network_test() {
 	check_closed_broadcast!(nodes[2], true);
 	assert_eq!(nodes[1].node.list_channels().len(), 0);
 	assert_eq!(nodes[2].node.list_channels().len(), 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[2].node.get_our_node_id()], 100000);
-	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_c_id], 100000);
+	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
claim_funds { ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => { @@ -2562,7 +2657,7 @@ pub fn channel_monitor_network_test() { // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2] // HTLC-Timeout and a nodes[3] claim against it (+ its own announces) let error_message = "Channel force-closed"; - nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &node_d_id, error_message.to_string()).unwrap(); check_added_monitors!(nodes[2], 1); check_closed_broadcast!(nodes[2], true); let node2_commitment_txid; @@ -2581,8 +2676,8 @@ pub fn channel_monitor_network_test() { check_closed_broadcast!(nodes[3], true); assert_eq!(nodes[2].node.list_channels().len(), 0); assert_eq!(nodes[3].node.list_channels().len(), 1); - check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[3].node.get_our_node_id()], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_d_id], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and // confusing us in the following tests. @@ -2605,7 +2700,7 @@ pub fn channel_monitor_network_test() { }; match events[0] { MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => { - assert_eq!(node_id, nodes[4].node.get_our_node_id()); + assert_eq!(node_id, node_e_id); }, _ => panic!("Unexpected event"), } @@ -2637,20 +2732,20 @@ pub fn channel_monitor_network_test() { }; match events[0] { MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. 
-				assert_eq!(node_id, nodes[3].node.get_our_node_id());
+				assert_eq!(node_id, node_d_id);
 			},
 			_ => panic!("Unexpected event"),
 		}
 		check_added_monitors!(nodes[4], 1);
 		test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
-		check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [node_d_id], 100000);
 
 		mine_transaction(&nodes[4], &node_txn[0]);
 		check_preimage_claim(&nodes[4], &node_txn);
 		(close_chan_update_1, close_chan_update_2)
 	};
-	let node_id_4 = nodes[4].node.get_our_node_id();
-	let node_id_3 = nodes[3].node.get_our_node_id();
+	let node_id_4 = node_e_id;
+	let node_id_3 = node_d_id;
 	nodes[3].gossip_sync.handle_channel_update(Some(node_id_4), &close_chan_update_2).unwrap();
 	nodes[4].gossip_sync.handle_channel_update(Some(node_id_3), &close_chan_update_1).unwrap();
 	assert_eq!(nodes[3].node.list_channels().len(), 0);
@@ -2676,9 +2771,14 @@ pub fn test_justice_tx_htlc_timeout() {
 	let mut chanmon_cfgs = create_chanmon_cfgs(2);
 	chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
 	chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	// Create some new channels:
 	let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
@@ -2709,7 +2809,7 @@ pub fn test_justice_tx_htlc_timeout() {
 			node_txn.clear();
 		}
 		check_added_monitors!(nodes[1], 1);
-		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 		test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
 
 		mine_transaction(&nodes[0], &revoked_local_txn[0]);
@@ -2717,7 +2817,7 @@ pub fn test_justice_tx_htlc_timeout() {
 		// Verify broadcast of revoked HTLC-timeout
 		let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
 		check_added_monitors!(nodes[0], 1);
-		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 		// Broadcast revoked HTLC-timeout on node 1
 		mine_transaction(&nodes[1], &node_txn[1]);
 		test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone());
@@ -2742,9 +2842,14 @@ pub fn test_justice_tx_htlc_success() {
 	let mut chanmon_cfgs = create_chanmon_cfgs(2);
 	chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
 	chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
+
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	// Create some new channels:
 	let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1);
@@ -2772,11 +2877,11 @@ pub fn test_justice_tx_htlc_success() {
 		test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE);
 		mine_transaction(&nodes[1], &revoked_local_txn[0]);
-		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 		let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
 		check_added_monitors!(nodes[1], 1);
 		mine_transaction(&nodes[0], &node_txn[1]);
-		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 		test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone());
 	}
 	get_announce_close_broadcast_events(&nodes, 0, 1);
@@ -2792,6 +2897,10 @@ pub fn revoked_output_claim() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 	// node[0] is going to revoke an old state, thus node[1] should be able to claim the revoked output
 	let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
@@ -2804,7 +2913,7 @@ pub fn revoked_output_claim() {
 	// Inform nodes[1] that nodes[0] broadcast a stale tx
 	mine_transaction(&nodes[1], &revoked_local_txn[0]);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 	assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output
 
@@ -2814,7 +2923,7 @@ pub fn revoked_output_claim() {
 	mine_transaction(&nodes[0], &revoked_local_txn[0]);
 	get_announce_close_broadcast_events(&nodes, 0, 1);
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -2836,6 +2945,10 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment:
 	let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect());
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	if !broadcast_initial_commitment {
@@ -2860,12 +2973,12 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment:
 
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
-		&[nodes[0].node.get_our_node_id()], 100_000);
+		&[node_a_id], 100_000);
 	get_announce_close_broadcast_events(&nodes, 1, 0);
 
 	check_added_monitors!(nodes[0], 1);
 	check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
-		&[nodes[1].node.get_our_node_id()], 100_000);
+		&[node_b_id], 100_000);
 
 	// Check that the justice tx has sent the revoked output value to nodes[1]
 	let monitor = get_monitor!(nodes[1], channel_id);
@@ -2891,6 +3004,9 @@ pub fn claim_htlc_outputs() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	// Create some new channel:
 	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
@@ -2916,10 +3032,10 @@ pub fn claim_htlc_outputs() {
 	{
 		mine_transaction(&nodes[0], &revoked_local_txn[0]);
 		check_added_monitors!(nodes[0], 1);
-		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 		mine_transaction(&nodes[1], &revoked_local_txn[0]);
 		check_added_monitors!(nodes[1], 1);
-		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 		connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
@@ -2972,6 +3088,10 @@ pub fn test_multiple_package_conflicts() {
 		create_node_chanmgrs(3, &node_cfgs, &[Some(user_cfg.clone()), Some(user_cfg.clone()), Some(user_cfg)]);
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
 	// Since we're using anchor channels, make sure each node has a UTXO for paying fees.
 	let coinbase_tx = Transaction {
 		version: Version::TWO,
@@ -3054,7 +3174,7 @@ pub fn test_multiple_package_conflicts() {
 		1,
 		ClosureReason::CommitmentTxConfirmed,
 		false,
-		&[nodes[2].node.get_our_node_id()],
+		&[node_c_id],
 		CHAN_CAPACITY,
 	);
 	check_closed_broadcast!(nodes[1], true);
@@ -3101,7 +3221,7 @@ pub fn test_multiple_package_conflicts() {
 		1,
 		ClosureReason::CommitmentTxConfirmed,
 		false,
-		&[nodes[1].node.get_our_node_id()],
+		&[node_b_id],
 		CHAN_CAPACITY,
 	);
 	check_closed_broadcast!(nodes[2], true);
@@ -3146,21 +3266,21 @@ pub fn test_multiple_package_conflicts() {
 	//
 	// Because two update_fulfill_htlc messages are created at once, the commitment_signed_dance
 	// macro doesn't work properly and we must process the first update_fulfill_htlc manually.
-	let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+	let updates = get_htlc_update_msgs(&nodes[1], &node_a_id);
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[0].node.handle_update_fulfill_htlc(
-		nodes[1].node.get_our_node_id(),
+		node_b_id,
 		&updates.update_fulfill_htlcs[0],
 	);
 	nodes[0]
 		.node
-		.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed);
+		.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed);
 	check_added_monitors(&nodes[0], 1);
 
 	let (revoke_ack, commit_signed) =
-		get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke_ack);
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commit_signed);
+		get_revoke_commit_msgs(&nodes[0], &node_b_id);
+	nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_ack);
+	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commit_signed);
 	check_added_monitors(&nodes[1], 4);
 
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -3169,7 +3289,7 @@ pub fn test_multiple_package_conflicts() {
 		MessageSendEvent::SendRevokeAndACK { node_id: _, msg } => msg,
 		_ => panic!("Unexpected event"),
 	};
-	nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), revoke_ack);
+	nodes[0].node.handle_revoke_and_ack(node_b_id, revoke_ack);
 	expect_payment_sent!(nodes[0], preimage_1);
 
 	let updates = match &events[0] {
@@ -3178,7 +3298,7 @@ pub fn test_multiple_package_conflicts() {
 	};
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[0].node.handle_update_fulfill_htlc(
-		nodes[1].node.get_our_node_id(),
+		node_b_id,
 		&updates.update_fulfill_htlcs[0],
 	);
 	commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
@@ -3231,6 +3351,10 @@ pub fn test_htlc_on_chain_success() {
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
 	// Create some initial channels
 	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
@@ -3258,7 +3382,7 @@ pub fn test_htlc_on_chain_success() {
 	nodes[2].node.claim_funds(our_payment_preimage_2);
 	expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000);
 	check_added_monitors!(nodes[2], 2);
-	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	let updates = get_htlc_update_msgs!(nodes[2], node_b_id);
 	assert!(updates.update_add_htlcs.is_empty());
 	assert!(updates.update_fail_htlcs.is_empty());
 	assert!(updates.update_fail_malformed_htlcs.is_empty());
@@ -3267,7 +3391,7 @@ pub fn test_htlc_on_chain_success() {
 	mine_transaction(&nodes[2], &commitment_tx[0]);
 	check_closed_broadcast!(nodes[2], true);
 	check_added_monitors!(nodes[2], 1);
-	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 	let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx)
 	assert_eq!(node_txn.len(), 2);
 	check_spends!(node_txn[0], commitment_tx[0]);
@@ -3329,8 +3453,8 @@ pub fn test_htlc_on_chain_success() {
 	}
 	assert_eq!(events.len(), 3);
-	let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
-	let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events);
+	let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events);
+	let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut events);
 
 	match nodes_2_event {
 		MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {},
@@ -3343,7 +3467,7 @@ pub fn test_htlc_on_chain_success() {
 			assert!(update_fail_htlcs.is_empty());
 			assert_eq!(update_fulfill_htlcs.len(), 1);
 			assert!(update_fail_malformed_htlcs.is_empty());
-			assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
+			assert_eq!(node_a_id, *node_id);
 		},
 		_ => panic!("Unexpected event"),
 	};
@@ -3389,7 +3513,7 @@ pub fn test_htlc_on_chain_success() {
 	mine_transaction(&nodes[1], &node_a_commitment_tx[0]);
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 	assert!(node_txn.len() == 1 || node_txn.len() == 2); // HTLC-Success, RBF bump of above aggregated HTLC txn
 	let commitment_spend =
@@ -3457,6 +3581,11 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
 	*nodes[0].connect_style.borrow_mut() = connect_style;
 	*nodes[1].connect_style.borrow_mut() = connect_style;
 	*nodes[2].connect_style.borrow_mut() = connect_style;
@@ -3487,14 +3616,14 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 			assert!(!update_fail_htlcs.is_empty());
 			assert!(update_fulfill_htlcs.is_empty());
 			assert!(update_fail_malformed_htlcs.is_empty());
-			assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
+			assert_eq!(node_b_id, *node_id);
 		},
 		_ => panic!("Unexpected event"),
 	};
 	mine_transaction(&nodes[2], &commitment_tx[0]);
 	check_closed_broadcast!(nodes[2], true);
 	check_added_monitors!(nodes[2], 1);
-	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 	let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
 	assert_eq!(node_txn.len(), 0);
 
@@ -3502,7 +3631,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 	// Verify that B's ChannelManager is able to detect that the HTLC has timed out by its own tx and react backward in consequence
 	mine_transaction(&nodes[1], &commitment_tx[0]);
 	check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
-		&[nodes[2].node.get_our_node_id()], 100000);
+		&[node_c_id], 100000);
 	let htlc_expiry = get_monitor!(nodes[1], chan_2.2).get_claimable_balances().iter().filter_map(|bal|
 		if let Balance::MaybeTimeoutClaimableHTLC { claimable_height, .. } = bal {
 			Some(*claimable_height)
@@ -3531,7 +3660,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]);
 	check_added_monitors!(nodes[1], 1);
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -3541,7 +3670,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 			assert!(!update_fail_htlcs.is_empty());
 			assert!(update_fulfill_htlcs.is_empty());
 			assert!(update_fail_malformed_htlcs.is_empty());
-			assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
+			assert_eq!(node_a_id, *node_id);
 		},
 		_ => panic!("Unexpected event"),
 	};
@@ -3555,7 +3684,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx
 	assert_eq!(node_txn.len(), 1);
 	check_spends!(node_txn[0], commitment_tx[0]);
@@ -3579,6 +3708,10 @@ pub fn test_simple_commitment_revoked_fail_backward() {
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
 	// Create some initial channels
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
@@ -3592,12 +3725,12 @@ pub fn test_simple_commitment_revoked_fail_backward() {
 	let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 
 	mine_transaction(&nodes[1], &revoked_local_txn[0]);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000);
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
 
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]);
 	check_added_monitors!(nodes[1], 1);
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -3607,9 +3740,9 @@ pub fn test_simple_commitment_revoked_fail_backward() {
 			assert_eq!(update_fail_htlcs.len(), 1);
 			assert!(update_fulfill_htlcs.is_empty());
 			assert!(update_fail_malformed_htlcs.is_empty());
-			assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
+			assert_eq!(node_a_id, *node_id);
 
-			nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]);
+			nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]);
 			commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
 			expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true);
 		},
@@ -3638,6 +3771,10 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
 	// Create some initial channels
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
@@ -3652,7 +3789,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 	let value = if use_dust {
 		// The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
 		// well, so HTLCs at exactly the dust limit will not be included in commitment txn.
-		nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
+		nodes[2].node.per_peer_state.read().unwrap().get(&node_b_id)
 			.unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
 	} else { 3000000 };
@@ -3663,50 +3800,50 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 	nodes[2].node.fail_htlc_backwards(&first_payment_hash);
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }]);
 	check_added_monitors!(nodes[2], 1);
-	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	let updates = get_htlc_update_msgs!(nodes[2], node_b_id);
 	assert!(updates.update_add_htlcs.is_empty());
 	assert!(updates.update_fulfill_htlcs.is_empty());
 	assert!(updates.update_fail_malformed_htlcs.is_empty());
 	assert_eq!(updates.update_fail_htlcs.len(), 1);
 	assert!(updates.update_fee.is_none());
-	nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+	nodes[1].node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]);
 	let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
 	// Drop the last RAA from 3 -> 2
 
 	nodes[2].node.fail_htlc_backwards(&second_payment_hash);
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: second_payment_hash }]);
 	check_added_monitors!(nodes[2], 1);
-	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	let updates = get_htlc_update_msgs!(nodes[2], node_b_id);
 	assert!(updates.update_add_htlcs.is_empty());
 	assert!(updates.update_fulfill_htlcs.is_empty());
 	assert!(updates.update_fail_malformed_htlcs.is_empty());
 	assert_eq!(updates.update_fail_htlcs.len(), 1);
 	assert!(updates.update_fee.is_none());
-	nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &updates.commitment_signed);
+	nodes[1].node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]);
+	nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed);
 	check_added_monitors!(nodes[1], 1);
 	// Note that nodes[1] is in AwaitingRAA, so won't send a CS
-	let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
-	nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_raa);
+	let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id);
+	nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa);
 	check_added_monitors!(nodes[2], 1);
 
 	nodes[2].node.fail_htlc_backwards(&third_payment_hash);
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: third_payment_hash }]);
 	check_added_monitors!(nodes[2], 1);
-	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	let updates = get_htlc_update_msgs!(nodes[2], node_b_id);
 	assert!(updates.update_add_htlcs.is_empty());
 	assert!(updates.update_fulfill_htlcs.is_empty());
 	assert!(updates.update_fail_malformed_htlcs.is_empty());
 	assert_eq!(updates.update_fail_htlcs.len(), 1);
 	assert!(updates.update_fee.is_none());
-	nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
nodes[1].node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); // At this point first_payment_hash has dropped out of the latest two commitment // transactions that nodes[1] is tracking... - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed); check_added_monitors!(nodes[1], 1); // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS - let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_raa); + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); check_added_monitors!(nodes[2], 1); // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting @@ -3719,7 +3856,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use check_added_monitors!(nodes[1], 0); if deliver_bs_raa { - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_raa); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_raa); // One monitor for the new revocation preimage, no second one as we won't generate a new // commitment transaction for nodes[0] until process_pending_htlc_forwards(). check_added_monitors!(nodes[1], 1); @@ -3766,10 +3903,10 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 }); if deliver_bs_raa { - let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); + let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events); match nodes_2_event { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { - assert_eq!(nodes[2].node.get_our_node_id(), *node_id); + assert_eq!(node_c_id, *node_id); assert_eq!(update_add_htlcs.len(), 1); assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); @@ -3779,7 +3916,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use } } - let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); + let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events); match nodes_2_event { MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => { assert_eq!(channel_id, chan_2.2); @@ -3788,18 +3925,18 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use _ => panic!("Unexpected event"), } - let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events); + let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut events); match nodes_0_event { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, ..
} } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 3); assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); - assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + assert_eq!(node_a_id, *node_id); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[1]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[2]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); @@ -3883,6 +4020,9 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack. @@ -3897,7 +4037,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.node_id, node_b_id); assert_eq!(payment_event.msgs.len(), 1); } @@ -3935,7 +4075,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { skimmed_fee_msat: None, blinding_point: None, }; - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &update_add_htlc); + nodes[0].node.handle_update_add_htlc(node_b_id, &update_add_htlc); } let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 3); @@ -3970,6 +4110,10 @@ pub fn test_htlc_ignore_latest_remote_commitment() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + if *nodes[1].connect_style.borrow() == ConnectStyle::FullBlockViaListen { // We rely on the ability to connect a block redundantly, which isn't allowed via // `chain::Listen`, so we never run the test if we randomly get assigned that @@ -3979,11 +4123,11 @@ pub fn test_htlc_ignore_latest_remote_commitment() { let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3; let error_message = "Channel force-closed"; route_payment(&nodes[0], &[&nodes[1]], 10000000); - nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &node_b_id, error_message.to_string()).unwrap(); connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, 
ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); assert_eq!(node_txn.len(), 2); @@ -3994,7 +4138,7 @@ pub fn test_htlc_ignore_latest_remote_commitment() { connect_block(&nodes[1], &block); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); // Duplicate the connect_block call since this may happen due to other listeners // registering new transactions @@ -4008,6 +4152,11 @@ pub fn test_force_close_fail_back() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); @@ -4023,7 +4172,7 @@ pub fn test_force_close_fail_back() { SendEvent::from_event(events.remove(0)) }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -4034,20 +4183,20 @@ pub fn test_force_close_fail_back() { assert_eq!(payment_event.msgs.len(), 1); check_added_monitors!(nodes[1], 1); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); check_added_monitors!(nodes[2], 1); - let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let (_, _) = get_revoke_commit_msgs!(nodes[2], node_b_id); // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
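// Failing backwards now would be premature: nodes[2] holds the preimage and can still
// claim the HTLC from the broadcast commitment transaction, so nodes[1] must wait for
// the on-chain resolution before resolving anything upstream.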
let error_message = "Channel force-closed"; let channel_id = payment_event.commitment_msg[0].channel_id; - nodes[2].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[2].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_b_id], 100000); let commitment_tx = { let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't @@ -4062,7 +4211,7 @@ pub fn test_force_close_fail_back() { // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. { @@ -4095,6 +4244,10 @@ pub fn test_dup_events_on_peer_disconnect() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); @@ -4102,12 +4255,12 @@ pub fn test_dup_events_on_peer_disconnect() { nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, 1_000_000); check_added_monitors!(nodes[1], 1); - let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]); + let claim_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &claim_msgs.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.pending_htlc_claims.0 = 1; @@ -4124,20 +4277,23 @@ pub fn test_peer_disconnected_before_funding_broadcasted() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never // broadcasted, even though it's created by `nodes[0]`. 
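// The exchange below runs create_channel -> open_channel -> accept_channel and builds
// the funding transaction locally; the resulting funding_created message is retrieved
// but deliberately never delivered, so the channel is never funded on either side.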
- let expected_temporary_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap(); - let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel); - let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); + let expected_temporary_channel_id = nodes[0].node.create_channel(node_b_id, 1_000_000, 500_000_000, 42, None, None).unwrap(); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); - let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); assert_eq!(temporary_channel_id, expected_temporary_channel_id); - assert!(nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).is_ok()); + assert!(nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).is_ok()); - let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id); // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is @@ -4148,8 +4304,8 @@ pub fn test_peer_disconnected_before_funding_broadcasted() { } // The peers disconnect before the funding is broadcasted. - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); // The time for peers to reconnect expires. for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS { @@ -4160,9 +4316,9 @@ pub fn test_peer_disconnected_before_funding_broadcasted() { // `DiscardFunding` event when the peers are disconnected and do not reconnect before the // funding transaction is broadcasted. 
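// nodes[0] created the funding transaction, so it expects two events here (the channel
// closure plus DiscardFunding); nodes[1] never saw the funding and only sees the close.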
check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true - , [nodes[1].node.get_our_node_id()], 1000000); + , [node_b_id], 1000000); check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false - , [nodes[0].node.get_our_node_id()], 1000000); + , [node_a_id], 1000000); } #[xtest(feature = "_externalize_tests")] @@ -4172,11 +4328,15 @@ pub fn test_simple_peer_disconnect() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready = (true, true); reconnect_nodes(reconnect_args); @@ -4186,8 +4346,8 @@ pub fn test_simple_peer_disconnect() { fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2); claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000); @@ -4195,8 +4355,8 @@ pub fn test_simple_peer_disconnect() { let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage_3) @@ -4249,6 +4409,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let mut as_channel_ready = None; let channel_id = if messages_delivered == 0 { let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001); @@ -4273,30 +4436,30 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) }; - assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id); + assert_eq!(node_b_id, payment_event.node_id); if messages_delivered < 2 { // Drop the payment_event messages, and let them get re-generated in reconnect_nodes! 
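// (reconnect_nodes will replay the dropped update_add_htlc and its commitment_signed
// as part of the channel_reestablish exchange.)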
} else { - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); if messages_delivered >= 3 { - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); if messages_delivered >= 4 { - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); if messages_delivered >= 5 { - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment_signed); - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); if messages_delivered >= 6 { - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); } @@ -4305,8 +4468,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken } } - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); if messages_delivered < 3 { if simulate_broken_lnd { // lnd has a long-standing bug where they send a channel_ready prior to a @@ -4317,7 +4480,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken // in `reconnect_nodes` but we currently don't fail based on that. // // See-also - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready.as_ref().unwrap().0); + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready.as_ref().unwrap().0); } // Even if the channel_ready messages get exchanged, as long as nothing further was // received on either side, both sides will need to resend them. @@ -4365,8 +4528,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken }; } - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); nodes[1].node.process_pending_htlc_forwards(); @@ -4377,7 +4540,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. 
} => { assert_eq!(payment_hash_1, *payment_hash); assert_eq!(amount_msat, 1_000_000); - assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); + assert_eq!(receiver_node_id.unwrap(), node_b_id); assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { @@ -4398,7 +4561,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken assert_eq!(events_3.len(), 1); let (update_fulfill_htlc, commitment_signed) = match events_3[0] { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -4410,7 +4573,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken }; if messages_delivered >= 1 { - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_htlc); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_htlc); let events_4 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_4.len(), 1); @@ -4423,23 +4586,23 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken } if messages_delivered >= 2 { - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); check_added_monitors!(nodes[0], 1); - let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id); if messages_delivered >= 3 { - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); if messages_delivered >= 4 { - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment_signed); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); if messages_delivered >= 5 { - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); } @@ -4448,8 +4611,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken } } - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); if messages_delivered < 2 { let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.pending_htlc_claims.0 = 1; @@ -4484,8 +4647,8 @@ fn 
do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken expect_payment_path_successful!(nodes[0]); } if messages_delivered <= 5 { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); } reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); @@ -4525,6 +4688,10 @@ pub fn test_channel_ready_without_best_block_updated() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks; let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0); @@ -4537,8 +4704,8 @@ pub fn test_channel_ready_without_best_block_updated() { nodes[0].node.transactions_confirmed(&conf_block_header, &conf_txn[..], conf_height); // Ensure nodes[0] generates a channel_ready after the transactions_confirmed - let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready); } #[xtest(feature = "_externalize_tests")] @@ -4548,6 +4715,9 @@ pub fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Let channel_manager get ahead of chain_monitor by 1 block. // This is to emulate race-condition where newly added channel_monitor skips processing 1 block, // in case where client calls block_connect on channel_manager first and then on chain_monitor. @@ -4569,8 +4739,8 @@ pub fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() { connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH); // Ensure nodes[0] generates a channel_ready after the transactions_confirmed - let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready); } #[xtest(feature = "_externalize_tests")] @@ -4580,6 +4750,9 @@ pub fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Let chain_monitor get ahead of channel_manager by 1 block. // This is to emulate race-condition where newly added channel_monitor skips processing 1 block, // in case where client calls block_connect on chain_monitor first and then on channel_manager. 
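// Whichever component lags by a block, both must converge on the same confirmed
// height before the channel_ready exchange below can proceed.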
@@ -4604,8 +4777,8 @@ pub fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() { connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH); // Ensure nodes[0] generates a channel_ready after the transactions_confirmed - let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready); } #[xtest(feature = "_externalize_tests")] @@ -4616,6 +4789,10 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); @@ -4641,14 +4818,14 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert_eq!(events_2.len(), 1); match events_2[0] { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); assert!(update_fail_malformed_htlcs.is_empty()); assert!(update_fee.is_none()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_htlcs[0]); let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { @@ -4659,31 +4836,31 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), commitment_signed); - let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); + let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); }, _ => panic!("Unexpected event"), } - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { + nodes[0].node.peer_connected(node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, remote_network_address: None }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { + nodes[1].node.peer_connected(node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, remote_network_address: None 
}, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); assert!(as_resp.0.is_none()); @@ -4699,14 +4876,14 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); assert!(as_resp.2.as_ref().unwrap().update_fee.is_none()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(node_a_id, &as_resp.2.as_ref().unwrap().update_add_htlcs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_resp.2.as_ref().unwrap().commitment_signed); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()); - let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(node_a_id, as_resp.1.as_ref().unwrap()); + let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(bs_second_commitment_signed.update_add_htlcs.is_empty()); assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty()); assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty()); @@ -4714,8 +4891,8 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(bs_second_commitment_signed.update_fee.is_none()); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); - let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); + let as_commitment_signed = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(as_commitment_signed.update_add_htlcs.is_empty()); assert!(as_commitment_signed.update_fulfill_htlcs.is_empty()); assert!(as_commitment_signed.update_fail_htlcs.is_empty()); @@ -4723,17 +4900,17 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(as_commitment_signed.update_fee.is_none()); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed); - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment_signed.commitment_signed); + let as_revoke_and_ack = get_event_msg!(nodes[0], 
MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed); - let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed.commitment_signed); + let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); @@ -4755,7 +4932,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { _ => panic!("Unexpected event"), } - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); @@ -4771,6 +4948,9 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let our_payment_hash = if send_partial_mpp { @@ -4808,13 +4988,13 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); - let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(htlc_timeout_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1); assert!(htlc_timeout_updates.update_fail_malformed_htlcs.is_empty()); assert!(htlc_timeout_updates.update_fee.is_none()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_timeout_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false); // 100_000 msat as u64, followed by the height at which we failed back above let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec(); @@ -4834,6 +5014,11 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -4857,7 +5042,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { if 
forwarded_htlc { check_added_monitors!(nodes[0], 1); let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); } @@ -4869,13 +5054,13 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { connect_blocks(&nodes[1], 1); if forwarded_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); match fail_commit[0] { MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => { - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true); }, _ => unreachable!(), @@ -4928,12 +5113,14 @@ pub fn test_claim_sizeable_push_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000); let error_message = "Channel force-closed"; - nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &node_a_id, error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan.3); @@ -4957,13 +5144,17 @@ pub fn test_claim_on_remote_sizeable_push_msat() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let error_message = "Channel force-closed"; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000); - nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &node_b_id, error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { 
broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); @@ -4973,7 +5164,7 @@ pub fn test_claim_on_remote_sizeable_push_msat() { mine_transaction(&nodes[1], &node_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -4991,6 +5182,8 @@ pub fn test_claim_on_remote_revoked_sizeable_push_msat() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000); let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2); @@ -5001,7 +5194,7 @@ pub fn test_claim_on_remote_revoked_sizeable_push_msat() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); mine_transaction(&nodes[1], &node_txn[0]); @@ -5021,6 +5214,8 @@ pub fn test_static_spendable_outputs_preimage_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -5053,7 +5248,7 @@ pub fn test_static_spendable_outputs_preimage_tx() { assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); mine_transaction(&nodes[1], &node_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -5068,6 +5263,8 @@ pub fn test_static_spendable_outputs_timeout_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -5097,7 +5294,7 @@ pub fn test_static_spendable_outputs_timeout_tx() { assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); mine_transaction(&nodes[1], &node_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, 
ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], our_payment_hash, false); @@ -5114,6 +5311,8 @@ fn do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(split_tx: b let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -5131,7 +5330,7 @@ fn do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(split_tx: b mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); // If the HTLC expires in more than COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE blocks, we'll // claim both the revoked and HTLC outputs in one transaction, otherwise we'll split them as we @@ -5169,6 +5368,9 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -5183,7 +5385,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -5201,7 +5403,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); // There will be 2 justice transactions: // - One on the unpinnable, revoked to_self output on the commitment transaction and on @@ -5239,6 +5441,9 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -5256,7 +5461,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - 
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(revoked_htlc_txn.len(), 1); @@ -5272,7 +5477,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); // There will be 2 justice transactions, one on the revoked HTLC output on the commitment // transaction, and one on the revoked to_self output on the HTLC-success transaction. @@ -5317,6 +5522,10 @@ pub fn test_onchain_to_onchain_claim() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -5337,7 +5546,7 @@ pub fn test_onchain_to_onchain_claim() { nodes[2].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -5346,7 +5555,7 @@ pub fn test_onchain_to_onchain_claim() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx) assert_eq!(c_txn.len(), 1); @@ -5379,8 +5588,8 @@ pub fn test_onchain_to_onchain_claim() { check_added_monitors!(nodes[1], 1); let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); - let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut msg_events); - let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut msg_events); + let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut msg_events); + let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut msg_events); match nodes_2_event { MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. 
}, node_id: _ } => {}, @@ -5393,7 +5602,7 @@ pub fn test_onchain_to_onchain_claim() { assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_malformed_htlcs.is_empty()); - assert_eq!(nodes[0].node.get_our_node_id(), *node_id); + assert_eq!(node_a_id, *node_id); }, _ => panic!("Unexpected event"), }; @@ -5407,7 +5616,7 @@ pub fn test_onchain_to_onchain_claim() { // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2); mine_transaction(&nodes[1], &commitment_tx[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: HTLC-Success tx assert_eq!(b_txn.len(), 1); @@ -5436,6 +5645,11 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]); let mut nodes = create_network(5, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_e_id = nodes[4].node.get_our_node_id(); + // Create the required channels and route one HTLC from A to D and another from A to E. create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -5452,7 +5666,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 900_000); let payment_secret = nodes[4].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap(); - let payment_params = PaymentParameters::from_node_id(nodes[4].node.get_our_node_id(), TEST_FINAL_CLTV) + let payment_params = PaymentParameters::from_node_id(node_e_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[4].node.bolt11_invoice_features()).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[4], payment_params, 800_000); send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[4]]], 800_000, duplicate_payment_hash, payment_secret); @@ -5467,7 +5681,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { mine_transaction(&nodes[1], &commitment_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); // Confirm blocks until both HTLCs expire and get a transaction which times out one HTLC. 
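// (TEST_FINAL_CLTV covers the recipients' final CLTV, and the added cltv_expiry_delta
// covers the remaining forwarding hop at nodes[2], so both HTLCs end up expired.)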
connect_blocks(&nodes[1], TEST_FINAL_CLTV + config.channel_config.cltv_expiry_delta as u32); @@ -5500,9 +5714,9 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { nodes[4].node.claim_funds(our_payment_preimage); expect_payment_claimed!(nodes[4], duplicate_payment_hash, 800_000); check_added_monitors!(nodes[4], 1); - let updates = get_htlc_update_msgs!(nodes[4], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fulfill_htlc(nodes[4].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - let _cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[4], node_c_id); + nodes[2].node.handle_update_fulfill_htlc(node_e_id, &updates.update_fulfill_htlcs[0]); + let _cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); expect_payment_forwarded!(nodes[2], nodes[1], nodes[4], Some(196), false, false); check_added_monitors!(nodes[2], 1); commitment_signed_dance!(nodes[2], nodes[4], &updates.commitment_signed, false); @@ -5512,7 +5726,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // preimage). mine_transaction(&nodes[2], &commitment_txn[0]); check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); check_closed_broadcast(&nodes[2], 1, true); let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); @@ -5538,8 +5752,8 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // Mine the HTLC timeout transaction on node B. mine_transaction(&nodes[1], &htlc_timeout_tx); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); - let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]); + let htlc_updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id; @@ -5547,7 +5761,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_updates.update_fail_htlcs[0]); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true); expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true); @@ -5556,7 +5770,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // provide to node A. 
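// Confirming the HTLC-success transaction exposes the preimage in its witness; the
// ChannelMonitor on nodes[1] extracts it and fulfills the HTLC back to nodes[0].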
mine_transaction(&nodes[1], htlc_success_tx_to_confirm); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(392), true, true); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); @@ -5564,7 +5778,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { assert!(updates.update_fail_malformed_htlcs.is_empty()); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true); } @@ -5576,6 +5790,8 @@ pub fn test_dynamic_spendable_outputs_local_htlc_success_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -5592,7 +5808,7 @@ pub fn test_dynamic_spendable_outputs_local_htlc_success_tx() { mine_transaction(&nodes[1], &local_txn[0]); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let events = nodes[1].node.get_and_clear_pending_msg_events(); match events[0] { MessageSendEvent::UpdateHTLCs { .. } => {}, @@ -5645,6 +5861,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]); let nodes = create_network(6, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let node_e_id = nodes[4].node.get_our_node_id(); + let node_f_id = nodes[5].node.get_our_node_id(); + let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2); let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3); @@ -5656,7 +5879,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000); assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2); - let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id()) + let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&node_c_id) .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis; // 0th HTLC: let (_, payment_hash_1, ..) 
= route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee @@ -5712,11 +5935,11 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations); check_added_monitors!(nodes[4], 1); - let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id()); - nodes[3].node.handle_update_fail_htlc(nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]); - nodes[3].node.handle_update_fail_htlc(nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]); - nodes[3].node.handle_update_fail_htlc(nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]); - nodes[3].node.handle_update_fail_htlc(nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]); + let four_removes = get_htlc_update_msgs!(nodes[4], node_d_id); + nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[0]); + nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[1]); + nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[2]); + nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[3]); commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false); // Fail 3rd below-dust and 7th above-dust HTLCs @@ -5731,31 +5954,31 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2); check_added_monitors!(nodes[5], 1); - let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id()); - nodes[3].node.handle_update_fail_htlc(nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]); - nodes[3].node.handle_update_fail_htlc(nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]); + let two_removes = get_htlc_update_msgs!(nodes[5], node_d_id); + nodes[3].node.handle_update_fail_htlc(node_f_id, &two_removes.update_fail_htlcs[0]); + nodes[3].node.handle_update_fail_htlc(node_f_id, &two_removes.update_fail_htlcs[1]); commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false); let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2); // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 PaymentForwardedFailed events let failed_destinations_3 = vec![ - HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingFailureType::Forward { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, - HTLCHandlingFailureType::Forward { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, - HTLCHandlingFailureType::Forward { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(node_e_id), channel_id: chan_3_4.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(node_e_id), channel_id: chan_3_4.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(node_e_id), channel_id: chan_3_4.2 }, + HTLCHandlingFailureType::Forward { node_id: Some(node_e_id), channel_id: chan_3_4.2 }, + 
HTLCHandlingFailureType::Forward { node_id: Some(node_f_id), channel_id: chan_3_5.2 },
+ HTLCHandlingFailureType::Forward { node_id: Some(node_f_id), channel_id: chan_3_5.2 },
];
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
check_added_monitors!(nodes[3], 1);
- let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
- nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
- nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]);
- nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]);
- nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]);
- nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]);
- nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]);
+ let six_removes = get_htlc_update_msgs!(nodes[3], node_c_id);
+ nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[0]);
+ nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[1]);
+ nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[2]);
+ nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[3]);
+ nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[4]);
+ nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[5]);
if deliver_last_raa {
commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
} else {
@@ -5798,13 +6021,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
if deliver_last_raa {
expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true);
- let expected_destinations: Vec<HTLCHandlingFailureType> = repeat(HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
+ let expected_destinations: Vec<HTLCHandlingFailureType> = repeat(HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }).take(3).collect();
expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
} else {
let expected_destinations: Vec<HTLCHandlingFailureType> = if announce_latest {
- repeat(HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
+ repeat(HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }).take(9).collect()
} else {
- repeat(HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
+ repeat(HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }).take(6).collect()
};
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
@@ -5819,7 +6042,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => {
// Both under-dust HTLCs and the one above-dust HTLC that we had already failed
// should be failed-backwards here.
- let target = if *node_id == nodes[0].node.get_our_node_id() { + let target = if *node_id == node_a_id { // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs for htlc in &updates.update_fail_htlcs { assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false }); @@ -5833,17 +6056,17 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno for htlc in &updates.update_fail_htlcs { assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false }); } - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + assert_eq!(*node_id, node_b_id); assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 }); &nodes[1] }; - target.node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - target.node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]); - target.node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]); + target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); + target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[1]); + target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[2]); if announce_latest { - target.node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]); - if *node_id == nodes[0].node.get_our_node_id() { - target.node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]); + target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[3]); + if *node_id == node_a_id { + target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[4]); } } commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true); @@ -5935,6 +6158,8 @@ pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -5947,7 +6172,7 @@ pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { mine_transaction(&nodes[0], &local_txn[0]); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let htlc_timeout = { @@ -6001,6 +6226,8 @@ pub fn test_key_derivation_params() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channels // Create a dummy channel to advance index by one and thus test re-derivation correctness // for node 0 @@ -6035,7 +6262,7 @@ pub fn test_key_derivation_params() { connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, 
[nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); let htlc_timeout = { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -6071,13 +6298,16 @@ pub fn test_static_output_closing_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2; mine_transaction(&nodes[0], &closing_tx); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); @@ -6085,7 +6315,7 @@ pub fn test_static_output_closing_tx() { check_spends!(spend_txn[0], closing_tx); mine_transaction(&nodes[1], &closing_tx); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -6098,6 +6328,10 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 }); @@ -6108,14 +6342,14 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 }); - let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); + let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_updates.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_updates.0); + let as_updates = get_revoke_commit_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_updates.0); check_added_monitors!(nodes[1], 1); let starting_block = nodes[1].best_block_info(); @@ -6127,7 +6361,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [node_a_id], 100000); } fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { @@ -6135,6 +6369,9 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 }); @@ -6142,7 +6379,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); - let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let _as_update = get_htlc_update_msgs!(nodes[0], node_b_id); // As far as A is concerned, the HTLC is now present only in the latest remote commitment // transaction, however it is not in A's latest local commitment, so we can just broadcast that @@ -6158,7 +6395,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [node_b_id], 100000); } fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) { @@ -6166,6 +6403,10 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: 
bool, check_revoke_no let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present @@ -6178,19 +6419,19 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); check_added_monitors!(nodes[1], 1); - let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); + let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_updates.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_updates.0); + let as_updates = get_revoke_commit_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_updates.0); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_updates.1); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_updates.1); check_added_monitors!(nodes[1], 1); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); if check_revoke_no_close { - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); check_added_monitors!(nodes[0], 1); } @@ -6204,7 +6445,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [node_b_id], 100000); } else { expect_payment_failed!(nodes[0], our_payment_hash, true); } @@ -6245,6 +6486,10 @@ pub fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Force duplicate randomness for every get-random call for node in nodes.iter() { *node.keys_manager.override_random_bytes.lock().unwrap() = Some([0; 32]); @@ -6253,14 +6498,14 @@ pub fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be // BOLT #2 spec: Sending node must ensure 
temporary_channel_id is unique from any other channel ID with the same peer.
let channel_value_satoshis=10000;
let push_msat=10001;
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
- let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
- get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).unwrap();
+ let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
+ nodes[1].node.handle_open_channel(node_a_id, &node0_to_1_send_open_channel);
+ get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
// Create a second channel with the same random values. This used to panic due to a colliding
// channel_id, but now panics due to a colliding outbound SCID alias.
- assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
+ assert!(nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).is_err());
}
#[xtest(feature = "_externalize_tests")]
@@ -6270,22 +6515,24 @@ pub fn bolt2_open_channel_sending_node_checks_part2() {
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let node_b_id = nodes[1].node.get_our_node_id();
+
// BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
let channel_value_satoshis=2^24;
let push_msat=10001;
- assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
+ assert!(nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).is_err());
// BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
let channel_value_satoshis=10000;
// Test when push_msat is one more than 1000 * funding_satoshis.
let push_msat=1000*channel_value_satoshis+1;
- assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_err());
+ assert!(nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).is_err());
// BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
let channel_value_satoshis=10000;
let push_msat=10001;
- assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel
- let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ assert!(nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel
+ let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis);
// BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
@@ -6315,14 +6562,17 @@ pub fn bolt2_open_channel_sane_dust_limit() {
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
let channel_value_satoshis=1000000;
let push_msat=10001;
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42, None, None).unwrap();
- let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).unwrap();
+ let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547;
node0_to_1_send_open_channel.channel_reserve_satoshis = 100001;
- nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &node0_to_1_send_open_channel);
+ nodes[1].node.handle_open_channel(node_a_id, &node0_to_1_send_open_channel);
let events = nodes[1].node.get_and_clear_pending_msg_events();
let err_msg = match events[0] {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
@@ -6343,6 +6593,10 @@ pub fn test_fail_holding_cell_htlc_upon_free() {
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
// First nodes[0] generates an update_fee, setting the channel's
@@ -6363,7 +6617,7 @@ pub fn test_fail_holding_cell_htlc_upon_free() {
_ => panic!("Unexpected event"),
};
- nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap());
+ nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap());
let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
let channel_reserve = chan_stat.channel_reserve_msat;
@@ -6381,10 +6635,10 @@ pub fn test_fail_holding_cell_htlc_upon_free() {
assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); // Flush the pending fee update. - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_revoke_and_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &as_revoke_and_ack); check_added_monitors!(nodes[0], 1); // Upon receipt of the RAA, there will be an attempt to resend the holding cell @@ -6423,6 +6677,10 @@ pub fn test_free_and_fail_holding_cell_htlcs() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); // First nodes[0] generates an update_fee, setting the channel's @@ -6443,7 +6701,7 @@ pub fn test_free_and_fail_holding_cell_htlcs() { _ => panic!("Unexpected event"), }; - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msg.unwrap()); + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); let channel_reserve = chan_stat.channel_reserve_msat; @@ -6468,11 +6726,11 @@ pub fn test_free_and_fail_holding_cell_htlcs() { assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2); // Flush the pending fee update. - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), commitment_signed); - let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &revoke_and_ack); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_and_ack); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); check_added_monitors!(nodes[0], 2); // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs, @@ -6511,9 +6769,9 @@ pub fn test_free_and_fail_holding_cell_htlcs() { MessageSendEvent::SendRevokeAndACK { msg, .. 
} => msg, _ => panic!("Unexpected event"), }; - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -6532,8 +6790,8 @@ pub fn test_free_and_fail_holding_cell_htlcs() { check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, amt_1); - let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]); + let update_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_msgs.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true); expect_payment_sent!(nodes[0], payment_preimage_1); } @@ -6552,6 +6810,11 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { config.channel_config.forwarding_fee_proportional_millionths = 0; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000); @@ -6573,7 +6836,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { _ => panic!("Unexpected event"), }; - nodes[2].node.handle_update_fee(nodes[1].node.get_our_node_id(), update_msg.unwrap()); + nodes[2].node.handle_update_fee(node_b_id, update_msg.unwrap()); let mut chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan_0_1.2); let channel_reserve = chan_stat.channel_reserve_msat; @@ -6593,7 +6856,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { SendEvent::from_event(events.remove(0)) }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -6602,11 +6865,11 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); // Flush the pending fee update. 
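// (Editor's note, illustrative only: "flushing" the fee update is the standard
// two-phase commitment handshake -- deliver the pending `commitment_signed`,
// collect the `revoke_and_ack` and counter-`commitment_signed` it produces, and
// deliver those back; the final `revoke_and_ack` handled just below then
// completes the update.)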
- nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), commitment_signed); - let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); + let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); check_added_monitors!(nodes[2], 1); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &raa); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &commitment_signed); + nodes[1].node.handle_revoke_and_ack(node_c_id, &raa); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &commitment_signed); check_added_monitors!(nodes[1], 2); // A final RAA message is generated to finalize the fee update. @@ -6620,7 +6883,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { _ => panic!("Unexpected event"), }; - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa_msg); + nodes[2].node.handle_revoke_and_ack(node_b_id, &raa_msg); check_added_monitors!(nodes[2], 1); assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); @@ -6651,14 +6914,14 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { }; // Pass the failure messages back to nodes[0]. - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_msg); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed); + nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_msg); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); // Complete the HTLC failure+removal process. - let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_signed); + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed); check_added_monitors!(nodes[1], 2); let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(final_raa_event.len(), 1); @@ -6666,7 +6929,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { &MessageSendEvent::SendRevokeAndACK { ref msg, .. 
} => msg.clone(), _ => panic!("Unexpected event"), }; - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &raa); expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false); check_added_monitors!(nodes[0], 1); } @@ -6679,9 +6942,12 @@ pub fn test_payment_route_reaching_same_channel_twice() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) + let payment_params = PaymentParameters::from_node_id(node_b_id, 0) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); @@ -6707,6 +6973,7 @@ pub fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); @@ -6725,6 +6992,7 @@ pub fn test_update_add_htlc_bolt2_sender_zero_value_msat() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); @@ -6745,21 +7013,25 @@ pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].amount_msat = 0; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3); check_closed_broadcast!(nodes[1], true).unwrap(); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat 
HTLC".to_string() }, - [nodes[0].node.get_our_node_id()], 100000); + [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -6770,9 +7042,12 @@ pub fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); - let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) + let payment_params = PaymentParameters::from_node_id(node_b_id, 0) .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001; @@ -6791,8 +7066,11 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increme let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); - let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id()) + let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&node_a_id) .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64; // Fetch a route in advance as we will be unable to once we're unable to send. 
@@ -6813,7 +7091,7 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increme } SendEvent::from_event(events.remove(0)) }; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); @@ -6834,6 +7112,7 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let channel_value = 100000; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0); let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat; @@ -6860,11 +7139,15 @@ pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let htlc_minimum_msat: u64; { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); let channel = chan_lock.channel_by_id.get(&chan.2).unwrap(); htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat(); } @@ -6873,14 +7156,14 @@ pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. 
Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -6890,6 +7173,10 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); @@ -6904,19 +7191,19 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); // Even though channel-initiator senders are required to respect the fee_spike_reserve, // at this time channel-initiatee receivers are not required to enforce that senders // respect the fee_spike_reserve. updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -6927,6 +7214,9 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let send_amt = 3999999; @@ -6954,16 +7244,16 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { for i in 0..50 { msg.htlc_id = i as u64; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); } msg.htlc_id = (50) as u64; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); - 
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -6973,21 +7263,25 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 1000000); } #[xtest(feature = "_externalize_tests")] @@ -6998,20 +7292,23 @@ pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].cltv_expiry = 500000000; - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height"); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, 
[nodes[0].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -7024,47 +7321,50 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	nodes[0].node.send_payment_with_route(route, our_payment_hash,
 		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
-	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+	let updates = get_htlc_update_msgs!(nodes[0], node_b_id);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
 
 	//Disconnect and Reconnect
-	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
-	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
-	nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
+	nodes[0].node.peer_disconnected(node_b_id);
+	nodes[1].node.peer_disconnected(node_a_id);
+	nodes[0].node.peer_connected(node_b_id, &msgs::Init {
 		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
 	}, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
-	nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
+	nodes[1].node.peer_connected(node_a_id, &msgs::Init {
 		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
 	}, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 1);
-	nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+	nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]);
 	handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
-	nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+	nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]);
 	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
 	//Resend HTLC
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
 	assert_eq!(updates.commitment_signed.len(), 1);
 	assert_eq!(updates.commitment_signed[0].htlc_signatures.len(), 1);
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &updates.commitment_signed);
+	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &updates.commitment_signed);
 	check_added_monitors!(nodes[1], 1);
-	let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	let _bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id);
 
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
 
 	assert!(nodes[1].node.list_channels().is_empty());
 	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
 	assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[0].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -7075,14 +7375,18 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	nodes[0].node.send_payment_with_route(route, our_payment_hash,
 		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
-	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+	let updates = get_htlc_update_msgs!(nodes[0], node_b_id);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
 
 	let update_msg = msgs::UpdateFulfillHTLC{
 		channel_id: chan.2,
@@ -7090,13 +7394,13 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
 		htlc_id: 0,
 		payment_preimage: our_payment_preimage,
 	};
 
-	nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_msg);
+	nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_msg);
 
 	assert!(nodes[0].node.list_channels().is_empty());
 	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
 	assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -7107,14 +7411,18 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	nodes[0].node.send_payment_with_route(route, our_payment_hash,
 		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
-	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+	let updates = get_htlc_update_msgs!(nodes[0], node_b_id);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
 
 	let update_msg = msgs::UpdateFailHTLC{
 		channel_id: chan.2,
@@ -7123,13 +7431,13 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
 		attribution_data: Some(AttributionData::new())
 	};
 
-	nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_msg);
+	nodes[0].node.handle_update_fail_htlc(node_b_id, &update_msg);
 
 	assert!(nodes[0].node.list_channels().is_empty());
 	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
 	assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -7140,14 +7448,18 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitme
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 	nodes[0].node.send_payment_with_route(route, our_payment_hash,
 		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
-	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+	let updates = get_htlc_update_msgs!(nodes[0], node_b_id);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
 	let update_msg = msgs::UpdateFailMalformedHTLC{
 		channel_id: chan.2,
 		htlc_id: 0,
@@ -7155,13 +7467,13 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitme
 		failure_code: 0x8000,
 	};
 
-	nodes[0].node.handle_update_fail_malformed_htlc(nodes[1].node.get_our_node_id(), &update_msg);
+	nodes[0].node.handle_update_fail_malformed_htlc(node_b_id, &update_msg);
 
 	assert!(nodes[0].node.list_channels().is_empty());
 	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
 	assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -7172,6 +7484,9 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
@@ -7198,13 +7513,13 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
 	update_fulfill_msg.htlc_id = 1;
 
-	nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_msg);
+	nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_msg);
 
 	assert!(nodes[0].node.list_channels().is_empty());
 	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
 	assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find");
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -7215,6 +7530,9 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
@@ -7241,13 +7559,13 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
 	update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
 
-	nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_msg);
+	nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_msg);
 
 	assert!(nodes[0].node.list_channels().is_empty());
 	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
 	assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -7258,6 +7576,10 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
 
 	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
@@ -7265,10 +7587,10 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me
 		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
 	check_added_monitors!(nodes[0], 1);
 
-	let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id);
 	updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
 
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
 	check_added_monitors!(nodes[1], 0);
 	commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
 	expect_pending_htlcs_forwardable!(nodes[1]);
@@ -7291,13 +7613,13 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me
 		}
 	};
 	update_msg.failure_code &= !0x8000;
-	nodes[0].node.handle_update_fail_malformed_htlc(nodes[1].node.get_our_node_id(), &update_msg);
+	nodes[0].node.handle_update_fail_malformed_htlc(node_b_id, &update_msg);
 
 	assert!(nodes[0].node.list_channels().is_empty());
 	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
 	assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set");
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [nodes[1].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 1000000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -7309,6 +7631,11 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
 	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
 	let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
 
@@ -7323,7 +7650,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_
 		assert_eq!(events.len(), 1);
 		SendEvent::from_event(events.remove(0))
 	};
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
 	check_added_monitors!(nodes[1], 0);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 	expect_pending_htlcs_forwardable!(nodes[1]);
@@ -7335,7 +7662,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_
 
 	//Second Hop
 	payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
-	nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]);
 	check_added_monitors!(nodes[2], 0);
 	commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
 	expect_pending_htlcs_forwardable!(nodes[2]);
@@ -7358,11 +7685,11 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_
 		}
 	};
 
-	nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), &update_msg.0);
+	nodes[1].node.handle_update_fail_malformed_htlc(node_c_id, &update_msg.0);
 	check_added_monitors!(nodes[1], 0);
 	commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]);
 	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_4.len(), 1);
 
@@ -7387,6 +7714,11 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 
@@ -7400,7 +7732,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
 		SendEvent::from_node(&nodes[0])
 	};
 
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	check_added_monitors!(nodes[1], 1);
@@ -7409,7 +7741,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
 
 	// Second Hop
 	payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
-	nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]);
 	check_added_monitors!(nodes[2], 0);
 	commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
 	expect_pending_htlcs_forwardable!(nodes[2]);
@@ -7424,7 +7756,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
 			// Set the NODE bit (BADONION and PERM already set in invalid_onion_version error)
 			update_msg.failure_code |= 0x2000;
 
-			nodes[1].node.handle_update_fail_malformed_htlc(nodes[2].node.get_our_node_id(), &update_msg);
+			nodes[1].node.handle_update_fail_malformed_htlc(node_c_id, &update_msg);
 			commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true);
 		},
 		_ => panic!("Unexpected event"),
@@ -7432,14 +7764,14 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
 	}
 
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
 		vec![HTLCHandlingFailureType::Forward {
-			node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
+			node_id: Some(node_c_id), channel_id: chan_2.2 }]);
 	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_4.len(), 1);
 	check_added_monitors!(nodes[1], 1);
 
 	match events_4[0] {
 		MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
-			nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+			nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]);
 			commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
 		},
 		_ => panic!("Unexpected event"),
@@ -7479,9 +7811,13 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-	let chan =create_announced_chan_between_nodes(&nodes, 0, 1);
-	let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&node_a_id)
 		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
 
 	// We route 2 dust-HTLCs between A and B
@@ -7498,9 +7834,9 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }]);
 	check_added_monitors!(nodes[1], 1);
 
-	let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-	nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]);
-	nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &remove.commitment_signed);
+	let remove = get_htlc_update_msgs!(nodes[1], node_a_id);
+	nodes[0].node.handle_update_fail_htlc(node_b_id, &remove.update_fail_htlcs[0]);
+	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &remove.commitment_signed);
 	check_added_monitors!(nodes[0], 1);
 
 	// Cache one local commitment tx as latest
 	let as_prev_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
 	let events = nodes[0].node.get_and_clear_pending_msg_events();
 	match events[0] {
 		MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
-			assert_eq!(node_id, nodes[1].node.get_our_node_id());
+			assert_eq!(node_id, node_b_id);
 		},
 		_ => panic!("Unexpected event"),
 	}
 	match events[1] {
 		MessageSendEvent::UpdateHTLCs { node_id, .. } => {
-			assert_eq!(node_id, nodes[1].node.get_our_node_id());
+			assert_eq!(node_id, node_b_id);
 		},
 		_ => panic!("Unexpected event"),
 	}
@@ -7530,7 +7866,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 
 	assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
@@ -7572,9 +7908,13 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
 	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-	let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
+	let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&node_a_id)
 		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
 
 	let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
@@ -7593,7 +7933,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
 	if local {
 		// We fail dust-HTLC 1 by broadcast of local commitment tx
 		mine_transaction(&nodes[0], &as_commitment_tx[0]);
-		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 		connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
 		expect_payment_failed!(nodes[0], dust_hash, false);
@@ -7613,7 +7953,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
 		mine_transaction(&nodes[0], &bs_commitment_tx[0]);
 		check_closed_broadcast!(nodes[0], true);
 		check_added_monitors!(nodes[0], 1);
-		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 		assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 
 		connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
@@ -7657,11 +7997,15 @@ pub fn test_user_configurable_csv_delay() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let logger = TestLogger::new();
 
 	// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
 	if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)),
-		&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
+		&nodes[0].keys_manager, &nodes[0].keys_manager, node_b_id, &nodes[1].node.init_features(), 1000000, 1000000, 0,
 		&low_our_to_self_config, 0, 42, None, &logger)
 	{
 		match error {
@@ -7671,11 +8015,11 @@ pub fn test_user_configurable_csv_delay() {
 	} else { assert!(false) }
 
 	// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
-	nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
-	let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
+	nodes[1].node.create_channel(node_a_id, 1000000, 1000000, 42, None, None).unwrap();
+	let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_a_id);
 	open_channel.common_fields.to_self_delay = 200;
 	if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)),
-		&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
+		&nodes[0].keys_manager, &nodes[0].keys_manager, node_b_id, &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
 		&low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
 	{
 		match error {
@@ -7688,11 +8032,11 @@ pub fn test_user_configurable_csv_delay() {
 	} else { assert!(false); }
 
 	// We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
-	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
-	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
-	let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+	nodes[0].node.create_channel(node_b_id, 1000000, 1000000, 42, None, None).unwrap();
+	nodes[1].node.handle_open_channel(node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id));
+	let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 	accept_channel.common_fields.to_self_delay = 200;
-	nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
+	nodes[0].node.handle_accept_channel(node_b_id, &accept_channel);
 	let reason_msg;
 	if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
 		match action {
@@ -7703,14 +8047,14 @@ pub fn test_user_configurable_csv_delay() {
 			_ => { panic!(); }
 		}
 	} else { panic!(); }
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [nodes[1].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [node_b_id], 1000000);
 
 	// We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
-	nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None, None).unwrap();
-	let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
+	nodes[1].node.create_channel(node_a_id, 1000000, 1000000, 42, None, None).unwrap();
+	let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_a_id);
 	open_channel.common_fields.to_self_delay = 200;
 	if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)),
-		&nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
+		&nodes[0].keys_manager, &nodes[0].keys_manager, node_b_id, &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
 		&high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
 	{
 		match error {
@@ -7734,15 +8078,18 @@ pub fn test_check_htlc_underpaying() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	// Create some initial channels
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let scorer = test_utils::TestScorer::new();
 	let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-	let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+	let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV)
 		.with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
 	let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000);
-	let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(),
+	let route = get_route(&node_a_id, &route_params, &nodes[0].network_graph.read_only(),
		None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
 	let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
 	let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap();
@@ -7753,7 +8100,7 @@ pub fn test_check_htlc_underpaying() {
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let mut payment_event = SendEvent::from_event(events.pop().unwrap());
-	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 
 	// Note that we first have to wait a random delay before processing the receipt of the HTLC,
@@ -7780,7 +8127,7 @@ pub fn test_check_htlc_underpaying() {
 	};
 	check_added_monitors!(nodes[1], 1);
 
-	nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlc);
+	nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlc);
 	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
 
 	// 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
@@ -7799,6 +8146,9 @@ pub fn test_announce_disable_channels() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	// Connect a dummy node for proper future events broadcasting
 	connect_dummy_node(&nodes[0]);
@@ -7807,8 +8157,8 @@ pub fn test_announce_disable_channels() {
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	// Disconnect peers
-	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
-	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
+	nodes[0].node.peer_disconnected(node_b_id);
+	nodes[1].node.peer_disconnected(node_a_id);
 
 	for _ in 0..DISABLE_GOSSIP_TICKS + 1 {
 		nodes[0].node.timer_tick_occurred();
@@ -7829,31 +8179,31 @@ pub fn test_announce_disable_channels() {
 		}
 	}
 	// Reconnect peers
-	nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
+	nodes[0].node.peer_connected(node_b_id, &msgs::Init {
 		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
 	}, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 3);
-	nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
+	nodes[1].node.peer_connected(node_a_id, &msgs::Init {
 		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
 	}, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 3);
 
 	// Reestablish chan_1
-	nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+	nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]);
 	handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
-	nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+	nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]);
 	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	// Reestablish chan_2
-	nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[1]);
+	nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[1]);
 	handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
-	nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[1]);
+	nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[1]);
 	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	// Reestablish chan_3
-	nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[2]);
+	nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[2]);
 	handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
-	nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[2]);
+	nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[2]);
 	handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
 
 	for _ in 0..ENABLE_GOSSIP_TICKS {
@@ -7891,10 +8241,12 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
 	let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
 
-	let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), TEST_FINAL_CLTV)
+	let payment_params = PaymentParameters::from_node_id(node_a_id, TEST_FINAL_CLTV)
 		.with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
 	let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000);
 	send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000);
@@ -7987,19 +8339,22 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
 	// Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
-	let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
+	let payment_params = PaymentParameters::from_node_id(node_b_id, 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
 	let scorer = test_utils::TestScorer::new();
 	let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
 	let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
-	let route = get_route(&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph.read_only(), None,
+	let route = get_route(&node_a_id, &route_params, &nodes[0].network_graph.read_only(), None,
 		nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
 	let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
-	let payment_params = PaymentParameters::from_node_id(nodes[0].node.get_our_node_id(), 50)
+	let payment_params = PaymentParameters::from_node_id(node_a_id, 50)
 		.with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap();
 	let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000);
-	let route = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None,
+	let route = get_route(&node_b_id, &route_params, &nodes[1].network_graph.read_only(), None,
 		nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap();
 	let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1;
 
@@ -8014,7 +8369,7 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() {
 	connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000);
 	connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
 
 	let revoked_htlc_txn = {
@@ -8258,13 +8613,17 @@ pub fn test_counterparty_raa_skip_no_crash() {
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
 
 	let per_commitment_secret;
 	let next_per_commitment_point;
 	{
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
-		let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+		let mut guard = per_peer_state.get(&node_b_id).unwrap().lock().unwrap();
 		let keys = guard.channel_by_id.get(&channel_id).and_then(Channel::as_funded).unwrap()
 			.get_signer();
 
@@ -8283,7 +8642,7 @@ pub fn test_counterparty_raa_skip_no_crash() {
 			&SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2).unwrap()).unwrap());
 	}
 
-	nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(),
+	nodes[1].node.handle_revoke_and_ack(node_a_id,
 		&msgs::RevokeAndACK {
 			channel_id,
 			per_commitment_secret,
@@ -8294,7 +8653,7 @@ pub fn test_counterparty_raa_skip_no_crash() {
 	assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }
-		, [nodes[0].node.get_our_node_id()], 100000);
+		, [node_a_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -8307,6 +8666,8 @@ pub fn test_bump_txn_sanitize_tracking_maps() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
 	// Lock HTLC in both directions
 	let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
@@ -8327,7 +8688,7 @@ pub fn test_bump_txn_sanitize_tracking_maps() {
 	mine_transaction(&nodes[0], &revoked_local_txn[0]);
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 1000000);
 	let penalty_txn = {
 		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(node_txn.len(), 2); //ChannelMonitor: justice txn * 2
@@ -8360,6 +8721,8 @@ pub fn test_channel_conf_timeout() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+	let node_a_id = nodes[0].node.get_our_node_id();
+
 	let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000);
 
 	// The outbound node should wait forever for confirmation:
@@ -8375,12 +8738,12 @@ pub fn test_channel_conf_timeout() {
 	connect_blocks(&nodes[1], 1);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [node_a_id], 1000000);
 	let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(close_ev.len(), 1);
 	match close_ev[0] {
 		MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => {
-			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+			assert_eq!(*node_id, node_a_id);
 			assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks");
 		},
 		_ => panic!("Unexpected event"),
@@ -8394,14 +8757,16 @@ pub fn test_override_channel_config() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	// Node0 initiates a channel to node1 using the override config.
 	let mut override_config = UserConfig::default();
 	override_config.channel_handshake_config.our_to_self_delay = 200;
 
-	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap();
+	nodes[0].node.create_channel(node_b_id, 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap();
 
 	// Assert the channel created by node0 is using the override config.
-	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 	assert_eq!(res.common_fields.channel_flags, 0);
 	assert_eq!(res.common_fields.to_self_delay, 200);
 }
 
#[xtest(feature = "_externalize_tests")]
@@ -8415,12 +8780,15 @@ pub fn test_override_0msat_htlc_minimum() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(zero_config.clone())]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
-	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	nodes[0].node.create_channel(node_b_id, 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap();
+	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 	assert_eq!(res.common_fields.htlc_minimum_msat, 1);
 
-	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &res);
-	let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+	nodes[1].node.handle_open_channel(node_a_id, &res);
+	let res = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 	assert_eq!(res.common_fields.htlc_minimum_msat, 1);
 }
 
@@ -8487,10 +8855,13 @@ pub fn test_manually_accept_inbound_channel_request() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
-	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap();
+	let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 
-	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &res);
+	nodes[1].node.handle_open_channel(node_a_id, &res);
 
 	// Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before
 	// accepting the inbound channel request.
@@ -8517,7 +8888,7 @@ pub fn test_manually_accept_inbound_channel_request() {
 	let events = nodes[1].node.get_and_clear_pending_events();
 	match events[0] {
 		Event::OpenChannelRequest { temporary_channel_id, .. } => {
-			nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23, Some(config_overrides)).unwrap();
+			nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 23, Some(config_overrides)).unwrap();
 		}
 		_ => panic!("Unexpected event"),
 	}
@@ -8528,7 +8899,7 @@ pub fn test_manually_accept_inbound_channel_request() {
 	let ref accept_channel: AcceptChannel;
 	match accept_msg_ev[0] {
 		MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
-			assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+			assert_eq!(*node_id, node_a_id);
 
 			// Assert overridden handshake parameter.
 			assert_eq!(msg.common_fields.max_accepted_htlcs, 3);
@@ -8539,18 +8910,18 @@ pub fn test_manually_accept_inbound_channel_request() {
 	}
 
 	// Continue channel opening process until channel update messages are sent.
-	nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
-	let (temporary_channel_id, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
-	nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_outpoint).unwrap();
+	nodes[0].node.handle_accept_channel(node_b_id, &accept_channel);
+	let (temporary_channel_id, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42);
+	nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, node_b_id, funding_outpoint).unwrap();
 	check_added_monitors!(nodes[0], 0);
 
-	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created);
+	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
+	nodes[1].node.handle_funding_created(node_a_id, &funding_created);
 	check_added_monitors!(nodes[1], 1);
-	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+	expect_channel_pending_event(&nodes[1], &node_a_id);
 
-	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
-	nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
+	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id);
+	nodes[0].node.handle_funding_signed(node_b_id, &funding_signed);
 	check_added_monitors!(nodes[0], 1);
 	let events = &nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 2);
@@ -8563,7 +8934,7 @@ pub fn test_manually_accept_inbound_channel_request() {
 	};
 	match &events[1] {
 		crate::events::Event::ChannelPending { counterparty_node_id, .. } => {
-			assert_eq!(*&nodes[1].node.get_our_node_id(), *counterparty_node_id);
+			assert_eq!(*&node_b_id, *counterparty_node_id);
 		},
 		_ => panic!("Unexpected event"),
 	};
@@ -8571,19 +8942,19 @@ pub fn test_manually_accept_inbound_channel_request() {
 	mine_transaction(&nodes[0], &tx);
 	mine_transaction(&nodes[1], &tx);
 
-	let as_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id());
-	nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready);
-	let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
-	nodes[0].node.handle_channel_ready(nodes[1].node.get_our_node_id(), &as_channel_ready);
+	let as_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, node_a_id);
+	nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready);
+	let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id);
+	nodes[0].node.handle_channel_ready(node_b_id, &as_channel_ready);
 
-	expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
-	expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
+	expect_channel_ready_event(&nodes[0], &node_b_id);
+	expect_channel_ready_event(&nodes[1], &node_a_id);
 
 	// Assert that the overridden base fee surfaces in the channel update.
- let channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); + let channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); assert_eq!(channel_update.contents.fee_base_msat, 555); - get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); } #[xtest(feature = "_externalize_tests")] @@ -8595,10 +8966,13 @@ pub fn test_manually_reject_inbound_channel_request() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap(); - let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap(); + let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &res); + nodes[1].node.handle_open_channel(node_a_id, &res); // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before // rejecting the inbound channel request. @@ -8607,7 +8981,7 @@ pub fn test_manually_reject_inbound_channel_request() { let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { - nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &node_a_id, error_message.to_string()).unwrap(); } _ => panic!("Unexpected event"), } @@ -8617,7 +8991,7 @@ pub fn test_manually_reject_inbound_channel_request() { match close_msg_ev[0] { MessageSendEvent::HandleError { ref node_id, .. } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); } _ => panic!("Unexpected event"), } @@ -8635,10 +9009,13 @@ pub fn test_can_not_accept_inbound_channel_twice() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap(); - let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &res); + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap(); + let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + + nodes[1].node.handle_open_channel(node_a_id, &res); // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before // accepting the inbound channel request. 
@@ -8647,8 +9024,8 @@ pub fn test_can_not_accept_inbound_channel_twice() { let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { - nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None).unwrap(); - let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None); + nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None).unwrap(); + let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None); match api_res { Err(APIError::APIMisuseError { err }) => { assert_eq!(err, "No such channel awaiting to be accepted."); @@ -8666,7 +9043,7 @@ pub fn test_can_not_accept_inbound_channel_twice() { match accept_msg_ev[0] { MessageSendEvent::SendAcceptChannel { ref node_id, .. } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); } _ => panic!("Unexpected event"), } @@ -8679,8 +9056,10 @@ pub fn test_can_not_accept_unknown_inbound_channel() { let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]); let nodes = create_network(2, &node_cfg, &node_chanmgr); + let node_b_id = nodes[1].node.get_our_node_id(); + let unknown_channel_id = ChannelId::new_zero(); - let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &nodes[1].node.get_our_node_id(), 0, None); + let api_res = nodes[0].node.accept_inbound_channel(&unknown_channel_id, &node_b_id, 0, None); match api_res { Err(APIError::APIMisuseError { err }) => { assert_eq!(err, "No such channel awaiting to be accepted."); @@ -8703,6 +9082,10 @@ pub fn test_onion_value_mpp_set_calculation() { let node_chanmgrs = create_node_chanmgrs(node_count, &node_cfgs, &vec![None; node_count]); let mut nodes = create_network(node_count, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; @@ -8714,17 +9097,17 @@ pub fn test_onion_value_mpp_set_calculation() { let sample_path = route.paths.pop().unwrap(); let mut path_1 = sample_path.clone(); - path_1.hops[0].pubkey = nodes[1].node.get_our_node_id(); + path_1.hops[0].pubkey = node_b_id; path_1.hops[0].short_channel_id = chan_1_id; - path_1.hops[1].pubkey = nodes[3].node.get_our_node_id(); + path_1.hops[1].pubkey = node_d_id; path_1.hops[1].short_channel_id = chan_3_id; path_1.hops[1].fee_msat = 100_000; route.paths.push(path_1); let mut path_2 = sample_path.clone(); - path_2.hops[0].pubkey = nodes[2].node.get_our_node_id(); + path_2.hops[0].pubkey = node_c_id; path_2.hops[0].short_channel_id = chan_2_id; - path_2.hops[1].pubkey = nodes[3].node.get_our_node_id(); + path_2.hops[1].pubkey = node_d_id; path_2.hops[1].short_channel_id = chan_4_id; path_2.hops[1].fee_msat = 1_000; route.paths.push(path_2); @@ -8795,8 +9178,7 @@ pub fn test_onion_value_mpp_set_calculation() { ); } -fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) { - +fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ let routing_node_count = msat_amounts.len(); let node_count = routing_node_count + 2; @@ -8877,6 +9259,9 
@@ pub fn test_simple_mpp() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; @@ -8885,10 +9270,10 @@ pub fn test_simple_mpp() { let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_id; route.paths[0].hops[1].short_channel_id = chan_3_id; - route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_id; route.paths[1].hops[1].short_channel_id = chan_4_id; send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret); @@ -8905,6 +9290,8 @@ pub fn test_preimage_storage() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; { @@ -8915,7 +9302,7 @@ pub fn test_preimage_storage() { check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); } // Note that after leaving the above scope we have no knowledge of any arguments or return @@ -8944,6 +9331,9 @@ pub fn test_bad_secret_hash() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let random_payment_hash = PaymentHash([42; 32]); @@ -8958,7 +9348,7 @@ pub fn test_bad_secret_hash() { check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); // We have to forward pending HTLCs once to process the receipt of the HTLC and then @@ -8971,7 +9361,7 @@ pub fn test_bad_secret_hash() { let mut events = nodes[1].node.get_and_clear_pending_msg_events(); match events.pop().unwrap() { MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. 
} } => { - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); }, _ => panic!("Unexpected event"), @@ -9018,6 +9408,9 @@ pub fn test_update_err_monitor_lockdown() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channel let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -9054,9 +9447,9 @@ pub fn test_update_err_monitor_lockdown() { check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; @@ -9088,6 +9481,9 @@ pub fn test_concurrent_monitor_claim() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channel let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -9156,9 +9552,9 @@ pub fn test_concurrent_monitor_claim() { RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); check_added_monitors!(nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(updates.update_add_htlcs.len(), 1); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[0].node.handle_update_add_htlc(node_b_id, &updates.update_add_htlcs[0]); { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; @@ -9193,7 +9589,7 @@ pub fn test_concurrent_monitor_claim() { connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); check_closed_broadcast(&nodes[0], 1, true); check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false, - [nodes[1].node.get_our_node_id()], 100000); + [node_b_id], 100000); watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height); check_added_monitors(&nodes[0], 1); { @@ -9221,25 +9617,28 @@ pub fn test_pre_lockin_no_chan_closed_update() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create an initial channel - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap(); - let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan_msg); - let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, 
- nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_chan_msg);
+ nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap();
+ let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
+ nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg);
+ let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
+ nodes[0].node.handle_accept_channel(node_b_id, &accept_chan_msg);
 // Move the first channel through the funding flow...
- let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
+ let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42);
- nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+ nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).unwrap();
 check_added_monitors!(nodes[0], 0);
- let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
 let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
- nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
+ nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
 assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
 check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
- [nodes[1].node.get_our_node_id()], 100000);
+ [node_b_id], 100000);
 }
 #[xtest(feature = "_externalize_tests")]
@@ -9256,6 +9655,8 @@ pub fn test_htlc_no_detection() {
 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let node_b_id = nodes[1].node.get_our_node_id();
+
 // Create some initial channels
 let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
@@ -9274,7 +9675,7 @@ pub fn test_htlc_no_detection() {
 chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
 check_closed_broadcast!(nodes[0], true);
 check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 connect_blocks(&nodes[0], TEST_FINAL_CLTV);
 let htlc_timeout = {
@@ -9310,6 +9711,10 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+ let node_c_id = nodes[2].node.get_our_node_id();
+
 // Create some initial channels
 let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
 create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001);
@@ -9351,7 +9756,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 if broadcast_alice {
 check_closed_broadcast!(nodes[1], true);
 check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 }
 }
@@ -9362,14 +9767,14 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 check_added_monitors!(nodes[2], 1);
 expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
- let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ let carol_updates = get_htlc_update_msgs!(nodes[2], node_b_id);
 assert!(carol_updates.update_add_htlcs.is_empty());
 assert!(carol_updates.update_fail_htlcs.is_empty());
 assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
 assert!(carol_updates.update_fee.is_none());
 assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
- nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
+ nodes[1].node.handle_update_fulfill_htlc(node_c_id, &carol_updates.update_fulfill_htlcs[0]);
 let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
 // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
@@ -9378,12 +9783,12 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 assert_eq!(events.len(), 1);
 match events[0] {
 MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
- assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert_eq!(*node_id, node_a_id);
 },
 _ => panic!("Unexpected event"),
 };
 }
- nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
+ nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &carol_updates.commitment_signed);
 // One monitor update for the preimage to update the Bob<->Alice channel, one monitor update for
 // Carol<->Bob's updated commitment transaction info.
 check_added_monitors!(nodes[1], 2);
@@ -9392,34 +9797,34 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 assert_eq!(events.len(), 2);
 let bob_revocation = match events[0] {
 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
- assert_eq!(*node_id, nodes[2].node.get_our_node_id());
+ assert_eq!(*node_id, node_c_id);
 (*msg).clone()
 },
 _ => panic!("Unexpected event"),
 };
 let bob_updates = match events[1] {
 MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => {
- assert_eq!(*node_id, nodes[2].node.get_our_node_id());
+ assert_eq!(*node_id, node_c_id);
 (*updates).clone()
 },
 _ => panic!("Unexpected event"),
 };
- nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bob_revocation);
+ nodes[2].node.handle_revoke_and_ack(node_b_id, &bob_revocation);
 check_added_monitors!(nodes[2], 1);
- nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
+ nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bob_updates.commitment_signed);
 check_added_monitors!(nodes[2], 1);
 let events = nodes[2].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
 let carol_revocation = match events[0] {
 MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
- assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ assert_eq!(*node_id, node_b_id);
 (*msg).clone()
 },
 _ => panic!("Unexpected event"),
 };
- nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &carol_revocation);
+ nodes[1].node.handle_revoke_and_ack(node_c_id, &carol_revocation);
 check_added_monitors!(nodes[1], 1);
 // If this test requires the force-closed channel to not be on-chain until after the fulfill,
@@ -9432,7 +9837,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 if broadcast_alice {
 check_closed_broadcast!(nodes[1], true);
 check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000);
 }
 let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 if broadcast_alice {
@@ -9485,13 +9890,17 @@ pub fn test_duplicate_temporary_channel_id_from_different_peers() {
 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+ let node_c_id = nodes[2].node.get_our_node_id();
+
 // Create a first channel
- nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
- let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
+ nodes[1].node.create_channel(node_a_id, 100000, 10001, 42, None, None).unwrap();
+ let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_a_id);
 // Create a second channel
- nodes[2].node.create_channel(nodes[0].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
- let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
+ nodes[2].node.create_channel(node_a_id, 100000, 10001, 43, None, None).unwrap();
+ let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, node_a_id);
 // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same
 // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0].
@@ -9499,26 +9908,26 @@ pub fn test_duplicate_temporary_channel_id_from_different_peers() {
 // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same
 // `temporary_channel_id` as they are from different peers.
- nodes[0].node.handle_open_channel(nodes[1].node.get_our_node_id(), &open_chan_msg_chan_1_0);
+ nodes[0].node.handle_open_channel(node_b_id, &open_chan_msg_chan_1_0);
 {
 let events = nodes[0].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
 match &events[0] {
 MessageSendEvent::SendAcceptChannel { node_id, msg } => {
- assert_eq!(node_id, &nodes[1].node.get_our_node_id());
+ assert_eq!(node_id, &node_b_id);
 assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
 },
 _ => panic!("Unexpected event"),
 }
 }
- nodes[0].node.handle_open_channel(nodes[2].node.get_our_node_id(), &open_chan_msg_chan_2_0);
+ nodes[0].node.handle_open_channel(node_c_id, &open_chan_msg_chan_2_0);
 {
 let events = nodes[0].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
 match &events[0] {
 MessageSendEvent::SendAcceptChannel { node_id, msg } => {
- assert_eq!(node_id, &nodes[2].node.get_our_node_id());
+ assert_eq!(node_id, &node_c_id);
 assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id);
 },
 _ => panic!("Unexpected event"),
@@ -9542,11 +9951,14 @@ pub fn test_peer_funding_sidechannel() {
 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
 let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
 let temp_chan_id_ca = exchange_open_accept_chan(&nodes[1], &nodes[0], 1_000_000, 0);
 let (_, tx, funding_output) =
- create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+ create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42);
 let cs_funding_events = nodes[1].node.get_and_clear_pending_events();
 assert_eq!(cs_funding_events.len(), 1);
@@ -9555,21 +9967,21 @@ pub fn test_peer_funding_sidechannel() {
 _ => panic!("Unexpected event {:?}", cs_funding_events),
 }
- nodes[1].node.funding_transaction_generated_unchecked(temp_chan_id_ca, nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap();
- let funding_created_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_funding_created(nodes[1].node.get_our_node_id(), &funding_created_msg);
- get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[1].node.get_our_node_id());
- expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
+ nodes[1].node.funding_transaction_generated_unchecked(temp_chan_id_ca, node_a_id, tx.clone(), funding_output.index).unwrap();
+ let funding_created_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_a_id);
+ nodes[0].node.handle_funding_created(node_b_id, &funding_created_msg);
+ get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, node_b_id);
+ expect_channel_pending_event(&nodes[0], &node_b_id);
 check_added_monitors!(nodes[0], 1);
- let res = nodes[0].node.funding_transaction_generated(temp_chan_id_ab, nodes[1].node.get_our_node_id(), tx.clone());
+ let res = nodes[0].node.funding_transaction_generated(temp_chan_id_ab, node_b_id, tx.clone());
 let err_msg = format!("{:?}", res.unwrap_err());
 assert!(err_msg.contains("An existing channel using ID"));
 assert!(err_msg.contains("is open with peer"));
 let channel_id = ChannelId::v1_from_funding_outpoint(funding_output);
- let reason = ClosureReason::ProcessingError { err: format!("An existing channel using ID {} is open with peer {}", channel_id, nodes[1].node.get_our_node_id()), };
+ let reason = ClosureReason::ProcessingError { err: format!("An existing channel using ID {} is open with peer {}", channel_id, node_b_id), };
 check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_chan_id_ab, true, reason)]);
- get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
+ get_err_msg(&nodes[0], &node_b_id);
 }
 #[xtest(feature = "_externalize_tests")]
@@ -9584,10 +9996,13 @@ pub fn test_duplicate_conflicting_funding_from_second_peer() {
 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
 let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
 let (_, tx, funding_outpoint) =
- create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+ create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42);
 let real_chan_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
 // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into
@@ -9596,19 +10011,19 @@ pub fn test_duplicate_conflicting_funding_from_second_peer() {
 let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone();
 nodes[0].chain_monitor.chain_monitor.watch_channel(real_chan_id, dummy_monitor).unwrap();
- nodes[0].node.funding_transaction_generated(temp_chan_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+ nodes[0].node.funding_transaction_generated(temp_chan_id, node_b_id, tx.clone()).unwrap();
- let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
- let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+ let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
+ nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg);
+ let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id);
 check_added_monitors!(nodes[1], 1);
- expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[1], &node_a_id);
- nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg);
+ nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg);
 // At this point, the channel should be closed, after having generated one monitor write (the
 // watch_channel call which failed), but zero monitor updates.
 check_added_monitors!(nodes[0], 1);
- get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id());
+ get_err_msg(&nodes[0], &node_b_id);
 let err_reason = ClosureReason::ProcessingError { err: "Channel ID was a duplicate".to_owned() };
 check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_chan_id, true, err_reason)]);
 }
@@ -9625,31 +10040,34 @@ pub fn test_duplicate_funding_err_in_funding() {
 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let node_b_id = nodes[1].node.get_our_node_id();
+ let node_c_id = nodes[2].node.get_our_node_id();
+
 let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
 let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.compute_txid(), index: 0 };
 assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);
- nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
- let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[2].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap();
+ let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, node_b_id);
 let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id;
 open_chan_msg.common_fields.temporary_channel_id = real_channel_id;
- nodes[1].node.handle_open_channel(nodes[2].node.get_our_node_id(), &open_chan_msg);
- let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(node_c_id, &open_chan_msg);
+ let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_c_id);
 accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id;
- nodes[2].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_chan_msg);
+ nodes[2].node.handle_accept_channel(node_b_id, &accept_chan_msg);
 // Now that we have a second channel with the same funding txo, send a bogus funding message
 // and let nodes[1] remove the inbound channel.
- let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42);
+ let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &node_b_id, 100_000, 42);
- nodes[2].node.funding_transaction_generated(node_c_temp_chan_id, nodes[1].node.get_our_node_id(), funding_tx).unwrap();
+ nodes[2].node.funding_transaction_generated(node_c_temp_chan_id, node_b_id, funding_tx).unwrap();
- let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, node_b_id);
 funding_created_msg.temporary_channel_id = real_channel_id;
 // Make the signature invalid by changing the funding output
 funding_created_msg.funding_output_index += 10;
- nodes[1].node.handle_funding_created(nodes[2].node.get_our_node_id(), &funding_created_msg);
- get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
+ nodes[1].node.handle_funding_created(node_c_id, &funding_created_msg);
+ get_err_msg(&nodes[1], &node_c_id);
 let err = "Invalid funding_created signature from peer".to_owned();
 let reason = ClosureReason::ProcessingError { err };
 let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason);
@@ -9670,15 +10088,18 @@ pub fn test_duplicate_chan_id() {
 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
 // Create an initial channel
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
- let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan_msg);
- nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
+ nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap();
+ let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
+ nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg);
+ nodes[0].node.handle_accept_channel(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id));
 // Try to create a second channel with the same temporary_channel_id as the first and check
 // that it is rejected.
- nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan_msg);
+ nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg);
 {
 let events = nodes[1].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
@@ -9689,33 +10110,33 @@ pub fn test_duplicate_chan_id() {
 // the same non-temporary channel_id. However, currently we do not, so we just
 // move forward with it.
 assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
- assert_eq!(node_id, nodes[0].node.get_our_node_id());
+ assert_eq!(node_id, node_a_id);
 },
 _ => panic!("Unexpected event"),
 }
 }
 // Move the first channel through the funding flow...
- let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
+ let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42);
- nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+ nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).unwrap();
 check_added_monitors!(nodes[0], 0);
- let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
 let channel_id = ChannelId::v1_from_funding_txid(
 funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index
 );
- nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
+ nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg);
 {
 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 assert_eq!(added_monitors.len(), 1);
 assert_eq!(added_monitors[0].0, channel_id);
 added_monitors.clear();
 }
- expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[1], &node_a_id);
- let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+ let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id);
 let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
 let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
@@ -9727,7 +10148,7 @@ pub fn test_duplicate_chan_id() {
 // Technically this is allowed by the spec, but we don't support it and there's little reason
 // to. Still, it shouldn't cause any other issues.
 open_chan_msg.common_fields.temporary_channel_id = channel_id;
- nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan_msg);
+ nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg);
 {
 let events = nodes[1].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
@@ -9736,22 +10157,22 @@ pub fn test_duplicate_chan_id() {
 // Technically, at this point, nodes[1] would be justified in thinking both
 // channels are closed, but currently we do not, so we just move forward with it.
 assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id);
- assert_eq!(node_id, nodes[0].node.get_our_node_id());
+ assert_eq!(node_id, node_a_id);
 },
 _ => panic!("Unexpected event"),
 }
 }
 // Now try to create a second channel which has a duplicate funding output.
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
- let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_chan_2_msg);
- nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
- create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
+ nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap();
+ let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
+ nodes[1].node.handle_open_channel(node_a_id, &open_chan_2_msg);
+ nodes[0].node.handle_accept_channel(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id));
+ create_funding_transaction(&nodes[0], &node_b_id, 100000, 42); // Get and check the FundingGenerationReady event
 let funding_created = {
 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
- let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+ let mut a_peer_state = per_peer_state.get(&node_b_id).unwrap().lock().unwrap();
 // Once we call `get_funding_created` the channel has a duplicate channel_id as
 // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
 // try to create another channel. Instead, we drop the channel entirely here (leaving the
@@ -9765,7 +10186,7 @@ pub fn test_duplicate_chan_id() {
 }.unwrap()
 };
 check_added_monitors!(nodes[0], 0);
- nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created);
+ nodes[1].node.handle_funding_created(node_a_id, &funding_created);
 // At this point we'll look up if the channel_id is present and immediately fail the channel
 // without trying to persist the `ChannelMonitor`.
 check_added_monitors!(nodes[1], 0);
@@ -9785,7 +10206,7 @@ pub fn test_duplicate_chan_id() {
 // Technically, at this point, nodes[1] would be justified in thinking both
 // channels are closed, but currently we do not, so we just move forward with it.
 assert_eq!(msg.channel_id, channel_id);
- assert_eq!(node_id, nodes[0].node.get_our_node_id());
+ assert_eq!(node_id, node_a_id);
 },
 _ => panic!("Unexpected event"),
 }
@@ -9793,14 +10214,14 @@ pub fn test_duplicate_chan_id() {
 // finally, finish creating the original channel and send a payment over it to make sure
 // everything is functional.
- nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg);
+ nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg);
 {
 let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
 assert_eq!(added_monitors.len(), 1);
 assert_eq!(added_monitors[0].0, channel_id);
 added_monitors.clear();
 }
- expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[0], &node_b_id);
 let events_4 = nodes[0].node.get_and_clear_pending_events();
 assert_eq!(events_4.len(), 0);
@@ -9826,6 +10247,8 @@ pub fn test_error_chans_closed() {
 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let node_b_id = nodes[1].node.get_our_node_id();
+
 // Create some initial channels
 let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
 let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
@@ -9836,15 +10259,15 @@ pub fn test_error_chans_closed() {
 assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
 // Closing a channel from a different peer has no effect
- nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
+ nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
 assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
 // Closing one channel doesn't impact others
- nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
+ nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
 check_added_monitors!(nodes[0], 1);
 check_closed_broadcast!(nodes[0], false);
 check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
- [nodes[1].node.get_our_node_id()], 100000);
+ [node_b_id], 100000);
 assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
 assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
 assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
@@ -9852,10 +10275,10 @@ pub fn test_error_chans_closed() {
 // A null channel ID should close all channels
 let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
- nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
+ nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
 check_added_monitors!(nodes[0], 2);
 check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) },
- [nodes[1].node.get_our_node_id(); 2], 100000);
+ [node_b_id; 2], 100000);
 let events = nodes[0].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 2);
 match events[0] {
@@ -9875,7 +10298,7 @@ pub fn test_error_chans_closed() {
 assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
 assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
- nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
+ nodes[0].node.peer_disconnected(node_b_id);
 assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
 assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
 }
@@ -9897,11 +10320,14 @@ pub fn test_invalid_funding_tx() {
 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None, None).unwrap();
- nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
- nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
+ nodes[0].node.create_channel(node_b_id, 100_000, 10_000, 42, None, None).unwrap();
+ nodes[1].node.handle_open_channel(node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id));
+ nodes[0].node.handle_accept_channel(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id));
- let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
+ let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42);
 // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
 // 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing
@@ -9914,14 +10340,14 @@ pub fn test_invalid_funding_tx() {
 output.script_pubkey = ScriptBuf::new_p2wsh(&wit_program_script.wscript_hash());
 }
- nodes[0].node.funding_transaction_generated_unchecked(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
- nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
+ nodes[0].node.funding_transaction_generated_unchecked(temporary_channel_id, node_b_id, tx.clone(), 0).unwrap();
+ nodes[1].node.handle_funding_created(node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id));
 check_added_monitors!(nodes[1], 1);
- expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[1], &node_a_id);
- nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+ nodes[0].node.handle_funding_signed(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id));
 check_added_monitors!(nodes[0], 1);
- expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[0], &node_b_id);
 let events_1 = nodes[0].node.get_and_clear_pending_events();
 assert_eq!(events_1.len(), 0);
@@ -9933,12 +10359,12 @@ pub fn test_invalid_funding_tx() {
 let expected_err = "funding tx had wrong script/value or output index";
 confirm_transaction_at(&nodes[1], &tx, 1);
 check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() },
- [nodes[0].node.get_our_node_id()], 100000);
+ [node_a_id], 100000);
 check_added_monitors!(nodes[1], 1);
 let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 assert_eq!(events_2.len(), 1);
 if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
- assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert_eq!(*node_id, node_a_id);
 if let msgs::ErrorAction::DisconnectPeer { msg } = action {
 assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err);
 } else { panic!(); }
@@ -9982,31 +10408,34 @@ pub fn test_coinbase_funding_tx() {
 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap();
- let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
+ nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap();
+ let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
- nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel);
- let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(node_a_id, &open_channel);
+ let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
- nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
+ nodes[0].node.handle_accept_channel(node_b_id, &accept_channel);
 // Create the coinbase funding transaction.
- let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
+ let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &node_b_id, 100000, 42);
- nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+ nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).unwrap();
 check_added_monitors!(nodes[0], 0);
- let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
- nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created);
+ nodes[1].node.handle_funding_created(node_a_id, &funding_created);
 check_added_monitors!(nodes[1], 1);
- expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[1], &node_a_id);
- let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+ let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id);
- nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
+ nodes[0].node.handle_funding_signed(node_b_id, &funding_signed);
 check_added_monitors!(nodes[0], 1);
- expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[0], &node_b_id);
 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 // Starting at height 0, we "confirm" the coinbase at height 1.
@@ -10018,13 +10447,13 @@ pub fn test_coinbase_funding_tx() {
 // Now connect one more block which results in 100 confirmations of the coinbase transaction.
 connect_blocks(&nodes[0], 1);
 // There should now be a `channel_ready` which can be handled.
- let _ = &nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()));
+ let _ = &nodes[1].node.handle_channel_ready(node_a_id, &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, node_b_id));
 confirm_transaction_at(&nodes[1], &tx, 1);
 connect_blocks(&nodes[1], COINBASE_MATURITY - 2);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 connect_blocks(&nodes[1], 1);
- expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
+ expect_channel_ready_event(&nodes[1], &node_a_id);
 create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
 }
@@ -10049,19 +10478,24 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+ let node_c_id = nodes[2].node.get_our_node_id();
+
+ *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
+
 create_announced_chan_between_nodes(&nodes, 0, 1);
 let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
 let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
- nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id());
- nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_disconnected(node_c_id);
+ nodes[2].node.peer_disconnected(node_b_id);
 let error_message = "Channel force-closed";
- nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &node_c_id, error_message.to_string()).unwrap();
 check_closed_broadcast!(nodes[1], true);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_c_id], 100000);
 check_added_monitors!(nodes[1], 1);
 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 assert_eq!(node_txn.len(), 1);
@@ -10093,16 +10527,16 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 // additional block built on top of the current chain.
 nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
 &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
- expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: channel_id }]);
 check_added_monitors!(nodes[1], 1);
- let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ let updates = get_htlc_update_msgs!(nodes[1], node_a_id);
 assert!(updates.update_add_htlcs.is_empty());
 assert!(updates.update_fulfill_htlcs.is_empty());
 assert_eq!(updates.update_fail_htlcs.len(), 1);
 assert!(updates.update_fail_malformed_htlcs.is_empty());
 assert!(updates.update_fee.is_none());
- nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]);
 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
 expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true);
@@ -10132,9 +10566,12 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
 let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
- let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV)
+ let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV)
 .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap();
 let route = get_route!(nodes[0], payment_params, 10_000).unwrap();
@@ -10147,7 +10584,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
- nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 }
 expect_pending_htlcs_forwardable!(nodes[1]);
@@ -10161,7 +10598,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
 let mut payment_event = SendEvent::from_event(events.pop().unwrap());
- nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 // At this point, nodes[1] would notice it has too much value for the payment. It will
 // assume the second is a privacy attack (no longer particularly relevant
@@ -10184,11 +10621,11 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 nodes[1].node.process_pending_htlc_forwards();
 check_added_monitors!(nodes[1], 1);
- let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ let fail_updates_1 = get_htlc_update_msgs!(nodes[1], node_a_id);
 assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
- nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
- nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
+ nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates_1.update_fail_htlcs[0]);
+ nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates_1.update_fail_htlcs[1]);
 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
 let failure_events = nodes[0].node.get_and_clear_pending_events();
@@ -10203,8 +10640,8 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 nodes[1].node.process_pending_htlc_forwards();
 check_added_monitors!(nodes[1], 1);
- let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
- nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+ let fail_updates_1 = get_htlc_update_msgs!(nodes[1], node_a_id);
+ nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates_1.update_fail_htlcs[0]);
 commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new());
@@ -10238,18 +10675,23 @@ pub fn test_inconsistent_mpp_params() {
 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+ let node_c_id = nodes[2].node.get_our_node_id();
+ let node_d_id = nodes[3].node.get_our_node_id();
+
 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
 create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
 create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
 let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0);
- let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id(), TEST_FINAL_CLTV)
+ let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV)
 .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap();
 let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap();
 assert_eq!(route.paths.len(), 2);
 route.paths.sort_by(|path_a, _| {
 // Sort the path so that the path through nodes[1] comes first
- if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
+ if path_a.hops[0].pubkey == node_b_id {
 core::cmp::Ordering::Less
 } else { core::cmp::Ordering::Greater }
 });
@@ -10287,7 +10729,7 @@ pub fn test_inconsistent_mpp_params() {
 assert_eq!(events.len(), 1);
 let payment_event = SendEvent::from_event(events.pop().unwrap());
- nodes[2].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ nodes[2].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
 commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
 expect_pending_htlcs_forwardable!(nodes[2]);
@@ -10297,7 +10739,7 @@ pub fn test_inconsistent_mpp_params() {
 assert_eq!(events.len(), 1);
 let payment_event = SendEvent::from_event(events.pop().unwrap());
- nodes[3].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
+ nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]);
 check_added_monitors!(nodes[3], 0);
 commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
@@ -10312,15 +10754,15 @@ pub fn test_inconsistent_mpp_params() {
 check_added_monitors!(nodes[3], 1);
- let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
- nodes[2].node.handle_update_fail_htlc(nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+ let fail_updates_1 = get_htlc_update_msgs!(nodes[3], node_c_id);
+ nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]);
 commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
- expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }]);
 check_added_monitors!(nodes[2], 1);
- let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
- nodes[0].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
+ let fail_updates_2 = get_htlc_update_msgs!(nodes[2], node_a_id);
+ nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]);
 commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
 expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
@@ -10351,6 +10793,8 @@ pub fn test_double_partial_claim() {
 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+ let node_b_id = nodes[1].node.get_our_node_id();
+
 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
 create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0);
 create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0);
@@ -10360,7 +10804,7 @@ pub fn test_double_partial_claim() {
 assert_eq!(route.paths.len(), 2);
 route.paths.sort_by(|path_a, _| {
 // Sort the path so that the path through nodes[1] comes first
- if path_a.hops[0].pubkey == nodes[1].node.get_our_node_id() {
+ if path_a.hops[0].pubkey == node_b_id {
 core::cmp::Ordering::Less
 } else { core::cmp::Ordering::Greater }
 });
@@ -10387,7 +10831,7 @@ pub fn test_double_partial_claim() {
 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 2);
- let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+ let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events);
 pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);
 // At this point nodes[3] has received one half of the payment, and the user goes to handle
@@ -10454,20 +10898,23 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]);
 let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
- let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
+ nodes[0].node.create_channel(node_b_id, 1_000_000, 500_000_000, 42, None, None).unwrap();
+ let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 open_channel.common_fields.max_htlc_value_in_flight_msat = 50_000_000;
 open_channel.common_fields.max_accepted_htlcs = 60;
 if on_holder_tx {
 open_channel.common_fields.dust_limit_satoshis = 546;
 }
- nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel);
- let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
+ nodes[1].node.handle_open_channel(node_a_id, &open_channel);
+ let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
+ nodes[0].node.handle_accept_channel(node_b_id, &accept_channel);
 let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
- let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+ let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42);
 if on_holder_tx {
 let mut node_0_per_peer_lock;
@@ -10480,14 +10927,14 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 }
 }
- nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
- nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
+ nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).unwrap();
+ nodes[1].node.handle_funding_created(node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id));
 check_added_monitors!(nodes[1], 1);
- expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[1], &node_a_id);
- nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+ nodes[0].node.handle_funding_signed(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id));
 check_added_monitors!(nodes[0], 1);
- expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
+ expect_channel_pending_event(&nodes[0], &node_b_id);
 let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
 let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
@@ -10504,7 +10951,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 let (dust_buffer_feerate, max_dust_htlc_exposure_msat) = {
 let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
- let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
+ let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap();
 let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
 (chan.context().get_dust_buffer_feerate(None) as u64,
 chan.context().get_max_dust_htlc_exposure_msat(253))
@@ -10580,7 +11027,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
 let payment_event = SendEvent::from_event(events.remove(0));
- nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+ nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]);
 commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false);
 expect_pending_htlcs_forwardable!(nodes[0]);
 expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]);
@@ -10681,6 +11128,10 @@ pub fn test_nondust_htlc_excess_fees_are_dust() {
 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config)]);
 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+ let node_c_id = nodes[2].node.get_our_node_id();
+
 // Leave enough on the funder side to let it pay the mining fees for a commit tx with tons of htlcs
 let chan_id_1 = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1_000_000, 750_000_000).2;
@@ -10741,7 +11192,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() {
 let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
 let payment_event = SendEvent::from_event(events.remove(0));
- nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
+ nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]);
 commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false);
 expect_pending_htlcs_forwardable!(nodes[0]);
 expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]);
@@ -10751,13 +11202,13 @@ pub fn test_nondust_htlc_excess_fees_are_dust() {
 check_added_monitors!(nodes[0], 1);
 // Clear the failed htlc
- let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ let updates = get_htlc_update_msgs!(nodes[0], node_b_id);
 assert!(updates.update_add_htlcs.is_empty());
 assert!(updates.update_fulfill_htlcs.is_empty());
 assert_eq!(updates.update_fail_htlcs.len(), 1);
 assert!(updates.update_fail_malformed_htlcs.is_empty());
 assert!(updates.update_fee.is_none());
- nodes[1].node.handle_update_fail_htlc(nodes[0].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ nodes[1].node.handle_update_fail_htlc(node_a_id, &updates.update_fail_htlcs[0]);
 commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false);
 expect_payment_failed!(nodes[1], payment_hash, false);
@@ -10789,19 +11240,19 @@ pub fn test_nondust_htlc_excess_fees_are_dust() {
 check_added_monitors(&nodes[2], 1);
 let send = SendEvent::from_node(&nodes[2]);
- nodes[0].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &send.msgs[0]);
+ nodes[0].node.handle_update_add_htlc(node_c_id, &send.msgs[0]);
 commitment_signed_dance!(nodes[0], nodes[2], send.commitment_msg, false, true);
 expect_pending_htlcs_forwardable!(nodes[0]);
 check_added_monitors(&nodes[0], 1);
- let node_id_1 = nodes[1].node.get_our_node_id();
nodes[1].node.get_our_node_id(); + let node_id_1 = node_b_id; expect_htlc_handling_failed_destinations!( nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Forward { node_id: Some(node_id_1), channel_id: chan_id_1 }] ); - let fail = get_htlc_update_msgs(&nodes[0], &nodes[2].node.get_our_node_id()); - nodes[2].node.handle_update_fail_htlc(nodes[0].node.get_our_node_id(), &fail.update_fail_htlcs[0]); + let fail = get_htlc_update_msgs(&nodes[0], &node_c_id); + nodes[2].node.handle_update_fail_htlc(node_a_id, &fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[0], fail.commitment_signed, false); expect_payment_failed_conditions(&nodes[2], payment_hash, false, PaymentFailedConditions::new()); } @@ -10867,11 +11318,14 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(default_config), Some(fixed_limit_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_id = create_chan_between_nodes_with_value(&nodes[0], &nodes[1], 100_000, 50_000_000).3; let node_1_dust_buffer_feerate = { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); let chan = chan_lock.channel_by_id.get(&chan_id).unwrap(); chan.context().get_dust_buffer_feerate(None) as u64 }; @@ -10900,7 +11354,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); @@ -10910,13 +11364,13 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) check_added_monitors!(nodes[1], 1); // Clear the failed htlc - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); expect_payment_failed!(nodes[0], payment_hash, false); @@ -10929,7 +11383,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) assert_eq!(nodes[1].node.list_channels()[0].pending_inbound_htlcs.len(), DUST_HTLC_COUNT); // Set node 1's max dust htlc exposure equal to the `expected_dust_exposure_msat` - nodes[1].node.update_partial_channel_config(&nodes[0].node.get_our_node_id(), &[chan_id], 
&ChannelConfigUpdate { + nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &ChannelConfigUpdate { max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat)), ..ChannelConfigUpdate::default() }).unwrap(); @@ -10955,7 +11409,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) } // Set node 1's max dust htlc exposure to 1msat below `expected_dust_exposure_msat` - nodes[1].node.update_partial_channel_config(&nodes[0].node.get_our_node_id(), &[chan_id], &ChannelConfigUpdate { + nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &ChannelConfigUpdate { max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat - 1)), ..ChannelConfigUpdate::default() }).unwrap(); @@ -10982,7 +11436,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) assert_eq!(nodes[1].node.list_channels()[0].pending_inbound_htlcs.len(), DUST_HTLC_COUNT); // Set node 1's max dust htlc exposure equal to `expected_dust_exposure_msat` - nodes[1].node.update_partial_channel_config(&nodes[0].node.get_our_node_id(), &[chan_id], &ChannelConfigUpdate { + nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &ChannelConfigUpdate { max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat)), ..ChannelConfigUpdate::default() }).unwrap(); @@ -11012,11 +11466,14 @@ pub fn test_non_final_funding_tx() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_message); - let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel_message); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let best_height = nodes[0].node.best_block.read().unwrap().height; @@ -11034,7 +11491,7 @@ pub fn test_non_final_funding_tx() { _ => panic!("Unexpected event"), }; // Transaction should fail as it's evaluated as non-final for propagation. 
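Before the failing call below, it may help to restate the rule being exercised: the funding transaction's absolute locktime may sit at most one block past the current tip. A minimal runnable sketch of that predicate follows; it is my own restatement of the behavior the test asserts, not LDK's actual check.

    // Hedged restatement of the "+1 headroom" finality rule driven by this
    // test and by `test_non_final_funding_tx_within_headroom` below; both
    // `locktime` and `best_height` are block heights.
    fn final_for_propagation(locktime: u32, best_height: u32) -> bool {
        locktime <= best_height + 1
    }

    fn main() {
        assert!(final_for_propagation(101, 100)); // exactly tip + 1: accepted
        assert!(!final_for_propagation(102, 100)); // past the headroom: rejected
    }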
- match nodes[0].node.funding_transaction_generated(temp_channel_id, nodes[1].node.get_our_node_id(), tx.clone()) { + match nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()) { Err(APIError::APIMisuseError { err }) => { assert_eq!(format!("Funding transaction absolute timelock is non-final"), err); }, @@ -11042,7 +11499,7 @@ pub fn test_non_final_funding_tx() { } let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned(); check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_channel_id, false, ClosureReason::ProcessingError { err })]); - assert_eq!(get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()).data, "Failed to fund channel"); + assert_eq!(get_err_msg(&nodes[0], &node_b_id).data, "Failed to fund channel"); } #[xtest(feature = "_externalize_tests")] @@ -11052,11 +11509,14 @@ pub fn test_non_final_funding_tx_within_headroom() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_message); - let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel_message); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let best_height = nodes[0].node.best_block.read().unwrap().height; @@ -11075,8 +11535,8 @@ pub fn test_non_final_funding_tx_within_headroom() { }; // Transaction should be accepted if it's in a +1 headroom from best block. - assert!(nodes[0].node.funding_transaction_generated(temp_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).is_ok()); - get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + assert!(nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).is_ok()); + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); } #[xtest(feature = "_externalize_tests")] @@ -11089,6 +11549,8 @@ pub fn accept_busted_but_better_fee() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + create_chan_between_nodes(&nodes[0], &nodes[1]); // Set nodes[1] to expect 5,000 sat/kW. @@ -11109,7 +11571,7 @@ pub fn accept_busted_but_better_fee() { assert_eq!(events.len(), 1); match events[0] { MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. 
} => { - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap()); + nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); }, _ => panic!("Unexpected event"), @@ -11128,7 +11590,7 @@ pub fn accept_busted_but_better_fee() { assert_eq!(events.len(), 1); match events[0] { MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap()); + nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); }, _ => panic!("Unexpected event"), @@ -11147,10 +11609,10 @@ pub fn accept_busted_but_better_fee() { assert_eq!(events.len(), 1); match events[0] { MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap()); + nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); check_closed_event!(nodes[1], 1, ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw: 1000, required_feerate_sat_per_kw: 5000, - }, [nodes[0].node.get_our_node_id()], 100000); + }, [node_a_id], 100000); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); }, @@ -11163,6 +11625,10 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let min_final_cltv_expiry_delta = 120; let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else { min_final_cltv_expiry_delta - 2 }; @@ -11170,7 +11636,7 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash create_chan_between_nodes(&nodes[0], &nodes[1]); - let payment_parameters = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), final_cltv_expiry_delta as u32); + let payment_parameters = PaymentParameters::from_node_id(node_b_id, final_cltv_expiry_delta as u32); let (payment_hash, payment_preimage, payment_secret) = if use_user_hash { let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1], Some(recv_value), Some(min_final_cltv_expiry_delta)); @@ -11186,13 +11652,13 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); if valid_delta { expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash { - None } else { Some(payment_preimage) }, nodes[1].node.get_our_node_id()); + None } else { Some(payment_preimage) }, node_b_id); claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); } else { @@ -11200,8 +11666,8 @@ 
fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash check_added_monitors!(nodes[1], 1); - let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &fail_updates.update_fail_htlcs[0]); + let fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true); expect_payment_failed!(nodes[0], payment_hash, true); @@ -11225,6 +11691,9 @@ pub fn test_disconnects_peer_awaiting_response_ticks() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Asserts a disconnect event is queued to the user. let check_disconnect_event = |node: &Node, should_disconnect: bool| { let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event| @@ -11268,58 +11737,58 @@ pub fn test_disconnects_peer_awaiting_response_ticks() { *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2; nodes[0].node.timer_tick_occurred(); check_added_monitors!(&nodes[0], 1); - let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed); + let alice_fee_update = get_htlc_update_msgs(&nodes[0], &node_b_id); + nodes[1].node.handle_update_fee(node_a_id, alice_fee_update.update_fee.as_ref().unwrap()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &alice_fee_update.commitment_signed); check_added_monitors!(&nodes[1], 1); // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`. - let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bob_revoke_and_ack); + let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bob_revoke_and_ack); check_added_monitors!(&nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bob_commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bob_commitment_signed); check_added_monitors(&nodes[0], 1); // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We // pretend Bob hasn't received the message and check whether he'll disconnect Alice after // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`. - let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); check_disconnect(&nodes[1]); // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message. // // Note that since the commitment dance didn't complete above, Alice is expected to resend her // final `RevokeAndACK` to Bob to complete it. 
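For orientation, here is the message order the assertions above just walked through, summarized as a comment-only schematic; the names are the BOLT-2 messages already used in the test, nothing new.

    // Fee-update commitment dance, as driven above:
    //   Alice -> Bob:   update_fee + commitment_signed
    //   Bob   -> Alice: revoke_and_ack + commitment_signed
    //   Alice -> Bob:   revoke_and_ack   <- deliberately withheld here
    // Until that final revoke_and_ack arrives, Bob is awaiting a response
    // and disconnects Alice after DISCONNECT_PEER_AWAITING_RESPONSE_TICKS.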
- nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); let bob_init = msgs::Init { features: nodes[1].node.init_features(), networks: None, remote_network_address: None }; - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &bob_init, true).unwrap(); + nodes[0].node.peer_connected(node_b_id, &bob_init, true).unwrap(); let alice_init = msgs::Init { features: nodes[0].node.init_features(), networks: None, remote_network_address: None }; - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &alice_init, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &alice_init, true).unwrap(); // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't // received Bob's yet, so she should disconnect him after reaching // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`. let alice_channel_reestablish = get_event_msg!( - nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id() + nodes[0], MessageSendEvent::SendChannelReestablish, node_b_id ); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &alice_channel_reestablish); + nodes[1].node.handle_channel_reestablish(node_a_id, &alice_channel_reestablish); check_disconnect(&nodes[0]); // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live". let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event| if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); Some(msg.clone()) } else { None } ).unwrap(); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bob_channel_reestablish); + nodes[0].node.handle_channel_reestablish(node_b_id, &bob_channel_reestablish); // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages. for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS { @@ -11332,7 +11801,7 @@ pub fn test_disconnects_peer_awaiting_response_ticks() { check_disconnect(&nodes[1]); // Finally, have Bob process the last message. 
- nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &alice_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_a_id, &alice_revoke_and_ack); check_added_monitors(&nodes[1], 1); // At this point, neither node should attempt to disconnect each other, since they aren't @@ -11352,11 +11821,14 @@ pub fn test_remove_expired_outbound_unfunded_channels() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_message); - let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel_message); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -11368,7 +11840,7 @@ pub fn test_remove_expired_outbound_unfunded_channels() { // Asserts the outbound channel has been removed from a nodes[0]'s peer state map. 
let check_outbound_channel_existence = |should_exist: bool| { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist); }; @@ -11393,7 +11865,7 @@ pub fn test_remove_expired_outbound_unfunded_channels() { }, _ => panic!("Unexpected event"), } - check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[nodes[1].node.get_our_node_id()], 100000); + check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -11403,11 +11875,14 @@ pub fn test_remove_expired_inbound_unfunded_channels() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_message); - let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel_message); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); @@ -11419,7 +11894,7 @@ pub fn test_remove_expired_inbound_unfunded_channels() { // Asserts the inbound channel has been removed from a nodes[1]'s peer state map. 
let check_inbound_channel_existence = |should_exist: bool| { let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap(); + let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist); }; @@ -11444,7 +11919,7 @@ pub fn test_remove_expired_inbound_unfunded_channels() { }, _ => panic!("Unexpected event"), } - check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[nodes[0].node.get_our_node_id()], 100000); + check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -11455,15 +11930,18 @@ pub fn test_channel_close_when_not_timely_accepted() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Simulate peer-disconnects mid-handshake // The channel is initiated from the node 0 side, // but the nodes disconnect before node 1 could send accept channel - let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap(); - let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let create_chan_id = nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); // Make sure that we have not removed the OutboundV1Channel from node[0] immediately. assert_eq!(nodes[0].node.list_channels().len(), 1); @@ -11478,7 +11956,7 @@ pub fn test_channel_close_when_not_timely_accepted() { // Since we disconnected from peer and did not connect back within time, // we should have forced-closed the channel by now. 
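The "within time" above is tick-driven rather than wall-clock. A hedged sketch of the mechanism elided by the hunk context follows; the constant is LDK's `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, cited from memory, so treat it as an assumption.

    // Assumed shape of the expiry loop the test runs before this point:
    //   for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
    //       nodes[0].node.timer_tick_occurred();
    //   }
    // after which the still-unfunded channel is dropped and closed with
    // ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }.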
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [node_b_id], 100000); assert_eq!(nodes[0].node.list_channels().len(), 0); { @@ -11498,15 +11976,18 @@ pub fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Simulate peer-disconnects mid-handshake // The channel is initiated from the node 0 side, // but the nodes disconnect before node 1 could send accept channel - let create_chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None, None).unwrap(); - let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let create_chan_id = nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); // Make sure that we have not removed the OutboundV1Channel from node[0] immediately. assert_eq!(nodes[0].node.list_channels().len(), 1); @@ -11515,10 +11996,10 @@ pub fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() { assert_eq!(nodes[1].node.list_channels().len(), 0); // The peers now reconnect - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { + nodes[0].node.peer_connected(node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, remote_network_address: None }, true).unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { + nodes[1].node.peer_connected(node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, remote_network_address: None }, false).unwrap(); @@ -11546,6 +12027,9 @@ fn do_test_multi_post_event_actions(do_reload: bool) { let nodes_0_deserialized; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2; @@ -11564,7 +12048,7 @@ fn do_test_multi_post_event_actions(do_reload: bool) { expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000); for dest in &[1, 2] { - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id()); + let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], node_a_id); nodes[0].node.handle_update_fulfill_htlc(nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false); check_added_monitors(&nodes[0], 0); @@ -11578,8 +12062,8 @@ fn do_test_multi_post_event_actions(do_reload: bool) { check_added_monitors(&nodes[1], 1); let send_event = SendEvent::from_node(&nodes[1]); - 
nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event.commitment_msg); + nodes[0].node.handle_update_add_htlc(node_b_id, &send_event.msgs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_event.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); if do_reload { @@ -11588,8 +12072,8 @@ fn do_test_multi_post_event_actions(do_reload: bool) { let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode(); reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_a_id); + nodes[2].node.peer_disconnected(node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2])); @@ -11608,7 +12092,7 @@ fn do_test_multi_post_event_actions(do_reload: bool) { // After the events are processed, the ChannelMonitorUpdates will be released and, upon their // completion, we'll respond to nodes[1] with an RAA + CS. - get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + get_revoke_commit_msgs(&nodes[0], &node_b_id); check_added_monitors(&nodes[0], 3); } @@ -11625,6 +12109,10 @@ pub fn test_batch_channel_open() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Initiate channel opening and create the batch channel funding transaction. let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[ (&nodes[1], 100_000, 0, 42, None), @@ -11632,25 +12120,25 @@ pub fn test_batch_channel_open() { ]); // Go through the funding_created and funding_signed flow with node 1. - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msgs[0]); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msgs[0]); check_added_monitors(&nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg); + let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); // The transaction should not have been broadcast before all channels are ready. assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); // Go through the funding_created and funding_signed flow with node 2. 
- nodes[2].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msgs[1]); + nodes[2].node.handle_funding_created(node_a_id, &funding_created_msgs[1]); check_added_monitors(&nodes[2], 1); - expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[2], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_funding_signed(nodes[2].node.get_our_node_id(), &funding_signed_msg); + nodes[0].node.handle_funding_signed(node_c_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); // The transaction should not have been broadcast before persisting all monitors has been @@ -11675,14 +12163,14 @@ pub fn test_batch_channel_open() { crate::events::Event::ChannelPending { ref counterparty_node_id, .. - } if counterparty_node_id == &nodes[1].node.get_our_node_id(), + } if counterparty_node_id == &node_b_id, ))); assert!(events.iter().any(|e| matches!( *e, crate::events::Event::ChannelPending { ref counterparty_node_id, .. - } if counterparty_node_id == &nodes[2].node.get_our_node_id(), + } if counterparty_node_id == &node_c_id, ))); } @@ -11695,6 +12183,9 @@ pub fn test_close_in_funding_batch() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Initiate channel opening and create the batch channel funding transaction. let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[ (&nodes[1], 100_000, 0, 42, None), @@ -11702,12 +12193,12 @@ pub fn test_close_in_funding_batch() { ]); // Go through the funding_created and funding_signed flow with node 1. - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msgs[0]); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msgs[0]); check_added_monitors(&nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg); + let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); // The transaction should not have been broadcast before all channels are ready. @@ -11719,7 +12210,7 @@ pub fn test_close_in_funding_batch() { let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1); let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2); let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &node_b_id, error_message.to_string()).unwrap(); // The monitor should become closed. 
check_added_monitors(&nodes[0], 1); @@ -11775,6 +12266,10 @@ pub fn test_batch_funding_close_after_funding_signed() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // Initiate channel opening and create the batch channel funding transaction. let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[ (&nodes[1], 100_000, 0, 42, None), @@ -11782,22 +12277,22 @@ pub fn test_batch_funding_close_after_funding_signed() { ]); // Go through the funding_created and funding_signed flow with node 1. - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msgs[0]); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msgs[0]); check_added_monitors(&nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg); + let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); // Go through the funding_created and funding_signed flow with node 2. - nodes[2].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msgs[1]); + nodes[2].node.handle_funding_created(node_a_id, &funding_created_msgs[1]); check_added_monitors(&nodes[2], 1); - expect_channel_pending_event(&nodes[2], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[2], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_funding_signed(nodes[2].node.get_our_node_id(), &funding_signed_msg); + nodes[0].node.handle_funding_signed(node_c_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); // The transaction should not have been broadcast before all channels are ready. 
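Both batch tests above keep re-checking the same invariant; a comment-only recap of the per-channel handshake (a summary of the calls already shown, not new API):

    // Batch funding, per channel:
    //   opener   -> acceptor: funding_created
    //   acceptor: adds a ChannelMonitor, emits Event::ChannelPending
    //   acceptor -> opener:   funding_signed
    //   opener:   adds a ChannelMonitor
    // The shared funding transaction is broadcast only once every channel in
    // the batch has finished this exchange and persisted its monitor.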
@@ -11809,7 +12304,7 @@ pub fn test_batch_funding_close_after_funding_signed() { let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1); let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2); let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &node_b_id, error_message.to_string()).unwrap(); check_added_monitors(&nodes[0], 2); { let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap(); @@ -11931,21 +12426,24 @@ pub fn test_accept_inbound_channel_errors_queued() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); - let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg); + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + + nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { - match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23, None) { + match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 23, None) { Err(APIError::ChannelUnavailable { err: _ }) => (), _ => panic!(), } } _ => panic!("Unexpected event"), } - assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id, + assert_eq!(get_err_msg(&nodes[1], &node_a_id).channel_id, open_channel_msg.common_fields.temporary_channel_id); } @@ -11958,25 +12456,28 @@ pub fn test_manual_funding_abandon() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(cfg.clone()), Some(cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).is_ok()); - let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + assert!(nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).is_ok()); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel); - let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); - let (temporary_channel_id, _tx, funding_outpoint) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42); - 
nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_outpoint).unwrap(); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); + let (temporary_channel_id, _tx, funding_outpoint) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); + nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, node_b_id, funding_outpoint).unwrap(); check_added_monitors!(nodes[0], 0); - let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created); + let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + nodes[1].node.handle_funding_created(node_a_id, &funding_created); check_added_monitors!(nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); let err = msgs::ErrorMessage { channel_id: funding_signed.channel_id, data: "".to_string() }; - nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &err); + nodes[0].node.handle_error(node_b_id, &err); let close_events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(close_events.len(), 2); @@ -12000,24 +12501,27 @@ pub fn test_funding_signed_event() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(cfg.clone()), Some(cfg)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).is_ok()); - let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + assert!(nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).is_ok()); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel); - let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel); - let (temporary_channel_id, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42); - nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_outpoint).unwrap(); + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); + let (temporary_channel_id, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); + nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, node_b_id, funding_outpoint).unwrap(); check_added_monitors!(nodes[0], 0); - let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), 
&funding_created); + let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + nodes[1].node.handle_funding_created(node_a_id, &funding_created); check_added_monitors!(nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed); + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed); check_added_monitors!(nodes[0], 1); let events = &nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -12030,7 +12534,7 @@ pub fn test_funding_signed_event() { }; match &events[1] { crate::events::Event::ChannelPending { counterparty_node_id, .. } => { - assert_eq!(*&nodes[1].node.get_our_node_id(), *counterparty_node_id); + assert_eq!(*&node_b_id, *counterparty_node_id); }, _ => panic!("Unexpected event"), }; @@ -12038,13 +12542,13 @@ pub fn test_funding_signed_event() { mine_transaction(&nodes[0], &tx); mine_transaction(&nodes[1], &tx); - let as_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id()); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_channel_ready); - let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()); - nodes[0].node.handle_channel_ready(nodes[1].node.get_our_node_id(), &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, node_a_id); + nodes[1].node.handle_channel_ready(node_a_id, &as_channel_ready); + let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); + nodes[0].node.handle_channel_ready(node_b_id, &as_channel_ready); - expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); - expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_ready_event(&nodes[0], &node_b_id); + expect_channel_ready_event(&nodes[1], &node_a_id); nodes[0].node.get_and_clear_pending_msg_events(); nodes[1].node.get_and_clear_pending_msg_events(); } From 5ad41e5ffaf1e20935a1986c055e8cc7cde9ca44 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 1 May 2025 23:22:19 +0000 Subject: [PATCH 08/25] f drop unnecessary diff --- lightning/src/ln/functional_tests.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index ee8462ca10c..039a5ab18f1 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -10479,14 +10479,12 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks; + let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); let node_c_id = nodes[2].node.get_our_node_id(); - *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks; - - let node_c_id = node_c_id; - create_announced_chan_between_nodes(&nodes, 0, 1); let 
(chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2); let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); From 93d2a08782ebcba0ebe8a4ff41c4f28c7df561a1 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 27 Apr 2025 18:22:37 +0000 Subject: [PATCH 09/25] Move channel reserve tests out of `functional_tests.rs` into a new file `functional_tests.rs` has gotten incredibly huge over the years, so here we move some channel reserve tests into a new file. --- lightning/src/ln/functional_test_utils.rs | 6 + lightning/src/ln/functional_tests.rs | 499 +------------------ lightning/src/ln/htlc_reserve_unit_tests.rs | 508 ++++++++++++++++++++ lightning/src/ln/mod.rs | 3 + 4 files changed, 518 insertions(+), 498 deletions(-) create mode 100644 lightning/src/ln/htlc_reserve_unit_tests.rs diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index c00e5a5bf6f..8a9a5cb1762 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -16,7 +16,9 @@ use crate::chain::transaction::OutPoint; use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCHandlingFailureType, PaidBolt12Invoice, PathFailure, PaymentFailureReason, PaymentPurpose}; use crate::events::bump_transaction::{BumpTransactionEvent, BumpTransactionEventHandler, Wallet, WalletSource}; use crate::ln::types::ChannelId; +use crate::types::features::ChannelTypeFeatures; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; +use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC}; use crate::ln::channelmanager::{AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA}; use crate::types::features::InitFeatures; use crate::ln::msgs; @@ -1104,6 +1106,10 @@ macro_rules! unwrap_send_err { } } +pub fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 { + (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000 +} + /// Check whether N channel monitor(s) have been added. 
pub fn check_added_monitors<CM: AChannelManager, H: NodeHolder<CM = CM>>(node: &H, count: usize) { if let Some(chain_monitor) = node.chain_monitor() { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 039a5ab18f1..d9d41efeb2d 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -23,7 +23,7 @@ use crate::events::bump_transaction::WalletSource; use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCHandlingFailureType, PaymentFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; -use crate::ln::channel::{get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel, COINBASE_MATURITY, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; +use crate::ln::channel::{get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel, COINBASE_MATURITY, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, MIN_AFFORDABLE_HTLC_COUNT}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError, MIN_CHAN_DUST_LIMIT_SATOSHIS}; use crate::ln::{chan_utils, onion_utils}; @@ -189,78 +189,6 @@ pub fn test_funding_exceeds_no_wumbo_limit() { } } -fn do_test_counterparty_no_reserve(send_from_initiator: bool) { - // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure, - // but only for them. Because some LSPs do it with some level of trust of the clients (for a - // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often - // in normal testing, we test it explicitly here. 
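Since the `commit_tx_fee_msat` helper hoisted into `functional_test_utils.rs` above is pure arithmetic, a self-contained sketch makes the rounding behavior visible. The weight constants are restated here from memory as assumptions (LDK's non-anchor commitment base weight and per-HTLC weight).

    // Standalone restatement of the helper: fee, in msat rounded down to a
    // whole satoshi, for a commitment tx carrying `num_htlcs` HTLC outputs.
    const BASE_WEIGHT: u64 = 724; // assumed non-anchor commitment tx base weight
    const WEIGHT_PER_HTLC: u64 = 172; // assumed weight added per HTLC output

    fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: u64) -> u64 {
        (BASE_WEIGHT + num_htlcs * WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
    }

    fn main() {
        // At the 253 sat/kW feerate common in these tests, with 2 HTLCs:
        // (724 + 2 * 172) * 253 = 270_204, which rounds down to 270_000 msat.
        assert_eq!(commit_tx_fee_msat(253, 2), 270_000);
    }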
- let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let default_config = UserConfig::default(); - - // Have node0 initiate a channel to node1 with aforementioned parameters - let mut push_amt = 100_000_000; - let feerate_per_kw = 253; - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); - push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; - push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; - - let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap(); - let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - if !send_from_initiator { - open_channel_message.channel_reserve_satoshis = 0; - open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; - } - nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); - - // Extract the channel accept message from node1 to node0 - let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); - if send_from_initiator { - accept_channel_message.channel_reserve_satoshis = 0; - accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; - } - nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); - { - let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] }; - let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] }; - let mut sender_node_per_peer_lock; - let mut sender_node_peer_state_lock; - - let channel = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); - assert!(channel.is_unfunded_v1()); - channel.funding_mut().holder_selected_channel_reserve_satoshis = 0; - channel.context_mut().holder_max_htlc_value_in_flight_msat = 100_000_000; - } - - let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); - let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx); - create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0); - - // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s - // security model if it ever tries to send funds back to nodes[0] (but that's not our problem). - if send_from_initiator { - send_payment(&nodes[0], &[&nodes[1]], 100_000_000 - // Note that for outbound channels we have to consider the commitment tx fee and the - // "fee spike buffer", which is currently a multiple of the total commitment tx fee as - // well as an additional HTLC. 
- - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features)); - } else { - send_payment(&nodes[1], &[&nodes[0]], push_amt); - } -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_counterparty_no_reserve() { - do_test_counterparty_no_reserve(true); - do_test_counterparty_no_reserve(false); -} - #[xtest(feature = "_externalize_tests")] pub fn test_async_inbound_update_fee() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -1993,431 +1921,6 @@ pub fn test_inbound_outbound_capacity_is_not_zero() { assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000); } -fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64, channel_type_features: &ChannelTypeFeatures) -> u64 { - (commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000 -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_channel_reserve_holding_cell_htlcs() { - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - // When this test was written, the default base fee floated based on the HTLC count. - // It is now fixed, so we simply set the fee to the expected value here. - let mut config = test_default_channel_config(); - config.channel_config.forwarding_fee_base_msat = 239; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - let node_c_id = nodes[2].node.get_our_node_id(); - - let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001); - let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001); - let chan_2_user_id = nodes[2].node.list_channels()[0].user_channel_id; - - let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); - let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); - - let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2); - let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); - - macro_rules! 
expect_forward { - ($node: expr) => {{ - let mut events = $node.node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - check_added_monitors!($node, 1); - let payment_event = SendEvent::from_event(events.remove(0)); - payment_event - }} - } - - let feemsat = 239; // set above - let total_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2); - let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2); - - let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat; - - // attempt to send amt_msat > their_max_htlc_value_in_flight_msat - { - let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0); - route.paths[0].hops.last_mut().unwrap().fee_msat += 1; - assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); - - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - } - - // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete - // nodes[0]'s wealth - loop { - let amt_msat = recv_value_0 + total_fee_msat; - // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve. - // Also, ensure that each payment has enough to be over the dust limit to - // ensure it'll be included in each commit tx fee calculation. - let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features); - let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000); - if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat { - break; - } - - let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0); - let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap(); - let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0); - claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); - - let (stat01_, stat11_, stat12_, stat22_) = ( - get_channel_value_stat!(nodes[0], nodes[1], chan_1.2), - get_channel_value_stat!(nodes[1], nodes[0], chan_1.2), - get_channel_value_stat!(nodes[1], nodes[2], chan_2.2), - get_channel_value_stat!(nodes[2], nodes[1], chan_2.2), - ); - - assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat); - assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat); - assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat)); - assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat)); - stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_; - } - - // adding pending output. - // 2* and +1 HTLCs on the commit tx fee for the fee spike reserve. 
- // The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity
- // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to
- // divide this quantity into 3 portions, that will each be sent in an HTLC. This allows us
- // to test channel reserve policy at the edges of what amount is sendable, i.e.
- // cases where 1 msat over X amount will cause a payment failure, but anything less than
- // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting
- // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
- // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
- // policy.
- let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
- let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
- let amt_msat_1 = recv_value_1 + total_fee_msat;
-
- let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
- let payment_event_1 = {
- nodes[0].node.send_payment_with_route(route_1.clone(), our_payment_hash_1,
- RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
- check_added_monitors!(nodes[0], 1);
-
- let mut events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- SendEvent::from_event(events.remove(0))
- };
- nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event_1.msgs[0]);
-
- // channel reserve test with htlc pending output > 0
- let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
- {
- let mut route = route_1.clone();
- route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1;
- let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
- unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash,
- RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
- ), true, APIError::ChannelUnavailable { ..
}, {});
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- }
-
- // split the rest to test holding cell
- let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
- let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
- let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
- let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
- {
- let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
- assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
- }
-
- // now see if they go through on both sides
- let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
- // but this will get stuck in the holding cell
- nodes[0].node.send_payment_with_route(route_21, our_payment_hash_21,
- RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
- check_added_monitors!(nodes[0], 0);
- let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 0);
-
- // test with outbound holding cell amount > 0
- {
- let (mut route, our_payment_hash, _, our_payment_secret) =
- get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
- route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
- unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash,
- RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
- ), true, APIError::ChannelUnavailable { .. }, {});
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- }
-
- let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
- // this will also get stuck in the holding cell
- nodes[0].node.send_payment_with_route(route_22, our_payment_hash_22,
- RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
- check_added_monitors!(nodes[0], 0);
- assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-
- // flush the pending htlc
- nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event_1.commitment_msg);
- let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id);
- check_added_monitors!(nodes[1], 1);
-
- // the pending htlc should be promoted to committed
- nodes[0].node.handle_revoke_and_ack(node_b_id, &as_revoke_and_ack);
- check_added_monitors!(nodes[0], 1);
- let commitment_update_2 = get_htlc_update_msgs!(nodes[0], node_b_id);
-
- nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &as_commitment_signed);
- let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
- // No commitment_signed so get_event_msg's assert(len == 1) passes
- check_added_monitors!(nodes[0], 1);
-
- nodes[1].node.handle_revoke_and_ack(node_a_id, &bs_revoke_and_ack);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- check_added_monitors!(nodes[1], 1);
-
- expect_pending_htlcs_forwardable!(nodes[1]);
-
- let ref payment_event_11 = expect_forward!(nodes[1]);
- nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_11.msgs[0]);
- 
commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false); - - expect_pending_htlcs_forwardable!(nodes[2]); - expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1); - - // flush the htlcs in the holding cell - assert_eq!(commitment_update_2.update_add_htlcs.len(), 2); - nodes[1].node.handle_update_add_htlc(node_a_id, &commitment_update_2.update_add_htlcs[0]); - nodes[1].node.handle_update_add_htlc(node_a_id, &commitment_update_2.update_add_htlcs[1]); - commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[1]); - - let ref payment_event_3 = expect_forward!(nodes[1]); - assert_eq!(payment_event_3.msgs.len(), 2); - nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_3.msgs[0]); - nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_3.msgs[1]); - - commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false); - expect_pending_htlcs_forwardable!(nodes[2]); - - let events = nodes[2].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); - match events[0] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { - assert_eq!(our_payment_hash_21, *payment_hash); - assert_eq!(recv_value_21, amount_msat); - assert_eq!(node_c_id, receiver_node_id.unwrap()); - assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]); - match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { - assert!(payment_preimage.is_none()); - assert_eq!(our_payment_secret_21, *payment_secret); - }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") - } - }, - _ => panic!("Unexpected event"), - } - match events[1] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { - assert_eq!(our_payment_hash_22, *payment_hash); - assert_eq!(recv_value_22, amount_msat); - assert_eq!(node_c_id, receiver_node_id.unwrap()); - assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]); - match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. 
} => { - assert!(payment_preimage.is_none()); - assert_eq!(our_payment_secret_22, *payment_secret); - }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") - } - }, - _ => panic!("Unexpected event"), - } - - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22); - - let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features); - let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat; - send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3); - - let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); - let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat); - let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); - assert_eq!(stat0.value_to_self_msat, expected_value_to_self); - assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc); - - let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); - assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3); -} - -#[xtest(feature = "_externalize_tests")] -pub fn channel_reserve_in_flight_removes() { - // In cases where one side claims an HTLC, it thinks it has additional available funds that it - // can send to its counterparty, but due to update ordering, the other side may not yet have - // considered those HTLCs fully removed. - // This tests that we don't count HTLCs which will not be included in the next remote - // commitment transaction towards the reserve value (as it implies no commitment transaction - // will be generated which violates the remote reserve value). - // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test. - // To test this we: - // * route two HTLCs from A to B (note that, at a high level, this test is checking that, when - // you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if - // you only consider the value of the first HTLC, it may not), - // * start routing a third HTLC from A to B, - // * claim the first two HTLCs (though B will generate an update_fulfill for one, and put - // the other claim in its holding cell, as it immediately goes into AwaitingRAA), - // * deliver the first fulfill from B - // * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell - // claim, - // * deliver A's response CS and RAA. - // This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having - // removed it fully. B now has the push_msat plus the first two HTLCs in value. - // * Now B happily sends another HTLC, potentially violating its reserve value from A's point - // of view (if A counts the AwaitingRemovedRemoteRevoke HTLC). 
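
(Aside: the reserve-accounting rule this comment block describes can be sketched in a few lines. The types below, `HtlcState` and `Htlc`, are hypothetical stand-ins rather than LDK's real channel state machine, and direction/fee details are simplified away; the actual checks live in `channel.rs`.)

    // Only HTLCs that can still appear in the next commitment transaction we
    // might broadcast count against the sender's reserve; fulfilled HTLCs that
    // are merely awaiting a final revoke_and_ack must be ignored, or we would
    // reject payments that are actually safe.
    #[derive(PartialEq)]
    enum HtlcState {
        Committed,
        AwaitingRemovedRemoteRevoke,
    }

    struct Htlc {
        amount_msat: u64,
        state: HtlcState,
    }

    fn violates_reserve(
        htlcs: &[Htlc], sender_balance_msat: u64, new_htlc_msat: u64, reserve_msat: u64,
    ) -> bool {
        // Sum only the HTLCs that will appear in the next commitment tx.
        let counted: u64 = htlcs
            .iter()
            .filter(|h| h.state != HtlcState::AwaitingRemovedRemoteRevoke)
            .map(|h| h.amount_msat)
            .sum();
        sender_balance_msat.saturating_sub(counted + new_htlc_msat) < reserve_msat
    }
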
- let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - - let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); - // Route the first two HTLCs. - let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000; - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1); - let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000); - - // Start routing the third HTLC (this is just used to get everyone in the right state). - let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - let send_1 = { - nodes[0].node.send_payment_with_route(route, payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - SendEvent::from_event(events.remove(0)) - }; - - // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an - // initial fulfill/CS. - nodes[1].node.claim_funds(payment_preimage_1); - expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1); - check_added_monitors!(nodes[1], 1); - let bs_removes = get_htlc_update_msgs!(nodes[1], node_a_id); - - // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not - // remove the second HTLC when we send the HTLC back from B to A. 
- nodes[1].node.claim_funds(payment_preimage_2);
- expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
- check_added_monitors!(nodes[1], 1);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_removes.update_fulfill_htlcs[0]);
- nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_removes.commitment_signed);
- check_added_monitors!(nodes[0], 1);
- let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
- expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
-
- nodes[1].node.handle_update_add_htlc(node_a_id, &send_1.msgs[0]);
- nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_1.commitment_msg);
- check_added_monitors!(nodes[1], 1);
- // B is already AwaitingRAA, so can't generate a CS here
- let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
-
- nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa);
- check_added_monitors!(nodes[1], 1);
- let bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id);
-
- nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
- check_added_monitors!(nodes[0], 1);
- let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id);
-
- nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed);
- check_added_monitors!(nodes[1], 1);
- let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
-
- // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
- // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
- // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
- // can no longer broadcast a commitment transaction with it and B has the preimage so can go
- // on-chain as necessary).
- nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_cs.update_fulfill_htlcs[0]);
- nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed);
- check_added_monitors!(nodes[0], 1);
- let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
- expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
-
- nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa);
- check_added_monitors!(nodes[1], 1);
- assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-
- expect_pending_htlcs_forwardable!(nodes[1]);
- expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
-
- // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
- // resolve the second HTLC from A's point of view.
- nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
- check_added_monitors!(nodes[0], 1);
- expect_payment_path_successful!(nodes[0]);
- let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id);
-
- // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
- // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
- let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000); - let send_2 = { - nodes[1].node.send_payment_with_route(route, payment_hash_4, - RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap(); - check_added_monitors!(nodes[1], 1); - let mut events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - SendEvent::from_event(events.remove(0)) - }; - - nodes[0].node.handle_update_add_htlc(node_b_id, &send_2.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_2.commitment_msg); - check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - - // Now just resolve all the outstanding messages/HTLCs for completeness... - - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - - nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); - - nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); - expect_payment_path_successful!(nodes[0]); - let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id); - - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - - nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); - - expect_pending_htlcs_forwardable!(nodes[0]); - expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000); - - claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4); - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); -} - enum PostFailBackAction { TimeoutOnChain, ClaimOnChain, diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs new file mode 100644 index 00000000000..e339ac0e8fd --- /dev/null +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -0,0 +1,508 @@ +//! Various unit tests covering HTLC handling as well as tests covering channel reserve tracking. + +use crate::events::{Event, PaymentPurpose}; +use crate::ln::functional_test_utils::*; +use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC}; +use crate::ln::channel::{FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, Channel, get_holder_selected_channel_reserve_satoshis}; +use crate::ln::channelmanager::PaymentId; +use crate::ln::outbound_payment::RecipientOnionFields; +use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; +use crate::util::config::UserConfig; +use crate::util::errors::APIError; +use crate::types::features::ChannelTypeFeatures; +use crate::routing::router::PaymentParameters; + +use lightning_macros::xtest; + +fn do_test_counterparty_no_reserve(send_from_initiator: bool) { + // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure, + // but only for them. Because some LSPs do it with some level of trust of the clients (for a + // substantial UX improvement), we explicitly allow it. Because it's unlikely to happen often + // in normal testing, we test it explicitly here. 
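
(Aside: the `push_amt` arithmetic a few lines below is easier to follow with concrete numbers. The sketch assumes the BOLT-3 non-anchor weights, 724 weight units base and 172 per HTLC, and the default 1% holder reserve; the test derives the same quantities via `commitment_tx_base_weight`, `COMMITMENT_TX_WEIGHT_PER_HTLC` and `get_holder_selected_channel_reserve_satoshis` instead.)

    // Worked version of the push_amt computation, under the assumptions above.
    fn worked_push_amt_msat() -> u64 {
        let feerate_per_kw: u64 = 253;
        let base_weight: u64 = 724; // assumed commitment_tx_base_weight for static_remotekey
        let per_htlc_weight: u64 = 172; // assumed COMMITMENT_TX_WEIGHT_PER_HTLC
        let mut push_amt: u64 = 100_000_000;
        // Commitment fee with headroom for 4 HTLCs, rounded down to whole sats:
        // 253 * (724 + 4 * 172) / 1000 * 1000 = 357_000 msat.
        push_amt -= feerate_per_kw * (base_weight + 4 * per_htlc_weight) / 1000 * 1000;
        // Assumed 1% holder reserve on a 100_000 sat channel: 1_000 sat.
        push_amt -= 1_000 * 1_000;
        push_amt // 98_643_000 msat may be pushed to the counterparty
    }
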
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let default_config = UserConfig::default(); + + // Have node0 initiate a channel to node1 with aforementioned parameters + let mut push_amt = 100_000_000; + let feerate_per_kw = 253; + let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + + let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap(); + let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + if !send_from_initiator { + open_channel_message.channel_reserve_satoshis = 0; + open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; + } + nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); + + // Extract the channel accept message from node1 to node0 + let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + if send_from_initiator { + accept_channel_message.channel_reserve_satoshis = 0; + accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; + } + nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); + { + let sender_node = if send_from_initiator { &nodes[1] } else { &nodes[0] }; + let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] }; + let mut sender_node_per_peer_lock; + let mut sender_node_peer_state_lock; + + let channel = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id); + assert!(channel.is_unfunded_v1()); + channel.funding_mut().holder_selected_channel_reserve_satoshis = 0; + channel.context_mut().holder_max_htlc_value_in_flight_msat = 100_000_000; + } + + let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); + let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx); + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0); + + // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s + // security model if it ever tries to send funds back to nodes[0] (but that's not our problem). + if send_from_initiator { + send_payment(&nodes[0], &[&nodes[1]], 100_000_000 + // Note that for outbound channels we have to consider the commitment tx fee and the + // "fee spike buffer", which is currently a multiple of the total commitment tx fee as + // well as an additional HTLC. 
+ - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features)); + } else { + send_payment(&nodes[1], &[&nodes[0]], push_amt); + } +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_counterparty_no_reserve() { + do_test_counterparty_no_reserve(true); + do_test_counterparty_no_reserve(false); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_channel_reserve_holding_cell_htlcs() { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + // When this test was written, the default base fee floated based on the HTLC count. + // It is now fixed, so we simply set the fee to the expected value here. + let mut config = test_default_channel_config(); + config.channel_config.forwarding_fee_base_msat = 239; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 190000, 1001); + let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 190000, 1001); + let chan_2_user_id = nodes[2].node.list_channels()[0].user_channel_id; + + let mut stat01 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); + let mut stat11 = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); + + let mut stat12 = get_channel_value_stat!(nodes[1], nodes[2], chan_2.2); + let mut stat22 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); + + macro_rules! expect_forward { + ($node: expr) => {{ + let mut events = $node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + check_added_monitors!($node, 1); + let payment_event = SendEvent::from_event(events.remove(0)); + payment_event + }} + } + + let feemsat = 239; // set above + let total_fee_msat = (nodes.len() - 2) as u64 * feemsat; + let feerate = get_feerate!(nodes[0], nodes[1], chan_1.2); + let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_1.2); + + let recv_value_0 = stat01.counterparty_max_htlc_value_in_flight_msat - total_fee_msat; + + // attempt to send amt_msat > their_max_htlc_value_in_flight_msat + { + let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0); + let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0); + route.paths[0].hops.last_mut().unwrap().fee_msat += 1; + assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); + + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) + ), true, APIError::ChannelUnavailable { .. }, {}); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + } + + // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete + // nodes[0]'s wealth + loop { + let amt_msat = recv_value_0 + total_fee_msat; + // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve. 
+ // Also, ensure that each payment has enough to be over the dust limit to
+ // ensure it'll be included in each commit tx fee calculation.
+ let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
+ let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000);
+ if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
+ break;
+ }
+
+ let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV)
+ .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0);
+ let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap();
+ let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
+ claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
+
+ let (stat01_, stat11_, stat12_, stat22_) = (
+ get_channel_value_stat!(nodes[0], nodes[1], chan_1.2),
+ get_channel_value_stat!(nodes[1], nodes[0], chan_1.2),
+ get_channel_value_stat!(nodes[1], nodes[2], chan_2.2),
+ get_channel_value_stat!(nodes[2], nodes[1], chan_2.2),
+ );
+
+ assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
+ assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
+ assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
+ assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
+ stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
+ }
+
+ // adding pending output.
+ // 2* and +1 HTLCs on the commit tx fee for the fee spike reserve.
+ // The reason we're dividing by two here is as follows: the dividend is the total outbound liquidity
+ // after fees, the channel reserve, and the fee spike buffer are removed. We eventually want to
+ // divide this quantity into 3 portions, that will each be sent in an HTLC. This allows us
+ // to test channel reserve policy at the edges of what amount is sendable, i.e.
+ // cases where 1 msat over X amount will cause a payment failure, but anything less than
+ // that can be sent successfully. So, dividing by two is a somewhat arbitrary way of getting
+ // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments
+ // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee
+ // policy.
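
(Aside: the division described above, worked with illustrative channel values rather than the live `stat01` numbers the test uses; the fee helper mirrors the `commit_tx_fee_msat` shape with assumed BOLT-3 non-anchor weights.)

    // Weight-based commitment fee, rounded down to whole sats, as in the test.
    fn commit_tx_fee_msat_sketch(feerate: u64, num_htlcs: u64) -> u64 {
        (724 + num_htlcs * 172) * feerate / 1000 * 1000
    }

    fn split_example() -> (u64, u64) {
        let value_to_self_msat: u64 = 80_000_000; // illustrative only
        let reserve_msat: u64 = 1_900_000; // illustrative only
        let total_fee_msat: u64 = 239; // one forwarding hop at the fixed base fee
        let feerate: u64 = 253;

        // "2*" is the fee spike buffer multiple, "+1" its extra HTLC allowance.
        let commit_tx_fee_2_htlcs = 2 * commit_tx_fee_msat_sketch(feerate, 2 + 1);
        // First portion: half of what is spendable once the reserve, routing
        // fee and buffered commitment fee are set aside.
        let recv_value_1 =
            (value_to_self_msat - reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs) / 2;
        // What remains is later split again into recv_value_21/recv_value_22,
        // so that one extra msat on any of the three HTLCs breaks the reserve.
        let recv_value_2 = value_to_self_msat
            - (recv_value_1 + total_fee_msat)
            - reserve_msat
            - total_fee_msat
            - commit_tx_fee_2_htlcs;
        (recv_value_1, recv_value_2)
    }
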
+ let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features);
+ let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2;
+ let amt_msat_1 = recv_value_1 + total_fee_msat;
+
+ let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1);
+ let payment_event_1 = {
+ nodes[0].node.send_payment_with_route(route_1.clone(), our_payment_hash_1,
+ RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+ nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event_1.msgs[0]);
+
+ // channel reserve test with htlc pending output > 0
+ let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
+ {
+ let mut route = route_1.clone();
+ route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1;
+ let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
+ unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash,
+ RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
+ ), true, APIError::ChannelUnavailable { .. }, {});
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ }
+
+ // split the rest to test holding cell
+ let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features);
+ let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs;
+ let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2;
+ let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat;
+ {
+ let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2);
+ assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat);
+ }
+
+ // now see if they go through on both sides
+ let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
+ // but this will get stuck in the holding cell
+ nodes[0].node.send_payment_with_route(route_21, our_payment_hash_21,
+ RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 0);
+
+ // test with outbound holding cell amount > 0
+ {
+ let (mut route, our_payment_hash, _, our_payment_secret) =
+ get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
+ route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
+ unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash,
+ RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
+ ), true, APIError::ChannelUnavailable { ..
}, {});
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ }
+
+ let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
+ // this will also get stuck in the holding cell
+ nodes[0].node.send_payment_with_route(route_22, our_payment_hash_22,
+ RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ // flush the pending htlc
+ nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event_1.commitment_msg);
+ let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id);
+ check_added_monitors!(nodes[1], 1);
+
+ // the pending htlc should be promoted to committed
+ nodes[0].node.handle_revoke_and_ack(node_b_id, &as_revoke_and_ack);
+ check_added_monitors!(nodes[0], 1);
+ let commitment_update_2 = get_htlc_update_msgs!(nodes[0], node_b_id);
+
+ nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &as_commitment_signed);
+ let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[1].node.handle_revoke_and_ack(node_a_id, &bs_revoke_and_ack);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let ref payment_event_11 = expect_forward!(nodes[1]);
+ nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_11.msgs[0]);
+ commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
+
+ // flush the htlcs in the holding cell
+ assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
+ nodes[1].node.handle_update_add_htlc(node_a_id, &commitment_update_2.update_add_htlcs[0]);
+ nodes[1].node.handle_update_add_htlc(node_a_id, &commitment_update_2.update_add_htlcs[1]);
+ commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let ref payment_event_3 = expect_forward!(nodes[1]);
+ assert_eq!(payment_event_3.msgs.len(), 2);
+ nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_3.msgs[0]);
+ nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event_3.msgs[1]);
+
+ commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+
+ let events = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => {
+ assert_eq!(our_payment_hash_21, *payment_hash);
+ assert_eq!(recv_value_21, amount_msat);
+ assert_eq!(node_c_id, receiver_node_id.unwrap());
+ assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]);
+ match &purpose {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, ..
} => { + assert!(payment_preimage.is_none()); + assert_eq!(our_payment_secret_21, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + } + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + assert_eq!(our_payment_hash_22, *payment_hash); + assert_eq!(recv_value_22, amount_msat); + assert_eq!(node_c_id, receiver_node_id.unwrap()); + assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]); + match &purpose { + PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + assert!(payment_preimage.is_none()); + assert_eq!(our_payment_secret_22, *payment_secret); + }, + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + } + }, + _ => panic!("Unexpected event"), + } + + claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1); + claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21); + claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22); + + let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features); + let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat; + send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3); + + let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); + let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat); + let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); + assert_eq!(stat0.value_to_self_msat, expected_value_to_self); + assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc); + + let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); + assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3); +} + +#[xtest(feature = "_externalize_tests")] +pub fn channel_reserve_in_flight_removes() { + // In cases where one side claims an HTLC, it thinks it has additional available funds that it + // can send to its counterparty, but due to update ordering, the other side may not yet have + // considered those HTLCs fully removed. + // This tests that we don't count HTLCs which will not be included in the next remote + // commitment transaction towards the reserve value (as it implies no commitment transaction + // will be generated which violates the remote reserve value). + // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test. + // To test this we: + // * route two HTLCs from A to B (note that, at a high level, this test is checking that, when + // you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if + // you only consider the value of the first HTLC, it may not), + // * start routing a third HTLC from A to B, + // * claim the first two HTLCs (though B will generate an update_fulfill for one, and put + // the other claim in its holding cell, as it immediately goes into AwaitingRAA), + // * deliver the first fulfill from B + // * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell + // claim, + // * deliver A's response CS and RAA. 
+ // This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having + // removed it fully. B now has the push_msat plus the first two HTLCs in value. + // * Now B happily sends another HTLC, potentially violating its reserve value from A's point + // of view (if A counts the AwaitingRemovedRemoteRevoke HTLC). + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + + let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); + // Route the first two HTLCs. + let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000; + let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1); + let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000); + + // Start routing the third HTLC (this is just used to get everyone in the right state). + let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + let send_1 = { + nodes[0].node.send_payment_with_route(route, payment_hash_3, + RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + + // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an + // initial fulfill/CS. + nodes[1].node.claim_funds(payment_preimage_1); + expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1); + check_added_monitors!(nodes[1], 1); + let bs_removes = get_htlc_update_msgs!(nodes[1], node_a_id); + + // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not + // remove the second HTLC when we send the HTLC back from B to A. 
+ nodes[1].node.claim_funds(payment_preimage_2);
+ expect_payment_claimed!(nodes[1], payment_hash_2, 20_000);
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_removes.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_removes.commitment_signed);
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
+ expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
+
+ nodes[1].node.handle_update_add_htlc(node_a_id, &send_1.msgs[0]);
+ nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_1.commitment_msg);
+ check_added_monitors!(nodes[1], 1);
+ // B is already AwaitingRAA, so can't generate a CS here
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
+
+ nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa);
+ check_added_monitors!(nodes[1], 1);
+ let bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id);
+
+ nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
+ check_added_monitors!(nodes[0], 1);
+ let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id);
+
+ nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
+
+ // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
+ // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
+ // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
+ // can no longer broadcast a commitment transaction with it and B has the preimage so can go
+ // on-chain as necessary).
+ nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_cs.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed);
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
+ expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
+
+ nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa);
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
+
+ // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
+ // resolve the second HTLC from A's point of view.
+ nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
+ check_added_monitors!(nodes[0], 1);
+ expect_payment_path_successful!(nodes[0]);
+ let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id);
+
+ // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
+ // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
+ let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000);
+ let send_2 = {
+ nodes[1].node.send_payment_with_route(route, payment_hash_4,
+ RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+
+ nodes[0].node.handle_update_add_htlc(node_b_id, &send_2.msgs[0]);
+ nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_2.commitment_msg);
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
+
+ // Now just resolve all the outstanding messages/HTLCs for completeness...
+
+ nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
+
+ nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa);
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
+ check_added_monitors!(nodes[0], 1);
+ expect_payment_path_successful!(nodes[0]);
+ let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id);
+
+ nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id);
+
+ nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa);
+ check_added_monitors!(nodes[0], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[0]);
+ expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);
+
+ claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
+}
diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs
index aa2d8c668ba..7db23efcf0a 100644
--- a/lightning/src/ln/mod.rs
+++ b/lightning/src/ln/mod.rs
@@ -69,6 +69,9 @@ mod async_payments_tests;
 #[cfg(any(test, feature = "_externalize_tests"))]
 #[allow(unused_mut)]
 pub mod functional_tests;
+#[cfg(any(test, feature = "_externalize_tests"))]
+#[allow(unused_mut)]
+pub mod htlc_reserve_unit_tests;
 #[cfg(all(test, splicing))]
 #[allow(unused_mut)]
 mod splicing_tests;

From 4dddaf8f6ad7e82275df52eb764754fd7ee2e3ed Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 1 May 2025 14:32:58 +0000
Subject: [PATCH 10/25] Move more chan reserve tests out of `functional_tests.rs`

`functional_tests.rs` has gotten incredibly huge over the years, so here we
move some further channel reserve tests into a new file. We do so separately
from the previous commit to ensure git identifies the changes as move-only.
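
(Aside for reviewers: the move-only claim above can be checked mechanically. Stock git's `git show --color-moved=dimmed-zebra <commit>` renders blocks of code that merely moved in dimmed colors, so any behavioral edit hidden inside the move stands out.)
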
--- lightning/src/ln/functional_tests.rs | 430 ------------------- lightning/src/ln/htlc_reserve_unit_tests.rs | 442 +++++++++++++++++++- 2 files changed, 438 insertions(+), 434 deletions(-) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index d9d41efeb2d..6c6815c4684 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1200,129 +1200,6 @@ pub fn fake_network_test() { check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); } -#[xtest(feature = "_externalize_tests")] -pub fn holding_cell_htlc_counting() { - // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs - // to ensure we don't end up with HTLCs sitting around in our holding cell for several - // commitment dance rounds. - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - let node_c_id = nodes[2].node.get_our_node_id(); - - create_announced_chan_between_nodes(&nodes, 0, 1); - let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - - // Fetch a route in advance as we will be unable to once we're unable to send. - let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); - - let mut payments = Vec::new(); - for _ in 0..50 { - let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); - nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); - payments.push((payment_preimage, payment_hash)); - } - check_added_monitors!(nodes[1], 1); - - let mut events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let initial_payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(initial_payment_event.node_id, node_c_id); - - // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in - // the holding cell waiting on B's RAA to send. At this point we should not be able to add - // another HTLC. - { - unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) - ), true, APIError::ChannelUnavailable { .. }, {}); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - } - - // This should also be true if we try to forward a payment. 
- let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000); - { - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } - - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(payment_event.node_id, node_b_id); - - nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - // We have to forward pending HTLCs twice - once tries to forward the payment forward (and - // fails), the second will process the resulting failure and fail the HTLC backward. - expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]); - check_added_monitors!(nodes[1], 1); - - let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_updates.update_fail_htlcs[0]); - commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true); - - expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false); - - // Now forward all the pending HTLCs and claim them back - nodes[2].node.handle_update_add_htlc(node_b_id, &initial_payment_event.msgs[0]); - nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &initial_payment_event.commitment_msg); - check_added_monitors!(nodes[2], 1); - - let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); - nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); - check_added_monitors!(nodes[1], 1); - let as_updates = get_htlc_update_msgs!(nodes[1], node_c_id); - - nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_commitment_signed); - check_added_monitors!(nodes[1], 1); - let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); - - for ref update in as_updates.update_add_htlcs.iter() { - nodes[2].node.handle_update_add_htlc(node_b_id, update); - } - nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &as_updates.commitment_signed); - check_added_monitors!(nodes[2], 1); - nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); - check_added_monitors!(nodes[2], 1); - let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); - - nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); - check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_commitment_signed); - check_added_monitors!(nodes[1], 1); - let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); - - nodes[2].node.handle_revoke_and_ack(node_b_id, &as_final_raa); - check_added_monitors!(nodes[2], 1); - - expect_pending_htlcs_forwardable!(nodes[2]); - - let events = nodes[2].node.get_and_clear_pending_events(); - assert_eq!(events.len(), payments.len()); - for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) { - match event { - &Event::PaymentClaimable { ref payment_hash, .. 
} => { - assert_eq!(*payment_hash, *hash); - }, - _ => panic!("Unexpected event"), - }; - } - - for (preimage, _) in payments.drain(..) { - claim_payment(&nodes[1], &[&nodes[2]], preimage); - } - - send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); -} - #[xtest(feature = "_externalize_tests")] pub fn duplicate_htlc_test() { // Test that we accept duplicate payment_hash HTLCs across the network and that @@ -1446,313 +1323,6 @@ pub fn test_duplicate_htlc_different_direction_onchain() { } } -#[xtest(feature = "_externalize_tests")] -pub fn test_basic_channel_reserve() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - - let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - let channel_reserve = chan_stat.channel_reserve_msat; - - // The 2* and +1 are for the fee spike reserve. - let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2)); - let max_can_send = 5000000 - channel_reserve - commit_tx_fee; - let (mut route, our_payment_hash, _, our_payment_secret) = - get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); - route.paths[0].hops.last_mut().unwrap().fee_msat += 1; - let err = nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)); - unwrap_send_err!(nodes[0], err, true, APIError::ChannelUnavailable { .. }, {} ); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - - send_payment(&nodes[0], &vec![&nodes[1]], max_can_send); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_fee_spike_violation_fails_htlc() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - - let (mut route, payment_hash, _, payment_secret) = - get_route_and_payment_hash!(nodes[0], nodes[1], 3460000); - route.paths[0].hops[0].fee_msat += 1; - // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() - let secp_ctx = Secp256k1::new(); - let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!"); - - let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; - - let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); - let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], - 3460001, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - let msg = msgs::UpdateAddHTLC { - channel_id: chan.2, - htlc_id: 0, - amount_msat: htlc_msat, - payment_hash: payment_hash, - cltv_expiry: htlc_cltv, - onion_routing_packet: onion_packet, - skimmed_fee_msat: None, - 
blinding_point: None, - }; - - nodes[1].node.handle_update_add_htlc(node_a_id, &msg); - - // Now manually create the commitment_signed message corresponding to the update_add - // nodes[0] just sent. In the code for construction of this message, "local" refers - // to the sender of the message, and "remote" refers to the receiver. - - let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2); - - const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; - - let (local_secret, next_local_point) = { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); - let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let chan_signer = local_chan.get_signer(); - // Make the signer believe we validated another commitment, so we can release the secret - chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; - - (chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(), - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx).unwrap()) - }; - let remote_point = { - let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let chan_signer = remote_chan.get_signer(); - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap() - }; - - // Build the remote commitment transaction so we can sign it, and then later use the - // signature for the commitment_signed message. - let local_chan_balance = 1313; - - let accepted_htlc_info = chan_utils::HTLCOutputInCommitment { - offered: false, - amount_msat: 3460001, - cltv_expiry: htlc_cltv, - payment_hash, - transaction_output_index: Some(1), - }; - - let commitment_number = INITIAL_COMMITMENT_NUMBER - 1; - - let res = { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let local_chan_signer = local_chan.get_signer(); - let commitment_tx = CommitmentTransaction::new( - commitment_number, - &remote_point, - 95000, - local_chan_balance, - feerate_per_kw, - vec![accepted_htlc_info], - &local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(), - &secp_ctx, - ); - local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment( - &local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(), - Vec::new(), &secp_ctx, - ).unwrap() - }; - - let commit_signed_msg = msgs::CommitmentSigned { - channel_id: chan.2, - signature: res.0, - htlc_signatures: res.1, - batch: None, - #[cfg(taproot)] - partial_signature_with_nonce: None, - }; - - // Send the commitment_signed message to the nodes[1]. - nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); - let _ = nodes[1].node.get_and_clear_pending_msg_events(); - - // Send the RAA to nodes[1]. 
- let raa_msg = msgs::RevokeAndACK { - channel_id: chan.2, - per_commitment_secret: local_secret, - next_per_commitment_point: next_local_point, - #[cfg(taproot)] - next_local_nonce: None, - }; - nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); - expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); - - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - // Make sure the HTLC failed in the way we expect. - match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => { - assert_eq!(update_fail_htlcs.len(), 1); - update_fail_htlcs[0].clone() - }, - _ => panic!("Unexpected event"), - }; - nodes[1].logger.assert_log("lightning::ln::channel", - format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); - - check_added_monitors!(nodes[1], 3); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { - let mut chanmon_cfgs = create_chanmon_cfgs(2); - // Set the fee rate for the channel very high, to the point where the fundee - // sending any above-dust amount would result in a channel reserve violation. - // In this test we check that we would be prevented from sending an HTLC in - // this situation. - let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let default_config = UserConfig::default(); - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); - - let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); - - push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; - - let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); - - // Fetch a route in advance as we will be unable to once we're unable to send. - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); - // Sending exactly enough to hit the reserve amount should be accepted - for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { - route_payment(&nodes[1], &[&nodes[0]], 1_000_000); - } - - // However one more HTLC should be significantly over the reserve amount and fail. - unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. 
}, {}); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { - let mut chanmon_cfgs = create_chanmon_cfgs(2); - let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_b_id = nodes[1].node.get_our_node_id(); - - let default_config = UserConfig::default(); - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); - - // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a - // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment - // transaction fee with 0 HTLCs (183 sats)). - let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); - push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); - - // Send four HTLCs to cover the initial push_msat buffer we're required to include - for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { - route_payment(&nodes[1], &[&nodes[0]], 1_000_000); - } - - let (mut route, payment_hash, _, payment_secret) = - get_route_and_payment_hash!(nodes[1], nodes[0], 1000); - route.paths[0].hops[0].fee_msat = 700_000; - // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() - let secp_ctx = Secp256k1::new(); - let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); - let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; - let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); - let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], - 700_000, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); - let msg = msgs::UpdateAddHTLC { - channel_id: chan.2, - htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64, - amount_msat: htlc_msat, - payment_hash: payment_hash, - cltv_expiry: htlc_cltv, - onion_routing_packet: onion_packet, - skimmed_fee_msat: None, - blinding_point: None, - }; - - nodes[0].node.handle_update_add_htlc(node_b_id, &msg); - // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. 
- nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3); - assert_eq!(nodes[0].node.list_channels().len(), 0); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }, - [node_b_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { - // Test that if we receive many dust HTLCs over an outbound channel, they don't count when - // calculating our commitment transaction fee (this was previously broken). - let mut chanmon_cfgs = create_chanmon_cfgs(2); - let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let default_config = UserConfig::default(); - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); - - // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a - // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment - // transaction fee with 0 HTLCs (183 sats)). - let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); - push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; - create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt); - - let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 - + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1; - // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel - // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the - // commitment transaction fee. - route_payment(&nodes[1], &[&nodes[0]], dust_amt); - - // Send four HTLCs to cover the initial push_msat buffer we're required to include - for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { - route_payment(&nodes[1], &[&nodes[0]], 1_000_000); - } - - // One more than the dust amt should fail, however. - let (mut route, our_payment_hash, _, our_payment_secret) = - get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt); - route.paths[0].hops[0].fee_msat += 1; - unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); -} - #[xtest(feature = "_externalize_tests")] pub fn test_chan_init_feerate_unaffordability() { // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index e339ac0e8fd..a63bf0f9c6b 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -1,12 +1,14 @@ //! 
Various unit tests covering HTLC handling as well as tests covering channel reserve tracking. -use crate::events::{Event, PaymentPurpose}; +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose}; use crate::ln::functional_test_utils::*; -use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC}; -use crate::ln::channel::{FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, Channel, get_holder_selected_channel_reserve_satoshis}; +use crate::ln::chan_utils::{self, CommitmentTransaction, htlc_success_tx_weight, commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC}; +use crate::ln::channel::{FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, Channel, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis}; use crate::ln::channelmanager::PaymentId; use crate::ln::outbound_payment::RecipientOnionFields; -use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; +use crate::ln::onion_utils::self; +use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; +use crate::sign::ecdsa::EcdsaChannelSigner; use crate::util::config::UserConfig; use crate::util::errors::APIError; use crate::types::features::ChannelTypeFeatures; @@ -14,6 +16,8 @@ use crate::routing::router::PaymentParameters; use lightning_macros::xtest; +use bitcoin::secp256k1::{Secp256k1, SecretKey}; + fn do_test_counterparty_no_reserve(send_from_initiator: bool) { // A peer providing a channel_reserve_satoshis of 0 (or less than our dust limit) is insecure, // but only for them. Because some LSPs do it with some level of trust of the clients (for a @@ -506,3 +510,433 @@ pub fn channel_reserve_in_flight_removes() { claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); } + +#[xtest(feature = "_externalize_tests")] +pub fn holding_cell_htlc_counting() { + // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs + // to ensure we don't end up with HTLCs sitting around in our holding cell for several + // commitment dance rounds. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + // Fetch a route in advance as we will be unable to once we're unable to send. 
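+	// (Once the pending and holding-cell HTLCs below eat up the channel's outbound liquidity,
+	// `get_route_and_payment_hash!` would no longer find a usable path, so the route has to be
+	// grabbed up front.)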
+ let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); + + let mut payments = Vec::new(); + for _ in 0..50 { + let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); + nodes[1].node.send_payment_with_route(route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + payments.push((payment_preimage, payment_hash)); + } + check_added_monitors!(nodes[1], 1); + + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let initial_payment_event = SendEvent::from_event(events.pop().unwrap()); + assert_eq!(initial_payment_event.node_id, node_c_id); + + // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in + // the holding cell waiting on B's RAA to send. At this point we should not be able to add + // another HTLC. + { + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, payment_hash_1, + RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) + ), true, APIError::ChannelUnavailable { .. }, {}); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + } + + // This should also be true if we try to forward a payment. + let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000); + { + nodes[0].node.send_payment_with_route(route, payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + } + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let payment_event = SendEvent::from_event(events.pop().unwrap()); + assert_eq!(payment_event.node_id, node_b_id); + + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + // We have to forward pending HTLCs twice - once tries to forward the payment forward (and + // fails), the second will process the resulting failure and fail the HTLC backward. 
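+	// (Concretely, the plain `expect_pending_htlcs_forwardable!` below is that first pass; the
+	// `..._and_htlc_handling_failed!` variant is the second, and additionally asserts that the
+	// failure is recorded as a `Forward` failure towards nodes[2] over `chan_2`.)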
+ expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]); + check_added_monitors!(nodes[1], 1); + + let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true); + + expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false); + + // Now forward all the pending HTLCs and claim them back + nodes[2].node.handle_update_add_htlc(node_b_id, &initial_payment_event.msgs[0]); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &initial_payment_event.commitment_msg); + check_added_monitors!(nodes[2], 1); + + let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); + check_added_monitors!(nodes[1], 1); + let as_updates = get_htlc_update_msgs!(nodes[1], node_c_id); + + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_commitment_signed); + check_added_monitors!(nodes[1], 1); + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); + + for ref update in as_updates.update_add_htlcs.iter() { + nodes[2].node.handle_update_add_htlc(node_b_id, update); + } + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &as_updates.commitment_signed); + check_added_monitors!(nodes[2], 1); + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); + check_added_monitors!(nodes[2], 1); + let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); + + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); + check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_commitment_signed); + check_added_monitors!(nodes[1], 1); + let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); + + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_final_raa); + check_added_monitors!(nodes[2], 1); + + expect_pending_htlcs_forwardable!(nodes[2]); + + let events = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(events.len(), payments.len()); + for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) { + match event { + &Event::PaymentClaimable { ref payment_hash, .. } => { + assert_eq!(*payment_hash, *hash); + }, + _ => panic!("Unexpected event"), + }; + } + + for (preimage, _) in payments.drain(..) { + claim_payment(&nodes[1], &[&nodes[2]], preimage); + } + + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_basic_channel_reserve() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + let channel_reserve = chan_stat.channel_reserve_msat; + + // The 2* and +1 are for the fee spike reserve. 
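+	// As a rough sketch of the arithmetic (in terms of the weight helpers imported above):
+	//   commit_tx_fee_msat(feerate, n, features)
+	//       = feerate * (commitment_tx_base_weight(features) + n * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000
+	// so `2 * commit_tx_fee_msat(feerate, 1 + 1, ..)` budgets for this HTLC plus one phantom
+	// HTLC, at `FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE` (i.e. double) the current feerate.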
+ let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2)); + let max_can_send = 5000000 - channel_reserve - commit_tx_fee; + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); + route.paths[0].hops.last_mut().unwrap().fee_msat += 1; + let err = nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)); + unwrap_send_err!(nodes[0], err, true, APIError::ChannelUnavailable { .. }, {} ); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + send_payment(&nodes[0], &vec![&nodes[1]], max_can_send); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_fee_spike_violation_fails_htlc() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 3460000); + route.paths[0].hops[0].fee_msat += 1; + // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() + let secp_ctx = Secp256k1::new(); + let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!"); + + let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; + + let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); + let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], + 3460001, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); + let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); + let msg = msgs::UpdateAddHTLC { + channel_id: chan.2, + htlc_id: 0, + amount_msat: htlc_msat, + payment_hash: payment_hash, + cltv_expiry: htlc_cltv, + onion_routing_packet: onion_packet, + skimmed_fee_msat: None, + blinding_point: None, + }; + + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + + // Now manually create the commitment_signed message corresponding to the update_add + // nodes[0] just sent. In the code for construction of this message, "local" refers + // to the sender of the message, and "remote" refers to the receiver. 
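+	// (Note that commitment numbers count *down* in LDK from INITIAL_COMMITMENT_NUMBER =
+	// (1 << 48) - 1, matching BOLT 3's 48-bit commitment number space, which is why the
+	// secrets and points below are fetched at INITIAL_COMMITMENT_NUMBER, - 1, and - 2.)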
+ + let feerate_per_kw = get_feerate!(nodes[0], nodes[1], chan.2); + + const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; + + let (local_secret, next_local_point) = { + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); + let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); + let chan_signer = local_chan.get_signer(); + // Make the signer believe we validated another commitment, so we can release the secret + chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; + + (chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(), + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx).unwrap()) + }; + let remote_point = { + let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); + let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); + let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); + let chan_signer = remote_chan.get_signer(); + chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap() + }; + + // Build the remote commitment transaction so we can sign it, and then later use the + // signature for the commitment_signed message. + let local_chan_balance = 1313; + + let accepted_htlc_info = chan_utils::HTLCOutputInCommitment { + offered: false, + amount_msat: 3460001, + cltv_expiry: htlc_cltv, + payment_hash, + transaction_output_index: Some(1), + }; + + let commitment_number = INITIAL_COMMITMENT_NUMBER - 1; + + let res = { + let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); + let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); + let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); + let local_chan_signer = local_chan.get_signer(); + let commitment_tx = CommitmentTransaction::new( + commitment_number, + &remote_point, + 95000, + local_chan_balance, + feerate_per_kw, + vec![accepted_htlc_info], + &local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(), + &secp_ctx, + ); + local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment( + &local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(), + Vec::new(), &secp_ctx, + ).unwrap() + }; + + let commit_signed_msg = msgs::CommitmentSigned { + channel_id: chan.2, + signature: res.0, + htlc_signatures: res.1, + batch: None, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }; + + // Send the commitment_signed message to the nodes[1]. + nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); + let _ = nodes[1].node.get_and_clear_pending_msg_events(); + + // Send the RAA to nodes[1]. + let raa_msg = msgs::RevokeAndACK { + channel_id: chan.2, + per_commitment_secret: local_secret, + next_per_commitment_point: next_local_point, + #[cfg(taproot)] + next_local_nonce: None, + }; + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + // Make sure the HTLC failed in the way we expect. 
+ match events[0] { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => { + assert_eq!(update_fail_htlcs.len(), 1); + update_fail_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + }; + nodes[1].logger.assert_log("lightning::ln::channel", + format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); + + check_added_monitors!(nodes[1], 3); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { + let mut chanmon_cfgs = create_chanmon_cfgs(2); + // Set the fee rate for the channel very high, to the point where the fundee + // sending any above-dust amount would result in a channel reserve violation. + // In this test we check that we would be prevented from sending an HTLC in + // this situation. + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let default_config = UserConfig::default(); + let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + + let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); + + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + + let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); + + // Fetch a route in advance as we will be unable to once we're unable to send. + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); + // Sending exactly enough to hit the reserve amount should be accepted + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } + + // However one more HTLC should be significantly over the reserve amount and fail. + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) + ), true, APIError::ChannelUnavailable { .. }, {}); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { + let mut chanmon_cfgs = create_chanmon_cfgs(2); + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + + let default_config = UserConfig::default(); + let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + + // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a + // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment + // transaction fee with 0 HTLCs (183 sats)). 
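+	// (Those numbers assume the test suite's default feerate of 253 sat/kW: a 0-HTLC
+	// `only_static_remote_key` commitment transaction weighs roughly 724 WU, so
+	// 253 * 724 / 1000 = 183 sats rounded down, while the default holder reserve for a
+	// 100k sat channel bottoms out at its 1000 sat floor.)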
+ let mut push_amt = 100_000_000; + push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); + push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); + + // Send four HTLCs to cover the initial push_msat buffer we're required to include + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { + route_payment(&nodes[1], &[&nodes[0]], 1_000_000); + } + + let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 1000); + route.paths[0].hops[0].fee_msat = 700_000; + // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() + let secp_ctx = Secp256k1::new(); + let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); + let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; + let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); + let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], + 700_000, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); + let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); + let msg = msgs::UpdateAddHTLC { + channel_id: chan.2, + htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64, + amount_msat: htlc_msat, + payment_hash: payment_hash, + cltv_expiry: htlc_cltv, + onion_routing_packet: onion_packet, + skimmed_fee_msat: None, + blinding_point: None, + }; + + nodes[0].node.handle_update_add_htlc(node_b_id, &msg); + // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3); + assert_eq!(nodes[0].node.list_channels().len(), 0); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); + check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }, + [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { + // Test that if we receive many dust HTLCs over an outbound channel, they don't count when + // calculating our commitment transaction fee (this was previously broken). + let mut chanmon_cfgs = create_chanmon_cfgs(2); + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let default_config = UserConfig::default(); + let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + + // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a + // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment + // transaction fee with 0 HTLCs (183 sats)). 
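+	// (A received HTLC is non-dust on our commitment transaction only once its value covers
+	// the dust limit plus the fee of the HTLC-success transaction that would spend it, i.e.
+	// roughly dust_limit * 1000 + feerate * htlc_success_tx_weight / 1000 * 1000 msat;
+	// `dust_amt` a few lines down sits one msat below that cutoff, so it must stay dust and
+	// must not count towards the commitment fee.)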
+	let mut push_amt = 100_000_000;
+	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
+	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
+
+	let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
+		+ feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1;
+	// In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
+	// reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
+	// commitment transaction fee.
+	route_payment(&nodes[1], &[&nodes[0]], dust_amt);
+
+	// Send four HTLCs to cover the initial push_msat buffer we're required to include
+	for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
+		route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+	}
+
+	// One more than the dust amt should fail, however.
+	let (mut route, our_payment_hash, _, our_payment_secret) =
+		get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt);
+	route.paths[0].hops[0].fee_msat += 1;
+	unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash,
+			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
+		), true, APIError::ChannelUnavailable { .. }, {});
+}

From 1764cf9b6ed64ec12ff7c8d3168aa97b9caaf5d7 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 1 May 2025 15:58:38 +0000
Subject: [PATCH 11/25] f fix build

---
 lightning/src/ln/htlc_reserve_unit_tests.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs
index a63bf0f9c6b..80dd31a54d3 100644
--- a/lightning/src/ln/htlc_reserve_unit_tests.rs
+++ b/lightning/src/ln/htlc_reserve_unit_tests.rs
@@ -6,7 +6,7 @@ use crate::ln::chan_utils::{self, CommitmentTransaction, htlc_success_tx_weight,
 use crate::ln::channel::{FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, Channel, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis};
 use crate::ln::channelmanager::PaymentId;
 use crate::ln::outbound_payment::RecipientOnionFields;
-use crate::ln::onion_utils::self;
+use crate::ln::onion_utils;
 use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent};
 use crate::sign::ecdsa::EcdsaChannelSigner;
 use crate::util::config::UserConfig;

From 021dc4cb424cf4a945fe95d29c65223e048f0f72 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 1 May 2025 14:37:43 +0000
Subject: [PATCH 12/25] Move HTLC unit tests out of `functional_tests.rs` into
 a new file

`functional_tests.rs` has gotten incredibly huge over the years, so here
we move some simple HTLC unit tests into a new file.
--- lightning/src/ln/functional_tests.rs | 796 +------------------ lightning/src/ln/htlc_reserve_unit_tests.rs | 798 +++++++++++++++++++- 2 files changed, 797 insertions(+), 797 deletions(-) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 6c6815c4684..4322c037044 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -22,7 +22,7 @@ use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, Signe use crate::events::bump_transaction::WalletSource; use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCHandlingFailureType, PaymentFailureReason}; use crate::ln::types::ChannelId; -use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; +use crate::types::payment::{PaymentSecret, PaymentHash}; use crate::ln::channel::{get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel, COINBASE_MATURITY, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, MIN_AFFORDABLE_HTLC_COUNT}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError, MIN_CHAN_DUST_LIMIT_SATOSHIS}; @@ -39,7 +39,6 @@ use crate::util::errors::APIError; use crate::util::ser::{Writeable, ReadableArgs}; use crate::util::string::UntrustedString; use crate::util::config::{ChannelConfigOverrides, ChannelHandshakeConfigUpdate, ChannelConfigUpdate, MaxDustHTLCExposure, UserConfig}; -use crate::ln::onion_utils::AttributionData; use bitcoin::hash_types::BlockHash; use bitcoin::locktime::absolute::LockTime; @@ -1364,111 +1363,6 @@ pub fn test_chan_init_feerate_unaffordability() { } } -#[xtest(feature = "_externalize_tests")] -pub fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() { - // Test that if we receive many dust HTLCs over an inbound channel, they don't count when - // calculating our counterparty's commitment transaction fee (this was previously broken). - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000); - - let payment_amt = 46000; // Dust amount - // In the previous code, these first four payments would succeed. - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - - // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer. - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - route_payment(&nodes[0], &[&nodes[1]], payment_amt); - - // And this last payment previously resulted in nodes[1] closing on its inbound-channel - // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment - // transaction fee and therefore perceived this next payment as a channel reserve violation. 
- route_payment(&nodes[0], &[&nodes[1]], payment_amt); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { - let chanmon_cfgs = create_chanmon_cfgs(3); - let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000); - - let feemsat = 239; - let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; - let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - let feerate = get_feerate!(nodes[0], nodes[1], chan.2); - let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); - - // Add a 2* and +1 for the fee spike reserve. - let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features); - let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2; - let amt_msat_1 = recv_value_1 + total_routing_fee_msat; - - // Add a pending HTLC. - let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1); - let payment_event_1 = { - nodes[0].node.send_payment_with_route(route_1.clone(), our_payment_hash_1, - RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - SendEvent::from_event(events.remove(0)) - }; - nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event_1.msgs[0]); - - // Attempt to trigger a channel reserve violation --> payment failure. 
- let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features); - let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1; - let amt_msat_2 = recv_value_2 + total_routing_fee_msat; - let mut route_2 = route_1.clone(); - route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2; - - // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() - let secp_ctx = Secp256k1::new(); - let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); - let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; - let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv); - let recipient_onion_fields = RecipientOnionFields::spontaneous_empty(); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( - &route_2.paths[0], recv_value_2, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap(); - let msg = msgs::UpdateAddHTLC { - channel_id: chan.2, - htlc_id: 1, - amount_msat: htlc_msat + 1, - payment_hash: our_payment_hash_1, - cltv_expiry: htlc_cltv, - onion_routing_packet: onion_packet, - skimmed_fee_msat: None, - blinding_point: None, - }; - - nodes[1].node.handle_update_add_htlc(node_a_id, &msg); - // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3); - assert_eq!(nodes[1].node.list_channels().len(), 1); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() }, - [node_a_id], 100000); -} - #[xtest(feature = "_externalize_tests")] pub fn test_inbound_outbound_capacity_is_not_zero() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -6007,694 +5901,6 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { check_added_monitors!(nodes[0], 1); } -#[xtest(feature = "_externalize_tests")] -pub fn test_payment_route_reaching_same_channel_twice() { - //A route should not go through the same channel twice - //It is enforced when constructing a route. 
- let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_b_id = nodes[1].node.get_our_node_id(); - - let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); - - let payment_params = PaymentParameters::from_node_id(node_b_id, 0) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); - - // Extend the path by itself, essentially simulating route going through same channel twice - let cloned_hops = route.paths[0].hops.clone(); - route.paths[0].hops.extend_from_slice(&cloned_hops); - - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), false, APIError::InvalidRoute { ref err }, - assert_eq!(err, &"Path went through the same channel twice")); - assert!(nodes[0].node.list_recent_payments().is_empty()); -} - -// BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message. -// BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve. -//TODO: I don't believe this is explicitly enforced when sending an HTLC but as the Fee aspect of the BOLT specs is in flux leaving this as a TODO. - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { - //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these) - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - route.paths[0].hops[0].fee_msat = 100; - - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_sender_zero_value_msat() { - //BOLT2 Requirement: MUST offer amount_msat greater than 0. 
- let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - route.paths[0].hops[0].fee_msat = 0; - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)), - true, APIError::ChannelUnavailable { ref err }, - assert_eq!(err, "Cannot send 0-msat HTLC")); - - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 2); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { - //BOLT2 Requirement: MUST offer amount_msat greater than 0. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); - updates.update_add_htlcs[0].amount_msat = 0; - - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3); - check_closed_broadcast!(nodes[1], true).unwrap(); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }, - [node_a_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { - //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000. - //It is enforced when constructing a route. 
- let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_b_id = nodes[1].node.get_our_node_id(); - - let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); - - let payment_params = PaymentParameters::from_node_id(node_b_id, 0) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); - route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001; - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::InvalidRoute { ref err }, - assert_eq!(err, &"Channel CLTV overflowed?")); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() { - //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC. - //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0. - //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); - let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&node_a_id) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64; - - // Fetch a route in advance as we will be unable to once we're unable to send. - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - for i in 0..max_accepted_htlcs { - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - let payment_event = { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - if let MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] { - assert_eq!(htlcs[0].htlc_id, i); - } else { - assert!(false); - } - SendEvent::from_event(events.remove(0)) - }; - nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); - commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); - - expect_pending_htlcs_forwardable!(nodes[1]); - expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000); - } - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. 
}, {}); - - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { - //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let channel_value = 100000; - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0); - let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat; - - send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight); - - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight); - // Manually create a route over our max in flight (which our router normally automatically - // limits us to. - route.paths[0].hops[0].fee_msat = max_in_flight + 1; - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - - send_payment(&nodes[0], &[&nodes[1]], max_in_flight); -} - -// BOLT 2 Requirements for the Receiver when handling an update_add_htlc message. -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { - //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let htlc_minimum_msat: u64; - { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); - let channel = chan_lock.channel_by_id.get(&chan.2).unwrap(); - htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat(); - } - - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); - updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1; - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. 
Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { - //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - - let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); - let channel_reserve = chan_stat.channel_reserve_msat; - let feerate = get_feerate!(nodes[0], nodes[1], chan.2); - let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); - // The 2* and +1 are for the fee spike reserve. - let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); - - let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound; - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); - - // Even though channel-initiator senders are required to respect the fee_spike_reserve, - // at this time channel-initiatee receivers are not required to enforce that senders - // respect the fee_spike_reserve. - updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1; - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { - //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel - //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash. 
- let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - - let send_amt = 3999999; - let (mut route, our_payment_hash, _, our_payment_secret) = - get_route_and_payment_hash!(nodes[0], nodes[1], 1000); - route.paths[0].hops[0].fee_msat = send_amt; - let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); - let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; - let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv); - let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret); - let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( - &route.paths[0], send_amt, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap(); - - let mut msg = msgs::UpdateAddHTLC { - channel_id: chan.2, - htlc_id: 0, - amount_msat: 1000, - payment_hash: our_payment_hash, - cltv_expiry: htlc_cltv, - onion_routing_packet: onion_packet.clone(), - skimmed_fee_msat: None, - blinding_point: None, - }; - - for i in 0..50 { - msg.htlc_id = i as u64; - nodes[1].node.handle_update_add_htlc(node_a_id, &msg); - } - msg.htlc_id = (50) as u64; - nodes[1].node.handle_update_add_htlc(node_a_id, &msg); - - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { - //OR adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); - - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); - updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert!(regex::Regex::new("Remote HTLC add would put 
them over our max HTLC value").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 1000000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { - //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel. - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); - updates.update_add_htlcs[0].cltv_expiry = 500000000; - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height"); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { - //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection. - // We test this by first testing that that repeated HTLCs pass commitment signature checks - // after disconnect and that non-sequential htlc_ids result in a channel failure. 
- let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - create_announced_chan_between_nodes(&nodes, 0, 1); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - - //Disconnect and Reconnect - nodes[0].node.peer_disconnected(node_b_id); - nodes[1].node.peer_disconnected(node_a_id); - nodes[0].node.peer_connected(node_b_id, &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); - assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(node_a_id, &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); - let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); - assert_eq!(reestablish_2.len(), 1); - nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); - handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); - handle_chan_reestablish_msgs!(nodes[1], nodes[0]); - - //Resend HTLC - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - assert_eq!(updates.commitment_signed.len(), 1); - assert_eq!(updates.commitment_signed[0].htlc_signatures.len(), 1); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); - let _bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); - - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - - assert!(nodes[1].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { - //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. 
- - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - - let update_msg = msgs::UpdateFulfillHTLC{ - channel_id: chan.2, - htlc_id: 0, - payment_preimage: our_payment_preimage, - }; - - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_msg); - - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { - //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. 
- - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - - let update_msg = msgs::UpdateFailHTLC{ - channel_id: chan.2, - htlc_id: 0, - reason: Vec::new(), - attribution_data: Some(AttributionData::new()) - }; - - nodes[0].node.handle_update_fail_htlc(node_b_id, &update_msg); - - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() { - //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. 
- - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - let updates = get_htlc_update_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - let update_msg = msgs::UpdateFailMalformedHTLC{ - channel_id: chan.2, - htlc_id: 0, - sha256_of_onion: [1; 32], - failure_code: 0x8000, - }; - - nodes[0].node.handle_update_fail_malformed_htlc(node_b_id, &update_msg); - - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { - //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel. - - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_b_id = nodes[1].node.get_our_node_id(); - - create_announced_chan_between_nodes(&nodes, 0, 1); - - let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000); - - nodes[1].node.claim_funds(our_payment_preimage); - check_added_monitors!(nodes[1], 1); - expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); - - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { - match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. 
} } => { - assert!(update_add_htlcs.is_empty()); - assert_eq!(update_fulfill_htlcs.len(), 1); - assert!(update_fail_htlcs.is_empty()); - assert!(update_fail_malformed_htlcs.is_empty()); - assert!(update_fee.is_none()); - update_fulfill_htlcs[0].clone() - }, - _ => panic!("Unexpected event"), - } - }; - - update_fulfill_msg.htlc_id = 1; - - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_msg); - - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { - //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel. - - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_b_id = nodes[1].node.get_our_node_id(); - - create_announced_chan_between_nodes(&nodes, 0, 1); - - let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000); - - nodes[1].node.claim_funds(our_payment_preimage); - check_added_monitors!(nodes[1], 1); - expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); - - let events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { - match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { - assert!(update_add_htlcs.is_empty()); - assert_eq!(update_fulfill_htlcs.len(), 1); - assert!(update_fail_htlcs.is_empty()); - assert!(update_fail_malformed_htlcs.is_empty()); - assert!(update_fee.is_none()); - update_fulfill_htlcs[0].clone() - }, - _ => panic!("Unexpected event"), - } - }; - - update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]); - - nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_msg); - - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() { - //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel. 
- - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); - - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - - let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); - updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message - - nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - check_added_monitors!(nodes[1], 0); - commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); - check_added_monitors(&nodes[1], 1); - - let events = nodes[1].node.get_and_clear_pending_msg_events(); - - let mut update_msg: msgs::UpdateFailMalformedHTLC = { - match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { - assert!(update_add_htlcs.is_empty()); - assert!(update_fulfill_htlcs.is_empty()); - assert!(update_fail_htlcs.is_empty()); - assert_eq!(update_fail_malformed_htlcs.len(), 1); - assert!(update_fee.is_none()); - update_fail_malformed_htlcs[0].clone() - }, - _ => panic!("Unexpected event"), - } - }; - update_msg.failure_code &= !0x8000; - nodes[0].node.handle_update_fail_malformed_htlc(node_b_id, &update_msg); - - assert!(nodes[0].node.list_channels().is_empty()); - let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); - check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 1000000); -} - #[xtest(feature = "_externalize_tests")] pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() { //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc: diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 80dd31a54d3..58b025e7b95 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -4,14 +4,15 @@ use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpos use crate::ln::functional_test_utils::*; use crate::ln::chan_utils::{self, CommitmentTransaction, htlc_success_tx_weight, commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC}; use crate::ln::channel::{FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, Channel, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis}; -use crate::ln::channelmanager::PaymentId; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder}; use 
crate::ln::outbound_payment::RecipientOnionFields;
-use crate::ln::onion_utils;
+use crate::ln::onion_utils::{self, AttributionData};
 use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent};
 use crate::sign::ecdsa::EcdsaChannelSigner;
 use crate::util::config::UserConfig;
 use crate::util::errors::APIError;
 use crate::types::features::ChannelTypeFeatures;
+use crate::types::payment::PaymentPreimage;
 use crate::routing::router::PaymentParameters;
 
 use lightning_macros::xtest;
 
@@ -940,3 +941,796 @@ pub fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
 		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
 	), true, APIError::ChannelUnavailable { .. }, {});
 }
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
+ // Test that if we receive many dust HTLCs over an inbound channel, they don't count when
+ // calculating our counterparty's commitment transaction fee (this was previously broken).
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000);
+
+ let payment_amt = 46000; // Dust amount
+ // In the previous code, these first four payments would succeed.
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+
+ // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+
+ // And this last payment previously resulted in nodes[1] closing on its inbound-channel
+ // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
+ // transaction fee and therefore perceived this next payment as a channel reserve violation.
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let node_a_id = nodes[0].node.get_our_node_id();
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+ let _ = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000);
+
+ let feemsat = 239;
+ let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
+ let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
+ let feerate = get_feerate!(nodes[0], nodes[1], chan.2);
+ let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2);
+
+ // The 2* and +1 are for the fee spike reserve.
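+ // (A sketch of the fee-spike-buffer rule, matching the FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE
+ // constant imported above: the sender must be able to pay for its commitment transaction at
+ // twice the current feerate while carrying one extra, buffer, HTLC, i.e. roughly
+ //   2 * commit_tx_fee_msat(feerate, htlc_count + 1, features).
+ // With the two pending HTLCs this test sets up, that gives the `2 + 1` below.)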
+ let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features); + let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2; + let amt_msat_1 = recv_value_1 + total_routing_fee_msat; + + // Add a pending HTLC. + let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1); + let payment_event_1 = { + nodes[0].node.send_payment_with_route(route_1.clone(), our_payment_hash_1, + RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + SendEvent::from_event(events.remove(0)) + }; + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event_1.msgs[0]); + + // Attempt to trigger a channel reserve violation --> payment failure. + let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features); + let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1; + let amt_msat_2 = recv_value_2 + total_routing_fee_msat; + let mut route_2 = route_1.clone(); + route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2; + + // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() + let secp_ctx = Secp256k1::new(); + let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); + let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; + let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv); + let recipient_onion_fields = RecipientOnionFields::spontaneous_empty(); + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( + &route_2.paths[0], recv_value_2, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); + let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap(); + let msg = msgs::UpdateAddHTLC { + channel_id: chan.2, + htlc_id: 1, + amount_msat: htlc_msat + 1, + payment_hash: our_payment_hash_1, + cltv_expiry: htlc_cltv, + onion_routing_packet: onion_packet, + skimmed_fee_msat: None, + blinding_point: None, + }; + + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3); + assert_eq!(nodes[1].node.list_channels().len(), 1); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); + check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() }, + [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_payment_route_reaching_same_channel_twice() { + //A route should not go through the same channel twice + //It is enforced when constructing a route. 
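+ // (Note: the router itself never produces such a route; the test below builds one by hand by
+ // duplicating the path's hops, and expects the send-side validation to reject it with
+ // `APIError::InvalidRoute`.)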
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); + + let payment_params = PaymentParameters::from_node_id(node_b_id, 0) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); + + // Extend the path by itself, essentially simulating route going through same channel twice + let cloned_hops = route.paths[0].hops.clone(); + route.paths[0].hops.extend_from_slice(&cloned_hops); + + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) + ), false, APIError::InvalidRoute { ref err }, + assert_eq!(err, &"Path went through the same channel twice")); + assert!(nodes[0].node.list_recent_payments().is_empty()); +} + +// BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message. +// BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve. +//TODO: I don't believe this is explicitly enforced when sending an HTLC but as the Fee aspect of the BOLT specs is in flux leaving this as a TODO. + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { + //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these) + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + route.paths[0].hops[0].fee_msat = 100; + + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) + ), true, APIError::ChannelUnavailable { .. }, {}); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_sender_zero_value_msat() { + //BOLT2 Requirement: MUST offer amount_msat greater than 0. 
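+ // As the assertions below check, this is enforced entirely in the local send path: the send
+ // call errors with "Cannot send 0-msat HTLC" and no update_add_htlc message is ever generated.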
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + route.paths[0].hops[0].fee_msat = 0; + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)), + true, APIError::ChannelUnavailable { ref err }, + assert_eq!(err, "Cannot send 0-msat HTLC")); + + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 2); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { + //BOLT2 Requirement: MUST offer amount_msat greater than 0. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + updates.update_add_htlcs[0].amount_msat = 0; + + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3); + check_closed_broadcast!(nodes[1], true).unwrap(); + check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }, + [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { + //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000. + //It is enforced when constructing a route. 
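+ // (Background: 500000000 is the Bitcoin locktime threshold above which values are read as
+ // UNIX timestamps rather than block heights; the receiver-side counterpart test later in this
+ // file expects "Remote provided CLTV expiry in seconds instead of block height".)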
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + + let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); + + let payment_params = PaymentParameters::from_node_id(node_b_id, 0) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); + route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001; + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) + ), true, APIError::InvalidRoute { ref err }, + assert_eq!(err, &"Channel CLTV overflowed?")); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() { + //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC. + //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0. + //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); + let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&node_a_id) + .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64; + + // Fetch a route in advance as we will be unable to once we're unable to send. + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + for i in 0..max_accepted_htlcs { + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + let payment_event = { + nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + if let MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] { + assert_eq!(htlcs[0].htlc_id, i); + } else { + assert!(false); + } + SendEvent::from_event(events.remove(0)) + }; + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + check_added_monitors!(nodes[1], 0); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + + expect_pending_htlcs_forwardable!(nodes[1]); + expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000); + } + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) + ), true, APIError::ChannelUnavailable { .. 
}, {});
+
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
+ //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let channel_value = 100000;
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
+ let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
+
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
+
+ let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
+ // Manually create a route over our max in flight (which our router normally automatically
+ // limits us to).
+ route.paths[0].hops[0].fee_msat = max_in_flight + 1;
+ unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash,
+ RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
+ ), true, APIError::ChannelUnavailable { .. }, {});
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
+}
+
+// BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
+ //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+ let htlc_minimum_msat: u64;
+ {
+ let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+ let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap();
+ let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
+ htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
+ }
+
+ let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
+ nodes[0].node.send_payment_with_route(route, our_payment_hash,
+ RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id);
+ updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat - 1;
+ nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
+ assert!(nodes[1].node.list_channels().is_empty());
+ let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
+ assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. 
Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); + check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { + //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); + let channel_reserve = chan_stat.channel_reserve_msat; + let feerate = get_feerate!(nodes[0], nodes[1], chan.2); + let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); + // The 2* and +1 are for the fee spike reserve. + let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); + + let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound; + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); + nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + + // Even though channel-initiator senders are required to respect the fee_spike_reserve, + // at this time channel-initiatee receivers are not required to enforce that senders + // respect the fee_spike_reserve. + updates.update_add_htlcs[0].amount_msat = max_can_send + commit_tx_fee_outbound + 1; + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + assert!(nodes[1].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); + check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { + //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel + //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash. 
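+ // The loop below offers 51 HTLCs (ids 0 through 50) that all reuse the same payment_hash;
+ // assuming the receiver's default max_accepted_htlcs of 50, the first 50 are accepted and the
+ // 51st fails the channel.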
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); + + let send_amt = 3999999; + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000); + route.paths[0].hops[0].fee_msat = send_amt; + let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); + let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; + let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv); + let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret); + let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( + &route.paths[0], send_amt, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); + let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap(); + + let mut msg = msgs::UpdateAddHTLC { + channel_id: chan.2, + htlc_id: 0, + amount_msat: 1000, + payment_hash: our_payment_hash, + cltv_expiry: htlc_cltv, + onion_routing_packet: onion_packet.clone(), + skimmed_fee_msat: None, + blinding_point: None, + }; + + for i in 0..50 { + msg.htlc_id = i as u64; + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + } + msg.htlc_id = (50) as u64; + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + + assert!(nodes[1].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str())); + check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { + //OR adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); + + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + assert!(nodes[1].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert!(regex::Regex::new("Remote HTLC add would put 
them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
+ check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 1000000);
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
+ //BOLT2 Requirement: if a sending node sets cltv_expiry greater than or equal to 500000000: SHOULD fail the channel.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let node_a_id = nodes[0].node.get_our_node_id();
+ let node_b_id = nodes[1].node.get_our_node_id();
+
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+ let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
+ nodes[0].node.send_payment_with_route(route, our_payment_hash,
+ RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id);
+ updates.update_add_htlcs[0].cltv_expiry = 500000000;
+ nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
+
+ assert!(nodes[1].node.list_channels().is_empty());
+ let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
+ assert_eq!(err_msg.data, "Remote provided CLTV expiry in seconds instead of block height");
+ check_added_monitors!(nodes[1], 1);
+ check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000);
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
+ //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
+ // We test this by first testing that repeated HTLCs pass commitment signature checks
+ // after disconnect and that non-sequential htlc_ids result in a channel failure.
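+ // Rough shape: send an HTLC so the peer has seen update_add_htlc but no commitment yet,
+ // disconnect and reconnect, retransmit the same update_add_htlc (which must be tolerated),
+ // commit it, then retransmit it once more; now that the HTLC is committed, the repeated id is
+ // out of sequence and the channel is failed.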
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + //Disconnect and Reconnect + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); + nodes[0].node.peer_connected(node_b_id, &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + nodes[1].node.peer_connected(node_a_id, &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); + handle_chan_reestablish_msgs!(nodes[0], nodes[1]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); + handle_chan_reestablish_msgs!(nodes[1], nodes[0]); + + //Resend HTLC + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + assert_eq!(updates.commitment_signed.len(), 1); + assert_eq!(updates.commitment_signed[0].htlc_signatures.len(), 1); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &updates.commitment_signed); + check_added_monitors!(nodes[1], 1); + let _bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); + + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + assert!(nodes[1].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); + assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str())); + check_added_monitors!(nodes[1], 1); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { + //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. 
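+ // The same "forged message before commitment" pattern is exercised three times in a row:
+ // this test sends an unsolicited update_fulfill_htlc, and the two tests that follow do the
+ // same with update_fail_htlc and update_fail_malformed_htlc respectively.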
+ + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + + check_added_monitors!(nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + let update_msg = msgs::UpdateFulfillHTLC{ + channel_id: chan.2, + htlc_id: 0, + payment_preimage: our_payment_preimage, + }; + + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); + check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { + //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. 
+ + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + + let update_msg = msgs::UpdateFailHTLC{ + channel_id: chan.2, + htlc_id: 0, + reason: Vec::new(), + attribution_data: Some(AttributionData::new()) + }; + + nodes[0].node.handle_update_fail_htlc(node_b_id, &update_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); + check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() { + //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc. 
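+	// Same shape again for update_fail_malformed_htlc: the message below carries a dummy
+	// sha256_of_onion and failure_code 0x8000 (just the BADONION bit), and must still be
+	// rejected because no commitment_signed has been exchanged.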
+ + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + let updates = get_htlc_update_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + let update_msg = msgs::UpdateFailMalformedHTLC{ + channel_id: chan.2, + htlc_id: 0, + sha256_of_onion: [1; 32], + failure_code: 0x8000, + }; + + nodes[0].node.handle_update_fail_malformed_htlc(node_b_id, &update_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); + check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { + //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel. + + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000); + + nodes[1].node.claim_funds(our_payment_preimage); + check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { + match events[0] { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. 
} } => { + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); + update_fulfill_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + } + }; + + update_fulfill_msg.htlc_id = 1; + + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); + check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { + //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel. + + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000); + + nodes[1].node.claim_funds(our_payment_preimage); + check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { + match events[0] { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert_eq!(update_fulfill_htlcs.len(), 1); + assert!(update_fail_htlcs.is_empty()); + assert!(update_fail_malformed_htlcs.is_empty()); + assert!(update_fee.is_none()); + update_fulfill_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + } + }; + + update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]); + + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str())); + check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() { + //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel. 
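+	// BADONION is the 0x8000 bit of failure_code. The test below provokes a legitimate
+	// update_fail_malformed_htlc by corrupting the onion version, then clears that bit
+	// before delivering the message:
+	//   update_msg.failure_code &= !0x8000;
+	// and expects the sender to fail the channel in response.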
+ + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); + + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + nodes[0].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); + updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message + + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); + check_added_monitors!(nodes[1], 0); + commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); + check_added_monitors(&nodes[1], 1); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + + let mut update_msg: msgs::UpdateFailMalformedHTLC = { + match events[0] { + MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { + assert!(update_add_htlcs.is_empty()); + assert!(update_fulfill_htlcs.is_empty()); + assert!(update_fail_htlcs.is_empty()); + assert_eq!(update_fail_malformed_htlcs.len(), 1); + assert!(update_fee.is_none()); + update_fail_malformed_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + } + }; + update_msg.failure_code &= !0x8000; + nodes[0].node.handle_update_fail_malformed_htlc(node_b_id, &update_msg); + + assert!(nodes[0].node.list_channels().is_empty()); + let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); + assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); + check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 1000000); +} From 8f3baa9a2874e95c18149ca7a7b72b1b7b102ac9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 1 May 2025 14:48:44 +0000 Subject: [PATCH 13/25] Clean up `htlc_reserve_unit_tests` in anticipation of `rustfmt` --- lightning/src/ln/htlc_reserve_unit_tests.rs | 338 ++++++++++++-------- 1 file changed, 205 insertions(+), 133 deletions(-) diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 58b025e7b95..88856fd4f44 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -41,7 +41,9 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; - let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None, None).unwrap(); + let push = if 
send_from_initiator { 0 } else { push_amt }; + let temp_channel_id = + nodes[0].node.create_channel(node_b_id, 100_000, push, 42, None, None).unwrap(); let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); if !send_from_initiator { open_channel_message.channel_reserve_satoshis = 0; @@ -99,7 +101,9 @@ pub fn test_channel_reserve_holding_cell_htlcs() { // It is now fixed, so we simply set the fee to the expected value here. let mut config = test_default_channel_config(); config.channel_config.forwarding_fee_base_msat = 239; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); + + let configs = [Some(config.clone()), Some(config.clone()), Some(config.clone())]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -141,9 +145,10 @@ pub fn test_channel_reserve_holding_cell_htlcs() { route.paths[0].hops.last_mut().unwrap().fee_msat += 1; assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -197,8 +202,10 @@ pub fn test_channel_reserve_holding_cell_htlcs() { let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1); let payment_event_1 = { - nodes[0].node.send_payment_with_route(route_1.clone(), our_payment_hash_1, - RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap(); + let route = route_1.clone(); + let onion = RecipientOnionFields::secret_only(our_payment_secret_1); + let id = PaymentId(our_payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -213,9 +220,10 @@ pub fn test_channel_reserve_holding_cell_htlcs() { let mut route = route_1.clone(); route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1; let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. 
}, {});
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 }
@@ -232,8 +240,9 @@ pub fn test_channel_reserve_holding_cell_htlcs() {
 	// now see if they go through on both sides
 	let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21);
 	// but this one will be stuck in the holding cell
-	nodes[0].node.send_payment_with_route(route_21, our_payment_hash_21,
-		RecipientOnionFields::secret_only(our_payment_secret_21), PaymentId(our_payment_hash_21.0)).unwrap();
+	let onion = RecipientOnionFields::secret_only(our_payment_secret_21);
+	let id = PaymentId(our_payment_hash_21.0);
+	nodes[0].node.send_payment_with_route(route_21, our_payment_hash_21, onion, id).unwrap();
 	check_added_monitors!(nodes[0], 0);
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 0);
@@ -243,16 +252,18 @@ pub fn test_channel_reserve_holding_cell_htlcs() {
 		let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
 		route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
-		unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash,
-				RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-			), true, APIError::ChannelUnavailable { .. }, {});
+		let onion = RecipientOnionFields::secret_only(our_payment_secret);
+		let id = PaymentId(our_payment_hash.0);
+		let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id);
+		unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {});
 		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	}
 	let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
 	// this will also be stuck in the holding cell
-	nodes[0].node.send_payment_with_route(route_22, our_payment_hash_22,
-		RecipientOnionFields::secret_only(our_payment_secret_22), PaymentId(our_payment_hash_22.0)).unwrap();
+	let onion = RecipientOnionFields::secret_only(our_payment_secret_22);
+	let id = PaymentId(our_payment_hash_22.0);
+	nodes[0].node.send_payment_with_route(route_22, our_payment_hash_22, onion, id).unwrap();
 	check_added_monitors!(nodes[0], 0);
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -335,13 +346,13 @@ pub fn test_channel_reserve_holding_cell_htlcs() {
 		_ => panic!("Unexpected event"),
 	}
-	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
-	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
-	claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
+	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage_1);
+	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage_21);
+	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage_22);
 	let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features);
 	let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
-	send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3);
+	send_payment(&nodes[0], &[&nodes[1], &nodes[2]], recv_value_3);
 	let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features);
 	let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat)
- (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat); @@ -396,8 +407,9 @@ pub fn channel_reserve_in_flight_removes() { // Start routing the third HTLC (this is just used to get everyone in the right state). let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); let send_1 = { - nodes[0].node.send_payment_with_route(route, payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret_3); + let id = PaymentId(payment_hash_3.0); + nodes[0].node.send_payment_with_route(route, payment_hash_3, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -471,8 +483,9 @@ pub fn channel_reserve_in_flight_removes() { // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing. let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000); let send_2 = { - nodes[1].node.send_payment_with_route(route, payment_hash_4, - RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret_4); + let id = PaymentId(payment_hash_4.0); + nodes[1].node.send_payment_with_route(route, payment_hash_4, onion, id).unwrap(); check_added_monitors!(nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -535,8 +548,9 @@ pub fn holding_cell_htlc_counting() { let mut payments = Vec::new(); for _ in 0..50 { let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); - nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); payments.push((payment_preimage, payment_hash)); } check_added_monitors!(nodes[1], 1); @@ -550,19 +564,19 @@ pub fn holding_cell_htlc_counting() { // the holding cell waiting on B's RAA to send. At this point we should not be able to add // another HTLC. { - unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) - ), true, APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(payment_hash_1.0); + let res = nodes[1].node.send_payment_with_route(route, payment_hash_1, onion, id); + unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); } // This should also be true if we try to forward a payment. 
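+	// That is, a payment routed 0 -> 1 -> 2 below is accepted from node 0, but node 1, whose
+	// holding cell on chan_2 is already full of the 50 queued HTLCs, fails the forward and
+	// fails the HTLC back to node 0 with chan_2's short_channel_id in the channel update.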
let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
-	{
-		nodes[0].node.send_payment_with_route(route, payment_hash_2,
-			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
-		check_added_monitors!(nodes[0], 1);
-	}
+	let onion = RecipientOnionFields::secret_only(payment_secret_2);
+	let id = PaymentId(payment_hash_2.0);
+	nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap();
+	check_added_monitors!(nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -574,14 +588,16 @@ pub fn holding_cell_htlc_counting() {
 	// We have to process pending HTLCs twice - the first pass tries to forward the payment (and
 	// fails), the second processes the resulting failure and fails the HTLC backward.
 	expect_pending_htlcs_forwardable!(nodes[1]);
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]);
+	let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 };
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![fail]);
 	check_added_monitors!(nodes[1], 1);
 
 	let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id);
 	nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_updates.update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
 
-	expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, chan_2.0.contents.short_channel_id, false);
+	let failing_scid = chan_2.0.contents.short_channel_id;
+	expect_payment_failed_with_update!(nodes[0], payment_hash_2, false, failing_scid, false);
 
 	// Now forward all the pending HTLCs and claim them back
 	nodes[2].node.handle_update_add_htlc(node_b_id, &initial_payment_event.msgs[0]);
@@ -653,8 +669,9 @@ pub fn test_basic_channel_reserve() {
 	let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send);
 	route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
 
-	let err = nodes[0].node.send_payment_with_route(route, our_payment_hash,
-		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0));
+	let onion = RecipientOnionFields::secret_only(our_payment_secret);
+	let id = PaymentId(our_payment_hash.0);
+	let err = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id);
 	unwrap_send_err!(nodes[0], err, true, APIError::ChannelUnavailable { ..
}, {} ); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -720,11 +737,15 @@ pub fn test_fee_spike_violation_fails_htlc() { chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx).unwrap()) }; let remote_point = { - let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let chan_signer = remote_chan.get_signer(); - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap() + let per_peer_lock; + let mut peer_state_lock; + + let channel = get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, chan.2); + let chan_signer = channel.as_funded().unwrap().get_signer(); + chan_signer + .as_ref() + .get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx) + .unwrap() }; // Build the remote commitment transaction so we can sign it, and then later use the @@ -742,10 +763,12 @@ pub fn test_fee_spike_violation_fails_htlc() { let commitment_number = INITIAL_COMMITMENT_NUMBER - 1; let res = { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let local_chan_signer = local_chan.get_signer(); + let per_peer_lock; + let mut peer_state_lock; + + let channel = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan.2); + let chan_signer = channel.as_funded().unwrap().get_signer(); + let commitment_tx = CommitmentTransaction::new( commitment_number, &remote_point, @@ -753,13 +776,15 @@ pub fn test_fee_spike_violation_fails_htlc() { local_chan_balance, feerate_per_kw, vec![accepted_htlc_info], - &local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(), + &channel.funding().channel_transaction_parameters.as_counterparty_broadcastable(), &secp_ctx, ); - local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment( - &local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(), - Vec::new(), &secp_ctx, - ).unwrap() + let params = &channel.funding().channel_transaction_parameters; + chan_signer + .as_ecdsa() + .unwrap() + .sign_counterparty_commitment(params, &commitment_tx, Vec::new(), Vec::new(), &secp_ctx) + .unwrap() }; let commit_signed_msg = msgs::CommitmentSigned { @@ -833,9 +858,10 @@ pub fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { } // However one more HTLC should be significantly over the reserve amount and fail. - unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); } @@ -937,9 +963,10 @@ pub fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt); route.paths[0].hops[0].fee_msat += 1; - unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. }, {}); } #[xtest(feature = "_externalize_tests")] @@ -954,6 +981,7 @@ pub fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() { create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000); let payment_amt = 46000; // Dust amount + // In the previous code, these first four payments would succeed. route_payment(&nodes[0], &[&nodes[1]], payment_amt); route_payment(&nodes[0], &[&nodes[1]], payment_amt); @@ -999,8 +1027,10 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { // Add a pending HTLC. let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1); let payment_event_1 = { - nodes[0].node.send_payment_with_route(route_1.clone(), our_payment_hash_1, - RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret_1); + let id = PaymentId(our_payment_hash_1.0); + let route = route_1.clone(); + nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1043,8 +1073,8 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote HTLC add would put them under remote reserve value".to_string() }, - [node_a_id], 100000); + let reason = ClosureReason::ProcessingError { err: err_msg.data.clone() }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1092,9 +1122,10 @@ pub fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0].hops[0].fee_msat = 100; - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -1110,8 +1141,10 @@ pub fn test_update_add_htlc_bolt2_sender_zero_value_msat() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0].hops[0].fee_msat = 0; - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)), + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Cannot send 0-msat HTLC")); @@ -1133,8 +1166,9 @@ pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].amount_msat = 0; @@ -1143,8 +1177,10 @@ pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3); check_closed_broadcast!(nodes[1], true).unwrap(); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }, - [node_a_id], 100000); + let reason = ClosureReason::ProcessingError { + err: "Remote side tried to send a 0-msat HTLC".to_string() + }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1164,9 +1200,11 @@ pub fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001; - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::InvalidRoute { ref err }, + + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::InvalidRoute { ref err }, assert_eq!(err, &"Channel CLTV overflowed?")); } @@ -1183,21 +1221,27 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increme let node_a_id = nodes[0].node.get_our_node_id(); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); - let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&node_a_id) - 
.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
+	let max_accepted_htlcs = {
+		let per_peer_lock;
+		let mut peer_state_lock;
+
+		let channel = get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, chan.2);
+		channel.context().counterparty_max_accepted_htlcs as u64
+	};
 
 	// Fetch a route in advance as we will be unable to once we're unable to send.
 	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
 	for i in 0..max_accepted_htlcs {
 		let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
 		let payment_event = {
-			nodes[0].node.send_payment_with_route(route, our_payment_hash,
-				RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
+			let onion = RecipientOnionFields::secret_only(our_payment_secret);
+			let id = PaymentId(our_payment_hash.0);
+			nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
 			check_added_monitors!(nodes[0], 1);
 
 			let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 			assert_eq!(events.len(), 1);
-			if let MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
+			if let MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, .. } = events[0] {
 				assert_eq!(htlcs[0].htlc_id, i);
 			} else {
 				assert!(false);
@@ -1211,9 +1255,10 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increme
 		expect_pending_htlcs_forwardable!(nodes[1]);
 		expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
 	}
-	unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash,
-			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-		), true, APIError::ChannelUnavailable { .. }, {});
+	let onion = RecipientOnionFields::secret_only(our_payment_secret);
+	let id = PaymentId(our_payment_hash.0);
+	let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id);
+	unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {});
 
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 }
@@ -1230,15 +1275,16 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0);
 	let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat;
 
-	send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
+	send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
 
 	let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight);
 	// Manually create a route over our max in flight (which our router normally automatically
 	// limits us to).
 	route.paths[0].hops[0].fee_msat = max_in_flight + 1;
-	unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash,
-			RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-		), true, APIError::ChannelUnavailable { ..
}, {}); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); send_payment(&nodes[0], &[&nodes[1]], max_in_flight); @@ -1257,17 +1303,18 @@ pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() let node_b_id = nodes[1].node.get_our_node_id(); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let htlc_minimum_msat: u64; - { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); - let channel = chan_lock.channel_by_id.get(&chan.2).unwrap(); - htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat(); - } + let htlc_minimum_msat = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan.2); + channel.context().get_holder_htlc_minimum_msat() + }; let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1; @@ -1276,7 +1323,8 @@ pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. 
Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1301,8 +1349,9 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound; let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); @@ -1316,7 +1365,8 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1366,7 +1416,8 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1383,8 +1434,9 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; @@ -1394,7 +1446,8 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); - 
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 1000000);
+	let reason = ClosureReason::ProcessingError { err: err_msg.data };
+	check_closed_event!(nodes[1], 1, reason, [node_a_id], 1000000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -1410,8 +1463,9 @@ pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
 	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
 
 	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
-	nodes[0].node.send_payment_with_route(route, our_payment_hash,
-		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
+	let onion = RecipientOnionFields::secret_only(our_payment_secret);
+	let id = PaymentId(our_payment_hash.0);
+	nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
 	check_added_monitors!(nodes[0], 1);
 	let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id);
 	updates.update_add_htlcs[0].cltv_expiry = 500000000;
@@ -1421,7 +1475,8 @@
 	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
 	assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height");
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000);
+	let reason = ClosureReason::ProcessingError { err: err_msg.data };
+	check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -1439,8 +1494,10 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
-	nodes[0].node.send_payment_with_route(route, our_payment_hash,
-		RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap();
+	let onion = RecipientOnionFields::secret_only(our_payment_secret);
+	let id = PaymentId(our_payment_hash.0);
+	nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
+
 	check_added_monitors!(nodes[0], 1);
 	let updates = get_htlc_update_msgs!(nodes[0], node_b_id);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]);
@@ -1448,16 +1505,20 @@
 	//Disconnect and Reconnect
 	nodes[0].node.peer_disconnected(node_b_id);
 	nodes[1].node.peer_disconnected(node_a_id);
-	nodes[0].node.peer_connected(node_b_id, &msgs::Init {
-		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
-	}, true).unwrap();
+
+	let init_msg = msgs::Init {
+		features: nodes[0].node.init_features(),
+		networks: None,
+		remote_network_address: None,
+	};
+	nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
-	nodes[1].node.peer_connected(node_a_id, &msgs::Init {
-		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
-	}, false).unwrap();
+
+	nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 1);
+
 	nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]);
handle_chan_reestablish_msgs!(nodes[0], nodes[1]); nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); @@ -1477,7 +1538,8 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_a_id], 100000); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1494,8 +1556,9 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], node_b_id); @@ -1513,7 +1576,8 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1531,8 +1595,9 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); @@ -1550,7 +1615,8 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1568,8 
+1634,9 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitme let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); @@ -1586,7 +1653,8 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitme let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1612,7 +1680,7 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { assert_eq!(events.len(), 1); let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. }, .. } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); @@ -1632,7 +1700,8 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1658,7 +1727,7 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { assert_eq!(events.len(), 1); let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. }, .. 
} => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); @@ -1678,7 +1747,8 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 100000); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -1696,8 +1766,9 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); @@ -1714,7 +1785,7 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me let mut update_msg: msgs::UpdateFailMalformedHTLC = { match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. }, .. 
} => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); @@ -1732,5 +1803,6 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: err_msg.data }, [node_b_id], 1000000); + let reason = ClosureReason::ProcessingError { err: err_msg.data }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 1000000); } From 9c185f6b7dad50070d23cc586af39cd89afb7d11 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 28 Apr 2025 14:08:36 +0000 Subject: [PATCH 14/25] De-macro `check_added_monitors` in `htlc_reserve_unit_tests` --- lightning/src/ln/htlc_reserve_unit_tests.rs | 140 ++++++++++---------- 1 file changed, 70 insertions(+), 70 deletions(-) diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 88856fd4f44..c5016f18028 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -124,7 +124,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() { ($node: expr) => {{ let mut events = $node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - check_added_monitors!($node, 1); + check_added_monitors(&$node, 1); let payment_event = SendEvent::from_event(events.remove(0)); payment_event }} @@ -206,7 +206,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() { let onion = RecipientOnionFields::secret_only(our_payment_secret_1); let id = PaymentId(our_payment_hash_1.0); nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -243,7 +243,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() { let onion = RecipientOnionFields::secret_only(our_payment_secret_21); let id = PaymentId(our_payment_hash_21.0); nodes[0].node.send_payment_with_route(route_21, our_payment_hash_21, onion, id).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 0); @@ -264,28 +264,28 @@ pub fn test_channel_reserve_holding_cell_htlcs() { let onion = RecipientOnionFields::secret_only(our_payment_secret_22); let id = PaymentId(our_payment_hash_22.0); nodes[0].node.send_payment_with_route(route_22, our_payment_hash_22, onion, id).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // flush the pending htlc nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event_1.commitment_msg); let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // the pending htlc should be promoted to committed nodes[0].node.handle_revoke_and_ack(node_b_id, &as_revoke_and_ack); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let commitment_update_2 = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, 
&as_commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &bs_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); @@ -410,7 +410,7 @@ pub fn channel_reserve_in_flight_removes() { let onion = RecipientOnionFields::secret_only(payment_secret_3); let id = PaymentId(payment_hash_3.0); nodes[0].node.send_payment_with_route(route, payment_hash_3, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) @@ -420,38 +420,38 @@ pub fn channel_reserve_in_flight_removes() { // initial fulfill/CS. nodes[1].node.claim_funds(payment_preimage_1); expect_payment_claimed!(nodes[1], payment_hash_1, payment_value_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_removes = get_htlc_update_msgs!(nodes[1], node_a_id); // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not // remove the second HTLC when we send the HTLC back from B to A. nodes[1].node.claim_funds(payment_preimage_2); expect_payment_claimed!(nodes[1], payment_hash_2, 20_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_removes.update_fulfill_htlcs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_removes.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[1].node.handle_update_add_htlc(node_a_id, &send_1.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_1.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // B is already AwaitingRAA, so can't generate a CS here let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the @@ -461,12 +461,12 @@ pub fn channel_reserve_in_flight_removes() { // on-chain as necessary). 
nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_cs.update_fulfill_htlcs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); expect_pending_htlcs_forwardable!(nodes[1]); @@ -475,7 +475,7 @@ pub fn channel_reserve_in_flight_removes() { // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't // resolve the second HTLC from A's point of view. nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id); @@ -486,7 +486,7 @@ pub fn channel_reserve_in_flight_removes() { let onion = RecipientOnionFields::secret_only(payment_secret_4); let id = PaymentId(payment_hash_4.0); nodes[1].node.send_payment_with_route(route, payment_hash_4, onion, id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); SendEvent::from_event(events.remove(0)) @@ -494,29 +494,29 @@ pub fn channel_reserve_in_flight_removes() { nodes[0].node.handle_update_add_htlc(node_b_id, &send_2.msgs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_2.commitment_msg); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // Now just resolve all the outstanding messages/HTLCs for completeness... 
nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); let as_cs = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_cs.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_pending_htlcs_forwardable!(nodes[0]); expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000); @@ -553,7 +553,7 @@ pub fn holding_cell_htlc_counting() { nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); payments.push((payment_preimage, payment_hash)); } - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -576,7 +576,7 @@ pub fn holding_cell_htlc_counting() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -590,7 +590,7 @@ pub fn holding_cell_htlc_counting() { expect_pending_htlcs_forwardable!(nodes[1]); let fail = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![fail]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_fail_updates.update_fail_htlcs[0]); @@ -602,34 +602,34 @@ pub fn holding_cell_htlc_counting() { // Now forward all the pending HTLCs and claim them back nodes[2].node.handle_update_add_htlc(node_b_id, &initial_payment_event.msgs[0]); nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &initial_payment_event.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let as_updates = get_htlc_update_msgs!(nodes[1], node_c_id); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); for ref update in as_updates.update_add_htlcs.iter() { nodes[2].node.handle_update_add_htlc(node_b_id, update); } nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &as_updates.commitment_signed); - 
check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_final_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); expect_pending_htlcs_forwardable!(nodes[2]); @@ -825,7 +825,7 @@ pub fn test_fee_spike_violation_fails_htlc() { nodes[1].logger.assert_log("lightning::ln::channel", format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); - check_added_monitors!(nodes[1], 3); + check_added_monitors(&nodes[1], 3); } #[xtest(feature = "_externalize_tests")] @@ -920,7 +920,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { assert_eq!(nodes[0].node.list_channels().len(), 0); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string() }, [node_b_id], 100000); } @@ -1031,7 +1031,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let id = PaymentId(our_payment_hash_1.0); let route = route_1.clone(); nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1072,7 +1072,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { assert_eq!(nodes[1].node.list_channels().len(), 1); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data.clone() }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } @@ -1169,14 +1169,14 @@ pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].amount_msat = 0; nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3); check_closed_broadcast!(nodes[1], true).unwrap(); - check_added_monitors!(nodes[1], 1); + 
check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }; @@ -1237,7 +1237,7 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increme let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1249,7 +1249,7 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increme SendEvent::from_event(events.remove(0)) }; nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -1315,14 +1315,14 @@ pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1; nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. 
Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } @@ -1352,7 +1352,7 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); // Even though channel-initiator senders are required to respect the fee_spike_reserve, @@ -1364,7 +1364,7 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } @@ -1415,7 +1415,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } @@ -1437,7 +1437,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); @@ -1445,7 +1445,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 1000000); } @@ -1466,7 +1466,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { let reason = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, reason, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].cltv_expiry = 500000000; nodes[1].node.handle_update_add_htlc(node_a_id, 
&updates.update_add_htlcs[0]); @@ -1474,7 +1474,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height"); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } @@ -1498,7 +1498,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); @@ -1529,7 +1529,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { assert_eq!(updates.commitment_signed.len(), 1); assert_eq!(updates.commitment_signed[0].htlc_signatures.len(), 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let _bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); @@ -1537,7 +1537,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } @@ -1560,7 +1560,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); @@ -1575,7 +1575,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } @@ -1598,7 +1598,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); @@ -1614,7 +1614,7 @@ pub 
fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } @@ -1637,7 +1637,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitme let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); let update_msg = msgs::UpdateFailMalformedHTLC{ @@ -1652,7 +1652,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitme assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } @@ -1673,7 +1673,7 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000); nodes[1].node.claim_funds(our_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1699,7 +1699,7 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Remote tried to fulfill/fail an HTLC we couldn't find"); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } @@ -1720,7 +1720,7 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { let (our_payment_preimage, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 100_000); nodes[1].node.claim_funds(our_payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], our_payment_hash, 100_000); let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1746,7 +1746,7 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str())); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } @@ -1769,13 +1769,13 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[1]); expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); @@ -1802,7 +1802,7 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Got update_fail_malformed_htlc with BADONION not set"); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 1000000); } From b6c59eb1e51240ba443275dbe72b33e93afe3f09 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 27 Apr 2025 19:02:34 +0000 Subject: [PATCH 15/25] Run `rustfmt` on `htlc_reserve_unit_tests.rs` --- lightning/src/ln/htlc_reserve_unit_tests.rs | 522 +++++++++++++++----- 1 file changed, 397 insertions(+), 125 deletions(-) diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index c5016f18028..aee764682a2 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -1,19 +1,25 @@ //! Various unit tests covering HTLC handling as well as tests covering channel reserve tracking. 
use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose}; -use crate::ln::functional_test_utils::*; -use crate::ln::chan_utils::{self, CommitmentTransaction, htlc_success_tx_weight, commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC}; -use crate::ln::channel::{FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, Channel, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis}; +use crate::ln::chan_utils::{ + self, commitment_tx_base_weight, htlc_success_tx_weight, CommitmentTransaction, + COMMITMENT_TX_WEIGHT_PER_HTLC, +}; +use crate::ln::channel::{ + get_holder_selected_channel_reserve_satoshis, Channel, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, + MIN_AFFORDABLE_HTLC_COUNT, +}; use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder}; -use crate::ln::outbound_payment::RecipientOnionFields; -use crate::ln::onion_utils::{self, AttributionData}; +use crate::ln::functional_test_utils::*; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; +use crate::ln::onion_utils::{self, AttributionData}; +use crate::ln::outbound_payment::RecipientOnionFields; +use crate::routing::router::PaymentParameters; use crate::sign::ecdsa::EcdsaChannelSigner; -use crate::util::config::UserConfig; -use crate::util::errors::APIError; use crate::types::features::ChannelTypeFeatures; use crate::types::payment::PaymentPreimage; -use crate::routing::router::PaymentParameters; +use crate::util::config::UserConfig; +use crate::util::errors::APIError; use lightning_macros::xtest; @@ -38,13 +44,16 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { let mut push_amt = 100_000_000; let feerate_per_kw = 253; let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); - push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; + push_amt -= feerate_per_kw as u64 + * (commitment_tx_base_weight(&channel_type_features) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) + / 1000 * 1000; push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; let push = if send_from_initiator { 0 } else { push_amt }; let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, push, 42, None, None).unwrap(); - let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + let mut open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); if !send_from_initiator { open_channel_message.channel_reserve_satoshis = 0; open_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; @@ -52,7 +61,8 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); // Extract the channel accept message from node1 to node0 - let mut accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + let mut accept_channel_message = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); if send_from_initiator { accept_channel_message.channel_reserve_satoshis = 0; accept_channel_message.common_fields.max_htlc_value_in_flight_msat = 100_000_000; @@ -64,24 +74,35 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) { let mut sender_node_per_peer_lock; let mut sender_node_peer_state_lock; - let channel = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, 
temp_channel_id); + let channel = get_channel_ref!( + sender_node, + counterparty_node, + sender_node_per_peer_lock, + sender_node_peer_state_lock, + temp_channel_id + ); assert!(channel.is_unfunded_v1()); channel.funding_mut().holder_selected_channel_reserve_satoshis = 0; channel.context_mut().holder_max_htlc_value_in_flight_msat = 100_000_000; } let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id); - let funding_msgs = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx); + let funding_msgs = + create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx); create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_msgs.0); // nodes[0] should now be able to send the full balance to nodes[1], violating nodes[1]'s // security model if it ever tries to send funds back to nodes[0] (but that's not our problem). if send_from_initiator { - send_payment(&nodes[0], &[&nodes[1]], 100_000_000 + send_payment( + &nodes[0], + &[&nodes[1]], + 100_000_000 // Note that for outbound channels we have to consider the commitment tx fee and the // "fee spike buffer", which is currently a multiple of the total commitment tx fee as // well as an additional HTLC. - - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features)); + - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * commit_tx_fee_msat(feerate_per_kw, 2, &channel_type_features), + ); } else { send_payment(&nodes[1], &[&nodes[0]], push_amt); } @@ -127,7 +148,7 @@ pub fn test_channel_reserve_holding_cell_htlcs() { check_added_monitors(&$node, 1); let payment_event = SendEvent::from_event(events.remove(0)); payment_event - }} + }}; } let feemsat = 239; // set above @@ -140,8 +161,11 @@ pub fn test_channel_reserve_holding_cell_htlcs() { // attempt to send amt_msat > their_max_htlc_value_in_flight_msat { let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0); + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap() + .with_max_channel_saturation_power_of_half(0); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0); route.paths[0].hops.last_mut().unwrap().fee_msat += 1; assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); @@ -159,16 +183,26 @@ pub fn test_channel_reserve_holding_cell_htlcs() { // 3 for the 3 HTLCs that will be sent, 2* and +1 for the fee spike reserve. // Also, ensure that each payment has enough to be over the dust limit to // ensure it'll be included in each commit tx fee calculation. 
- let commit_tx_fee_all_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features); - let ensure_htlc_amounts_above_dust_buffer = 3 * (stat01.counterparty_dust_limit_msat + 1000); - if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat { + let commit_tx_fee_all_htlcs = + 2 * commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features); + let ensure_htlc_amounts_above_dust_buffer = + 3 * (stat01.counterparty_dust_limit_msat + 1000); + if stat01.value_to_self_msat + < stat01.channel_reserve_msat + + commit_tx_fee_all_htlcs + + ensure_htlc_amounts_above_dust_buffer + + amt_msat + { break; } let payment_params = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap().with_max_channel_saturation_power_of_half(0); + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap() + .with_max_channel_saturation_power_of_half(0); let route = get_route!(nodes[0], payment_params, recv_value_0).unwrap(); - let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0); + let (payment_preimage, ..) = + send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); let (stat01_, stat11_, stat12_, stat22_) = ( @@ -182,7 +216,10 @@ pub fn test_channel_reserve_holding_cell_htlcs() { assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat); assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat)); assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat)); - stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_; + stat01 = stat01_; + stat11 = stat11_; + stat12 = stat12_; + stat22 = stat22_; } // adding pending output. @@ -196,11 +233,16 @@ pub fn test_channel_reserve_holding_cell_htlcs() { // the amount of the first of these aforementioned 3 payments. The reason we split into 3 payments // is to test the behavior of the holding cell with respect to channel reserve and commit tx fee // policy. 
- let commit_tx_fee_2_htlcs = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features); - let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs)/2; + let commit_tx_fee_2_htlcs = 2 * commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features); + let recv_value_1 = (stat01.value_to_self_msat + - stat01.channel_reserve_msat + - total_fee_msat + - commit_tx_fee_2_htlcs) + / 2; let amt_msat_1 = recv_value_1 + total_fee_msat; - let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1); + let (route_1, our_payment_hash_1, our_payment_preimage_1, our_payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_1); let payment_event_1 = { let route = route_1.clone(); let onion = RecipientOnionFields::secret_only(our_payment_secret_1); @@ -215,7 +257,11 @@ pub fn test_channel_reserve_holding_cell_htlcs() { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event_1.msgs[0]); // channel reserve test with htlc pending output > 0 - let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs; + let recv_value_2 = stat01.value_to_self_msat + - amt_msat_1 + - stat01.channel_reserve_msat + - total_fee_msat + - commit_tx_fee_2_htlcs; { let mut route = route_1.clone(); route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1; @@ -228,17 +274,25 @@ pub fn test_channel_reserve_holding_cell_htlcs() { } // split the rest to test holding cell - let commit_tx_fee_3_htlcs = 2*commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features); + let commit_tx_fee_3_htlcs = 2 * commit_tx_fee_msat(feerate, 3 + 1, &channel_type_features); let additional_htlc_cost_msat = commit_tx_fee_3_htlcs - commit_tx_fee_2_htlcs; - let recv_value_21 = recv_value_2/2 - additional_htlc_cost_msat/2; + let recv_value_21 = recv_value_2 / 2 - additional_htlc_cost_msat / 2; let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat - additional_htlc_cost_msat; { let stat = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); - assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat + commit_tx_fee_3_htlcs), stat.channel_reserve_msat); + assert_eq!( + stat.value_to_self_msat + - (stat.pending_outbound_htlcs_amount_msat + + recv_value_21 + recv_value_22 + + total_fee_msat + total_fee_msat + + commit_tx_fee_3_htlcs), + stat.channel_reserve_msat + ); } // now see if they go through on both sides - let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21); + let (route_21, our_payment_hash_21, our_payment_preimage_21, our_payment_secret_21) = + get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_21); // but this will be stuck in the holding cell let onion = RecipientOnionFields::secret_only(our_payment_secret_21); let id = PaymentId(our_payment_hash_21.0); @@ -259,7 +313,8 @@ pub fn test_channel_reserve_holding_cell_htlcs() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } - let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); + let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = + get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); // this will also be stuck in the holding cell let onion = RecipientOnionFields::secret_only(our_payment_secret_22); let id = PaymentId(our_payment_hash_22.0); @@ -314,33 +369,51 @@ pub fn test_channel_reserve_holding_cell_htlcs() { let events = nodes[2].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(our_payment_hash_21, *payment_hash); assert_eq!(recv_value_21, amount_msat); assert_eq!(node_c_id, receiver_node_id.unwrap()); assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { assert!(payment_preimage.is_none()); assert_eq!(our_payment_secret_21, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(our_payment_hash_22, *payment_hash); assert_eq!(recv_value_22, amount_msat); assert_eq!(node_c_id, receiver_node_id.unwrap()); assert_eq!(*via_channel_ids, vec![(chan_2.2, Some(chan_2_user_id))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, ..
+ } => { assert!(payment_preimage.is_none()); assert_eq!(our_payment_secret_22, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), @@ -350,18 +423,25 @@ pub fn test_channel_reserve_holding_cell_htlcs() { claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage_21); claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], our_payment_preimage_22); - let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1, &channel_type_features); + let commit_tx_fee_0_htlcs = 2 * commit_tx_fee_msat(feerate, 1, &channel_type_features); let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat; send_payment(&nodes[0], &[&nodes[1], &nodes[2]], recv_value_3); - let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); - let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat) - (recv_value_3 + total_fee_msat); + let commit_tx_fee_1_htlc = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); + let expected_value_to_self = stat01.value_to_self_msat + - (recv_value_1 + total_fee_msat) + - (recv_value_21 + total_fee_msat) + - (recv_value_22 + total_fee_msat) + - (recv_value_3 + total_fee_msat); let stat0 = get_channel_value_stat!(nodes[0], nodes[1], chan_1.2); assert_eq!(stat0.value_to_self_msat, expected_value_to_self); assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat + commit_tx_fee_1_htlc); let stat2 = get_channel_value_stat!(nodes[2], nodes[1], chan_2.2); - assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3); + assert_eq!( + stat2.value_to_self_msat, + stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22 + recv_value_3 + ); } #[xtest(feature = "_externalize_tests")] @@ -400,12 +480,15 @@ pub fn channel_reserve_in_flight_removes() { let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2); // Route the first two HTLCs. - let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000; - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1); + let payment_value_1 = + b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000; + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1]], payment_value_1); let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000); // Start routing the third HTLC (this is just used to get everyone in the right state). - let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100000); let send_1 = { let onion = RecipientOnionFields::secret_only(payment_secret_3); let id = PaymentId(payment_hash_3.0); @@ -481,7 +564,8 @@ pub fn channel_reserve_in_flight_removes() { // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing. 
- let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[1], nodes[0], 10000); + let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = + get_route_and_payment_hash!(nodes[1], nodes[0], 10000); let send_2 = { let onion = RecipientOnionFields::secret_only(payment_secret_4); let id = PaymentId(payment_hash_4.0); @@ -543,11 +627,13 @@ pub fn holding_cell_htlc_counting() { let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); // Fetch a route in advance as we will be unable to once we're unable to send. - let (route, payment_hash_1, _, payment_secret_1) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); + let (route, payment_hash_1, _, payment_secret_1) = + get_route_and_payment_hash!(nodes[1], nodes[2], 100000); let mut payments = Vec::new(); for _ in 0..50 { - let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[2], 100000); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); @@ -572,7 +658,8 @@ pub fn holding_cell_htlc_counting() { } // This should also be true if we try to forward a payment. - let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000); + let (route, payment_hash_2, _, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[2], 100000); let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); @@ -601,7 +688,9 @@ pub fn holding_cell_htlc_counting() { // Now forward all the pending HTLCs and claim them back nodes[2].node.handle_update_add_htlc(node_b_id, &initial_payment_event.msgs[0]); - nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &initial_payment_event.commitment_msg); + nodes[2] + .node + .handle_commitment_signed_batch_test(node_b_id, &initial_payment_event.commitment_msg); check_added_monitors(&nodes[2], 1); let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id); @@ -664,7 +753,11 @@ pub fn test_basic_channel_reserve() { let channel_reserve = chan_stat.channel_reserve_msat; // The 2* and +1 are for the fee spike reserve. - let commit_tx_fee = 2 * commit_tx_fee_msat(get_feerate!(nodes[0], nodes[1], chan.2), 1 + 1, &get_channel_type_features!(nodes[0], nodes[1], chan.2)); + let commit_tx_fee = 2 * commit_tx_fee_msat( + get_feerate!(nodes[0], nodes[1], chan.2), + 1 + 1, + &get_channel_type_features!(nodes[0], nodes[1], chan.2), + ); let max_can_send = 5000000 - channel_reserve - commit_tx_fee; let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); @@ -672,7 +765,7 @@ pub fn test_basic_channel_reserve() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); let err = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); - unwrap_send_err!(nodes[0], err, true, APIError::ChannelUnavailable { .. }, {} ); + unwrap_send_err!(nodes[0], err, true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); send_payment(&nodes[0], &vec![&nodes[1]], max_can_send); @@ -701,14 +794,24 @@ pub fn test_fee_spike_violation_fails_htlc() { let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], - 3460001, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( + &route.paths[0], + 3460001, + &recipient_onion_fields, + cur_height, + &None, + None, + None, + ) + .unwrap(); + let onion_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash) + .unwrap(); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, htlc_id: 0, amount_msat: htlc_msat, - payment_hash: payment_hash, + payment_hash, cltv_expiry: htlc_cltv, onion_routing_packet: onion_packet, skimmed_fee_msat: None, @@ -733,8 +836,13 @@ pub fn test_fee_spike_violation_fails_htlc() { // Make the signer believe we validated another commitment, so we can release the secret chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; - (chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(), - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx).unwrap()) + ( + chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(), + chan_signer + .as_ref() + .get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx) + .unwrap(), + ) }; let remote_point = { let per_peer_lock; @@ -810,13 +918,19 @@ pub fn test_fee_spike_violation_fails_htlc() { }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }] + ); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); // Make sure the HTLC failed in the way we expect. match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, + .. + } => { assert_eq!(update_fail_htlcs.len(), 1); update_fail_htlcs[0].clone() }, @@ -844,14 +958,19 @@ pub fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); + push_amt -= commit_tx_fee_msat( + feerate_per_kw, + MIN_AFFORDABLE_HTLC_COUNT as u64, + &channel_type_features, + ); push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); // Fetch a route in advance as we will be unable to once we're unable to send. 
- let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); // Sending exactly enough to hit the reserve amount should be accepted for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { route_payment(&nodes[1], &[&nodes[0]], 1_000_000); @@ -882,7 +1001,11 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment // transaction fee with 0 HTLCs (183 sats)). let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); + push_amt -= commit_tx_fee_msat( + feerate_per_kw, + MIN_AFFORDABLE_HTLC_COUNT as u64, + &channel_type_features, + ); push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); @@ -900,14 +1023,24 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], - 700_000, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( + &route.paths[0], + 700_000, + &recipient_onion_fields, + cur_height, + &None, + None, + None, + ) + .unwrap(); + let onion_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash) + .unwrap(); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64, amount_msat: htlc_msat, - payment_hash: payment_hash, + payment_hash, cltv_expiry: htlc_cltv, onion_routing_packet: onion_packet, skimmed_fee_msat: None, @@ -943,12 +1076,17 @@ pub fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment // transaction fee with 0 HTLCs (183 sats)). let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); + push_amt -= commit_tx_fee_msat( + feerate_per_kw, + MIN_AFFORDABLE_HTLC_COUNT as u64, + &channel_type_features, + ); push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt); let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 - + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 - 1; + + feerate_per_kw as u64 * htlc_success_tx_weight(&channel_type_features) / 1000 * 1000 + - 1; // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the // commitment transaction fee. 
@@ -1020,12 +1158,17 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); // Add a 2* and +1 for the fee spike reserve. - let commit_tx_fee_2_htlc = 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features); - let recv_value_1 = (chan_stat.value_to_self_msat - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlc)/2; + let commit_tx_fee_2_htlc = 2 * commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features); + let recv_value_1 = (chan_stat.value_to_self_msat + - chan_stat.channel_reserve_msat + - total_routing_fee_msat + - commit_tx_fee_2_htlc) + / 2; let amt_msat_1 = recv_value_1 + total_routing_fee_msat; // Add a pending HTLC. - let (route_1, our_payment_hash_1, _, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1); + let (route_1, our_payment_hash_1, _, our_payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat_1); let payment_event_1 = { let onion = RecipientOnionFields::secret_only(our_payment_secret_1); let id = PaymentId(our_payment_hash_1.0); @@ -1041,7 +1184,12 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { // Attempt to trigger a channel reserve violation --> payment failure. let commit_tx_fee_2_htlcs = commit_tx_fee_msat(feerate, 2, &channel_type_features); - let recv_value_2 = chan_stat.value_to_self_msat - amt_msat_1 - chan_stat.channel_reserve_msat - total_routing_fee_msat - commit_tx_fee_2_htlcs + 1; + let recv_value_2 = chan_stat.value_to_self_msat + - amt_msat_1 + - chan_stat.channel_reserve_msat + - total_routing_fee_msat + - commit_tx_fee_2_htlcs + + 1; let amt_msat_2 = recv_value_2 + total_routing_fee_msat; let mut route_2 = route_1.clone(); route_2.paths[0].hops.last_mut().unwrap().fee_msat = amt_msat_2; @@ -1053,8 +1201,22 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route_2.paths[0], &session_priv); let recipient_onion_fields = RecipientOnionFields::spontaneous_empty(); let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( - &route_2.paths[0], recv_value_2, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash_1).unwrap(); + &route_2.paths[0], + recv_value_2, + &recipient_onion_fields, + cur_height, + &None, + None, + None, + ) + .unwrap(); + let onion_packet = onion_utils::construct_onion_packet( + onion_payloads, + onion_keys, + [0; 32], + &our_payment_hash_1, + ) + .unwrap(); let msg = msgs::UpdateAddHTLC { channel_id: chan.2, htlc_id: 1, @@ -1068,7 +1230,11 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { nodes[1].node.handle_update_add_htlc(node_a_id, &msg); // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. 
- nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3); + nodes[1].logger.assert_log_contains( + "lightning::ln::channelmanager", + "Remote HTLC add would put them under remote reserve value", + 3, + ); assert_eq!(nodes[1].node.list_channels().len(), 1); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); @@ -1091,8 +1257,10 @@ pub fn test_payment_route_reaching_same_channel_twice() { let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); let payment_params = PaymentParameters::from_node_id(node_b_id, 0) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); // Extend the path by itself, essentially simulating route going through same channel twice let cloned_hops = route.paths[0].hops.clone(); @@ -1119,7 +1287,8 @@ pub fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0].hops[0].fee_msat = 100; let onion = RecipientOnionFields::secret_only(our_payment_secret); @@ -1139,7 +1308,8 @@ pub fn test_update_add_htlc_bolt2_sender_zero_value_msat() { let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0].hops[0].fee_msat = 0; let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); @@ -1149,7 +1319,11 @@ pub fn test_update_add_htlc_bolt2_sender_zero_value_msat() { assert_eq!(err, "Cannot send 0-msat HTLC")); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 2); + nodes[0].logger.assert_log_contains( + "lightning::ln::channelmanager", + "Cannot send 0-msat HTLC", + 2, + ); } #[xtest(feature = "_externalize_tests")] @@ -1165,7 +1339,8 @@ pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100000); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); @@ -1174,11 +1349,15 @@ pub fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { updates.update_add_htlcs[0].amount_msat 
= 0; nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3); + nodes[1].logger.assert_log_contains( + "lightning::ln::channelmanager", + "Remote side tried to send a 0-msat HTLC", + 3, + ); check_closed_broadcast!(nodes[1], true).unwrap(); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { - err: "Remote side tried to send a 0-msat HTLC".to_string() + err: "Remote side tried to send a 0-msat HTLC".to_string(), }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } @@ -1197,8 +1376,10 @@ pub fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0); let payment_params = PaymentParameters::from_node_id(node_b_id, 0) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001; let onion = RecipientOnionFields::secret_only(our_payment_secret); @@ -1230,9 +1411,11 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increme }; // Fetch a route in advance as we will be unable to once we're unable to send. - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100000); for i in 0..max_accepted_htlcs { - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100000); let payment_event = { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); @@ -1241,7 +1424,11 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increme let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - if let MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, .. } = events[0] { + if let MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { update_add_htlcs: ref htlcs, .. }, + .. 
+ } = events[0] + { assert_eq!(htlcs[0].htlc_id, i); } else { assert!(false); @@ -1273,14 +1460,16 @@ pub fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { let channel_value = 100000; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0); - let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2).counterparty_max_htlc_value_in_flight_msat; + let max_in_flight = get_channel_value_stat!(nodes[0], nodes[1], chan.2) + .counterparty_max_htlc_value_in_flight_msat; send_payment(&nodes[0], &[&nodes[1]], max_in_flight); - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], max_in_flight); // Manually create a route over our max in flight (which our router normally automatically // limits us to. - route.paths[0].hops[0].fee_msat = max_in_flight + 1; + route.paths[0].hops[0].fee_msat = max_in_flight + 1; let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); let res = nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id); @@ -1311,13 +1500,14 @@ pub fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() channel.context().get_holder_htlc_minimum_msat() }; - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); - updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1; + updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat - 1; nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); @@ -1348,7 +1538,8 @@ pub fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() { let commit_tx_fee_outbound = 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); let max_can_send = 5000000 - channel_reserve - commit_tx_fee_outbound; - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); @@ -1388,11 +1579,25 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { route.paths[0].hops[0].fee_msat = send_amt; let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); let cur_height = nodes[0].node.best_block.read().unwrap().height + 1; - let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route.paths[0], &session_priv); + let onion_keys = onion_utils::construct_onion_keys( + &Secp256k1::signing_only(), + &route.paths[0], + &session_priv, + ); let recipient_onion_fields = 
RecipientOnionFields::secret_only(our_payment_secret); let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads( - &route.paths[0], send_amt, &recipient_onion_fields, cur_height, &None, None, None).unwrap(); - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap(); + &route.paths[0], + send_amt, + &recipient_onion_fields, + cur_height, + &None, + None, + None, + ) + .unwrap(); + let onion_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash) + .unwrap(); let mut msg = msgs::UpdateAddHTLC { channel_id: chan.2, @@ -1414,7 +1619,9 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str())); + assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)") + .unwrap() + .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); @@ -1433,18 +1640,23 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); let mut updates = get_htlc_update_msgs!(nodes[0], node_b_id); - updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2).counterparty_max_htlc_value_in_flight_msat + 1; + updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], nodes[0], chan.2) + .counterparty_max_htlc_value_in_flight_msat + + 1; nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str())); + assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value") + .unwrap() + .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 1000000); @@ -1462,7 +1674,8 @@ pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { let node_b_id = nodes[1].node.get_our_node_id(); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let reason = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, 
reason, id).unwrap(); @@ -1473,7 +1686,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() { assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert_eq!(err_msg.data,"Remote provided CLTV expiry in seconds instead of block height"); + assert_eq!(err_msg.data, "Remote provided CLTV expiry in seconds instead of block height"); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); @@ -1493,7 +1706,8 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { let node_b_id = nodes[1].node.get_our_node_id(); create_announced_chan_between_nodes(&nodes, 0, 1); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); @@ -1536,7 +1750,9 @@ pub fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str())); + assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)") + .unwrap() + .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[1], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); @@ -1555,7 +1771,8 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { let node_b_id = nodes[1].node.get_our_node_id(); let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); @@ -1564,7 +1781,7 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { let updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - let update_msg = msgs::UpdateFulfillHTLC{ + let update_msg = msgs::UpdateFulfillHTLC { channel_id: chan.2, htlc_id: 0, payment_preimage: our_payment_preimage, @@ -1574,7 +1791,11 @@ pub fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); + assert!(regex::Regex::new( + r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed" + ) + .unwrap() + .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[0], 1, reason, 
[node_b_id], 100000); @@ -1594,7 +1815,8 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); @@ -1602,18 +1824,22 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { let updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - let update_msg = msgs::UpdateFailHTLC{ + let update_msg = msgs::UpdateFailHTLC { channel_id: chan.2, htlc_id: 0, reason: Vec::new(), - attribution_data: Some(AttributionData::new()) + attribution_data: Some(AttributionData::new()), }; nodes[0].node.handle_update_fail_htlc(node_b_id, &update_msg); assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); + assert!(regex::Regex::new( + r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed" + ) + .unwrap() + .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); @@ -1633,14 +1859,15 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitme let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); - let update_msg = msgs::UpdateFailMalformedHTLC{ + let update_msg = msgs::UpdateFailMalformedHTLC { channel_id: chan.2, htlc_id: 0, sha256_of_onion: [1; 32], @@ -1651,7 +1878,11 @@ pub fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitme assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); + assert!(regex::Regex::new( + r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed" + ) + .unwrap() + .is_match(err_msg.data.as_str())); check_added_monitors(&nodes[0], 1); let reason = ClosureReason::ProcessingError { err: err_msg.data }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); @@ -1670,7 +1901,8 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { create_announced_chan_between_nodes(&nodes, 0, 1); - let (our_payment_preimage, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 100_000); + let (our_payment_preimage, our_payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], 100_000); nodes[1].node.claim_funds(our_payment_preimage); check_added_monitors(&nodes[1], 1); @@ -1680,7 +1912,18 @@ pub fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() { assert_eq!(events.len(), 1); let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + .. + }, + .. + } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); @@ -1717,7 +1960,8 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { create_announced_chan_between_nodes(&nodes, 0, 1); - let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000); + let (our_payment_preimage, our_payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], 100_000); nodes[1].node.claim_funds(our_payment_preimage); check_added_monitors(&nodes[1], 1); @@ -1727,7 +1971,18 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() { assert_eq!(events.len(), 1); let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = { match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + .. + }, + .. 
+ } => {
 assert!(update_add_htlcs.is_empty());
 assert_eq!(update_fulfill_htlcs.len(), 1);
 assert!(update_fail_htlcs.is_empty());
@@ -1745,7 +2000,9 @@ pub fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
 
 assert!(nodes[0].node.list_channels().is_empty());
 let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
- assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
+ assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage")
+ .unwrap()
+ .is_match(err_msg.data.as_str()));
 check_added_monitors(&nodes[0], 1);
 let reason = ClosureReason::ProcessingError { err: err_msg.data };
 check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000);
@@ -1765,7 +2022,8 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me
 
 create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
 
- let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
+ let (route, our_payment_hash, _, our_payment_secret) =
+ get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
 let onion = RecipientOnionFields::secret_only(our_payment_secret);
 let id = PaymentId(our_payment_hash.0);
 nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
@@ -1778,14 +2036,28 @@ pub fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_me
 check_added_monitors(&nodes[1], 0);
 commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
 expect_pending_htlcs_forwardable!(nodes[1]);
- expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]);
+ expect_htlc_handling_failed_destinations!(
+ nodes[1].node.get_and_clear_pending_events(),
+ &[HTLCHandlingFailureType::InvalidOnion]
+ );
 check_added_monitors(&nodes[1], 1);
 let events = nodes[1].node.get_and_clear_pending_msg_events();
 let mut update_msg: msgs::UpdateFailMalformedHTLC = {
 match events[0] {
- MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. }, .. } => {
+ MessageSendEvent::UpdateHTLCs {
+ updates:
+ msgs::CommitmentUpdate {
+ ref update_add_htlcs,
+ ref update_fulfill_htlcs,
+ ref update_fail_htlcs,
+ ref update_fail_malformed_htlcs,
+ ref update_fee,
+ ..
+ },
+ ..
+ } => {
 assert!(update_add_htlcs.is_empty());
 assert!(update_fulfill_htlcs.is_empty());
 assert!(update_fail_htlcs.is_empty());

From db36f0819d758604d90922d5bbed6a8a7f953ff9 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Mon, 28 Apr 2025 01:04:32 +0000
Subject: [PATCH 16/25] Move channel fee-related tests out of `functional_tests.rs`

`functional_tests.rs` has gotten incredibly huge over the years, so here we move all the channel fee-related tests out to their own file.
--- lightning/src/ln/functional_tests.rs | 904 +------------------------- lightning/src/ln/mod.rs | 3 + lightning/src/ln/update_fee_tests.rs | 915 +++++++++++++++++++++++++++ 3 files changed, 921 insertions(+), 901 deletions(-) create mode 100644 lightning/src/ln/update_fee_tests.rs diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 4322c037044..70fc54ed6eb 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -18,16 +18,16 @@ use crate::chain::channelmonitor; use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE}; use crate::chain::transaction::OutPoint; use crate::ln::onion_utils::LocalHTLCFailureReason; -use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; +use crate::sign::{EntropySource, OutputSpender, SignerProvider}; use crate::events::bump_transaction::WalletSource; use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCHandlingFailureType, PaymentFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentSecret, PaymentHash}; -use crate::ln::channel::{get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel, COINBASE_MATURITY, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, MIN_AFFORDABLE_HTLC_COUNT}; +use crate::ln::channel::{get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel, COINBASE_MATURITY}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError, MIN_CHAN_DUST_LIMIT_SATOSHIS}; use crate::ln::{chan_utils, onion_utils}; -use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; +use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight}; use crate::routing::gossip::{NetworkGraph, NetworkUpdate}; use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters}; use crate::types::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures}; @@ -62,7 +62,6 @@ use crate::sync::{Arc, Mutex, RwLock}; use lightning_macros::xtest; use crate::ln::functional_test_utils::*; -use crate::ln::chan_utils::CommitmentTransaction; use super::channel::UNFUNDED_CHANNEL_AGE_LIMIT_TICKS; @@ -188,307 +187,6 @@ pub fn test_funding_exceeds_no_wumbo_limit() { } } -#[xtest(feature = "_externalize_tests")] -pub fn test_async_inbound_update_fee() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - create_announced_chan_between_nodes(&nodes, 0, 1); - - // balancing - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); - - // A B - // update_fee -> - // send (1) commitment_signed -. - // <- update_add_htlc/commitment_signed - // send (2) RAA (awaiting remote revoke) -. 
- // (1) commitment_signed is delivered -> - // .- send (3) RAA (awaiting remote revoke) - // (2) RAA is delivered -> - // .- send (4) commitment_signed - // <- (3) RAA is delivered - // send (5) commitment_signed -. - // <- (4) commitment_signed is delivered - // send (6) RAA -. - // (5) commitment_signed is delivered -> - // <- RAA - // (6) RAA is delivered -> - - // First nodes[0] generates an update_fee - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 20; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg, commitment_signed) = match events_0[0] { // (1) - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - - nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); - - // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); - nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[1], 1); - - let payment_event = { - let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events_1.len(), 1); - SendEvent::from_event(events_1.remove(0)) - }; - assert_eq!(payment_event.node_id, node_a_id); - assert_eq!(payment_event.msgs.len(), 1); - - // ...now when the messages get delivered everyone should be happy - nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); // (2) - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - - // deliver(1), generate (3): - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); - - nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); // deliver (2) - let bs_update = get_htlc_update_msgs!(nodes[1], node_a_id); - assert!(bs_update.update_add_htlcs.is_empty()); // (4) - assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4) - assert!(bs_update.update_fail_htlcs.is_empty()); // (4) - assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4) - assert!(bs_update.update_fee.is_none()); // (4) - check_added_monitors!(nodes[1], 1); - - nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); // deliver (3) - let as_update = get_htlc_update_msgs!(nodes[0], node_b_id); - assert!(as_update.update_add_htlcs.is_empty()); // (5) - assert!(as_update.update_fulfill_htlcs.is_empty()); // (5) - assert!(as_update.update_fail_htlcs.is_empty()); // (5) - assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5) - assert!(as_update.update_fee.is_none()); // (5) - check_added_monitors!(nodes[0], 1); - - 
nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_update.commitment_signed); // deliver (4) - let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - // only (6) so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update.commitment_signed); // deliver (5) - let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - check_added_monitors!(nodes[1], 1); - - nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke); - check_added_monitors!(nodes[0], 1); - - let events_2 = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events_2.len(), 1); - match events_2[0] { - Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment - _ => panic!("Unexpected event"), - } - - nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_revoke); // deliver (6) - check_added_monitors!(nodes[1], 1); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fee_unordered_raa() { - // Just the intro to the previous test followed by an out-of-order RAA (which caused a - // crash in an earlier version of the update_fee patch) - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - create_announced_chan_between_nodes(&nodes, 0, 1); - - // balancing - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); - - // First nodes[0] generates an update_fee - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 20; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let update_msg = match events_0[0] { // (1) - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { - update_fee.as_ref() - }, - _ => panic!("Unexpected event"), - }; - - nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); - - // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... 
- let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); - nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[1], 1); - - let payment_event = { - let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(events_1.len(), 1); - SendEvent::from_event(events_1.remove(0)) - }; - assert_eq!(payment_event.node_id, node_a_id); - assert_eq!(payment_event.msgs.len(), 1); - - // ...now when the messages get delivered everyone should be happy - nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); // (2) - let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - - nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_msg); // deliver (2) - check_added_monitors!(nodes[1], 1); - - // We can't continue, sadly, because our (1) now has a bogus signature -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_multi_flight_update_fee() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - create_announced_chan_between_nodes(&nodes, 0, 1); - - // A B - // update_fee/commitment_signed -> - // .- send (1) RAA and (2) commitment_signed - // update_fee (never committed) -> - // (3) update_fee -> - // We have to manually generate the above update_fee, it is allowed by the protocol but we - // don't track which updates correspond to which revoke_and_ack responses so we're in - // AwaitingRAA mode and will not generate the update_fee yet. - // <- (1) RAA delivered - // (3) is generated and send (4) CS -. - // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't - // know the per_commitment_point to use for it. - // <- (2) commitment_signed delivered - // revoke_and_ack -> - // B should send no response here - // (4) commitment_signed delivered -> - // <- RAA/commitment_signed delivered - // revoke_and_ack -> - - // First nodes[0] generates an update_fee - let initial_feerate; - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - initial_feerate = *feerate_lock; - *feerate_lock = initial_feerate + 20; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1) - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. 
} => { - (update_fee.as_ref().unwrap(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - - // Deliver first update_fee/commitment_signed pair, generating (1) and (2): - nodes[1].node.handle_update_fee(node_a_id, update_msg_1); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed_1); - let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); - - // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment - // transaction: - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = initial_feerate + 40; - } - nodes[0].node.timer_tick_occurred(); - assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - - // Create the (3) update_fee message that nodes[0] will generate before it does... - let mut update_msg_2 = msgs::UpdateFee { - channel_id: update_msg_1.channel_id.clone(), - feerate_per_kw: (initial_feerate + 30) as u32, - }; - - nodes[1].node.handle_update_fee(node_a_id, &update_msg_2); - - update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32; - // Deliver (3) - nodes[1].node.handle_update_fee(node_a_id, &update_msg_2); - - // Deliver (1), generating (3) and (4) - nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_msg); - let as_second_update = get_htlc_update_msgs!(nodes[0], node_b_id); - check_added_monitors!(nodes[0], 1); - assert!(as_second_update.update_add_htlcs.is_empty()); - assert!(as_second_update.update_fulfill_htlcs.is_empty()); - assert!(as_second_update.update_fail_htlcs.is_empty()); - assert!(as_second_update.update_fail_malformed_htlcs.is_empty()); - // Check that the update_fee newly generated matches what we delivered: - assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id); - assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw); - - // Deliver (2) commitment_signed - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); - let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - check_added_monitors!(nodes[0], 1); - // No commitment_signed so get_event_msg's assert(len == 1) passes - - nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_msg); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); - - // Delever (4) - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); - let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); - - nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); - - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment); - let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - - nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_revoke); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); -} - fn do_test_sanity_on_in_flight_opens(steps: u8) { // Previously, we had issues 
deserializing channels when we hadn't connected the first block // after creation. To catch that and similar issues, we lean on the Node::drop impl to test @@ -588,480 +286,6 @@ pub fn test_sanity_on_in_flight_opens() { do_test_sanity_on_in_flight_opens(8 | 0b1000_0000); } -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fee_vanilla() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - create_announced_chan_between_nodes(&nodes, 0, 1); - - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 25; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); - - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); - let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); - - nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); - - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); - let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - - nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fee_that_funder_cannot_afford() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let channel_value = 5000; - let push_sats = 700; - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000); - let channel_id = chan.2; - let secp_ctx = Secp256k1::new(); - let default_config = UserConfig::default(); - let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config); - - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); - - // Calculate the maximum feerate that A can afford. Note that we don't send an update_fee - // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we - // calculate two different feerates here - the expected local limit as well as the expected - // remote limit. 
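The comment above distinguishes the two limits because the funder must budget fee headroom for inbound HTLCs that may be added before its `update_fee` commits. A rough sketch of the buffered (local) limit, with all three constants assumed rather than taken from LDK's real `commitment_tx_base_weight()`, `COMMITMENT_TX_WEIGHT_PER_HTLC`, and `CONCURRENT_INBOUND_HTLC_FEE_BUFFER`:

// Sketch only: every constant here is an assumption for illustration.
const BASE_WEIGHT: u64 = 724;
const WEIGHT_PER_HTLC: u64 = 172;
const HTLC_FEE_BUFFER: u64 = 2;

/// Highest feerate (sat/kW) the funder can propose while still affording the
/// commitment fee with headroom for HTLC_FEE_BUFFER uncommitted inbound HTLCs.
fn max_affordable_feerate(channel_value_sat: u64, reserve_sat: u64, push_sat: u64) -> u32 {
    let spendable_sat = channel_value_sat - reserve_sat - push_sat;
    (spendable_sat * 1000 / (BASE_WEIGHT + HTLC_FEE_BUFFER * WEIGHT_PER_HTLC)) as u32
}

fn main() {
    // Mirrors the test's setup: a 5000 sat channel with 700 sat pushed and an
    // assumed 1000 sat holder-selected reserve.
    println!("local limit: {} sat/kW", max_affordable_feerate(5000, 1000, 700));
}

The unbuffered (remote) limit is the same division with only `BASE_WEIGHT` in the denominator, which is why it comes out higher.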
- let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32; - let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32; - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = feerate; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - let update_msg = get_htlc_update_msgs!(nodes[0], node_b_id); - - nodes[1].node.handle_update_fee(node_a_id, &update_msg.update_fee.unwrap()); - - commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false); - - // Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above. - { - let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone(); - - //We made sure neither party's funds are below the dust limit and there are no HTLCs here - assert_eq!(commitment_tx.output.len(), 2); - let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000; - let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat()); - actual_fee = channel_value - actual_fee; - assert_eq!(total_fee, actual_fee); - } - - { - // Increment the feerate by a small constant, accounting for rounding errors - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 4; - } - nodes[0].node.timer_tick_occurred(); - nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1); - check_added_monitors!(nodes[0], 0); - - const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654; - - let remote_point = { - let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let chan_signer = remote_chan.get_signer(); - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap() - }; - - let res = { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap(); - let local_chan_signer = local_chan.get_signer(); - let nondust_htlcs: Vec = vec![]; - let commitment_tx = CommitmentTransaction::new( - INITIAL_COMMITMENT_NUMBER - 1, - &remote_point, - push_sats, - channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000, - non_buffer_feerate + 4, - nondust_htlcs, - &local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(), - &secp_ctx, - ); - local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment( - &local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(), - Vec::new(), &secp_ctx, - ).unwrap() - }; - - let commit_signed_msg = msgs::CommitmentSigned { - channel_id: chan.2, - signature: res.0, - htlc_signatures: res.1, - batch: None, - #[cfg(taproot)] - partial_signature_with_nonce: None, - }; - - let update_fee = msgs::UpdateFee { - channel_id: chan.2, - feerate_per_kw: non_buffer_feerate + 4, - }; - - nodes[1].node.handle_update_fee(node_a_id, 
&update_fee); - - //While producing the commitment_signed response after handling a received update_fee request the - //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve) - //Should produce and error. - nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3); - check_added_monitors!(nodes[1], 1); - check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, - [node_a_id], channel_value); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fee_that_saturates_subs() { - // Check that when a remote party sends us an `update_fee` message that results in a total fee - // on the commitment transaction that is greater than her balance, we saturate the subtractions, - // and force close the channel. - - let mut default_config = test_default_channel_config(); - let secp_ctx = Secp256k1::new(); - - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let chan_id = create_chan_between_nodes_with_value(&nodes[0], &nodes[1], 10_000, 8_500_000).3; - - const FEERATE: u32 = 250 * 10; // 10sat/vb - - // Assert that the new feerate will completely exhaust the balance of node 0, and saturate the - // subtraction of the total fee from node 0's balance. - let total_fee_sat = chan_utils::commit_tx_fee_sat(FEERATE, 0, &ChannelTypeFeatures::empty()); - assert!(total_fee_sat > 1500); - - const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654; - - // We build a commitment transcation here only to pass node 1's check of node 0's signature - // in `commitment_signed`. - - let remote_point = { - let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan_id).and_then(Channel::as_funded).unwrap(); - let chan_signer = remote_chan.get_signer(); - chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).unwrap() - }; - - let res = { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan_id).and_then(Channel::as_funded).unwrap(); - let local_chan_signer = local_chan.get_signer(); - let nondust_htlcs: Vec = vec![]; - let commitment_tx = CommitmentTransaction::new( - INITIAL_COMMITMENT_NUMBER, - &remote_point, - 8500, - // Set a zero balance here: this is the transaction that node 1 will expect a signature for, as - // he will do a saturating subtraction of the total fees from node 0's balance. 
- 0, - FEERATE, - nondust_htlcs, - &local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(), - &secp_ctx, - ); - local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment( - &local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(), - Vec::new(), &secp_ctx, - ).unwrap() - }; - - let commit_signed_msg = msgs::CommitmentSigned { - channel_id: chan_id, - signature: res.0, - htlc_signatures: res.1, - batch: None, - #[cfg(taproot)] - partial_signature_with_nonce: None, - }; - - let update_fee = msgs::UpdateFee { - channel_id: chan_id, - feerate_per_kw: FEERATE, - }; - - nodes[1].node.handle_update_fee(node_a_id, &update_fee); - - nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3); - check_added_monitors!(nodes[1], 1); - check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, - [node_a_id], 10_000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fee_with_fundee_update_add_htlc() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - - // balancing - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); - - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock += 20; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); - let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); - - let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000); - - // nothing happens since node[1] is in AwaitingRemoteRevoke - nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - { - let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 0); - added_monitors.clear(); - } - assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - // node[1] has nothing to do - - nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); - - 
nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); - let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); - check_added_monitors!(nodes[1], 1); - // AwaitingRemoteRevoke ends here - - let commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); - assert_eq!(commitment_update.update_add_htlcs.len(), 1); - assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0); - assert_eq!(commitment_update.update_fail_htlcs.len(), 0); - assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0); - assert_eq!(commitment_update.update_fee.is_none(), true); - - nodes[0].node.handle_update_add_htlc(node_b_id, &commitment_update.update_add_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); - check_added_monitors!(nodes[0], 1); - let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id); - - nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke); - check_added_monitors!(nodes[1], 1); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed); - check_added_monitors!(nodes[1], 1); - let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - // No commitment_signed so get_event_msg's assert(len == 1) passes - - nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke); - check_added_monitors!(nodes[0], 1); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - - expect_pending_htlcs_forwardable!(nodes[0]); - - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); - match events[0] { - Event::PaymentClaimable { .. } => { }, - _ => panic!("Unexpected event"), - }; - - claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage); - - send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); - send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); - close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); -} - -#[xtest(feature = "_externalize_tests")] -pub fn test_update_fee() { - let chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let channel_id = chan.2; - - // A B - // (1) update_fee/commitment_signed -> - // <- (2) revoke_and_ack - // .- send (3) commitment_signed - // (4) update_fee/commitment_signed -> - // .- send (5) revoke_and_ack (no CS as we're awaiting a revoke) - // <- (3) commitment_signed delivered - // send (6) revoke_and_ack -. - // <- (5) deliver revoke_and_ack - // (6) deliver revoke_and_ack -> - // .- send (7) commitment_signed in response to (4) - // <- (7) deliver commitment_signed - // revoke_and_ack -> - - // Create and deliver (1)... 
- let feerate; - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - feerate = *feerate_lock; - *feerate_lock = feerate + 20; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); - - // Generate (2) and (3): - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); - let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); - - // Deliver (2): - nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); - - // Create and deliver (4)... - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = feerate + 30; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events_0.len(), 1); - let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { - (update_fee.as_ref(), commitment_signed) - }, - _ => panic!("Unexpected event"), - }; - - nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); - check_added_monitors!(nodes[1], 1); - // ... 
creating (5) - let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - // No commitment_signed so get_event_msg's assert(len == 1) passes - - // Handle (3), creating (6): - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_0); - check_added_monitors!(nodes[0], 1); - let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - // No commitment_signed so get_event_msg's assert(len == 1) passes - - // Deliver (5): - nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); - - // Deliver (6), creating (7): - nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg_0); - let commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); - assert!(commitment_update.update_add_htlcs.is_empty()); - assert!(commitment_update.update_fulfill_htlcs.is_empty()); - assert!(commitment_update.update_fail_htlcs.is_empty()); - assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); - assert!(commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); - - // Deliver (7) - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); - check_added_monitors!(nodes[0], 1); - let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - // No commitment_signed so get_event_msg's assert(len == 1) passes - - nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); - check_added_monitors!(nodes[1], 1); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - - assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); - assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); - close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); -} - #[xtest(feature = "_externalize_tests")] pub fn fake_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -1322,47 +546,6 @@ pub fn test_duplicate_htlc_different_direction_onchain() { } } -#[xtest(feature = "_externalize_tests")] -pub fn test_chan_init_feerate_unaffordability() { - // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to - // channel reserve and feerate requirements. - let mut chanmon_cfgs = create_chanmon_cfgs(2); - let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); - - let default_config = UserConfig::default(); - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); - - // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single - // HTLC. 
- let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); - assert_eq!(nodes[0].node.create_channel(node_b_id, 100_000, push_amt + 1, 42, None, None).unwrap_err(), - APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() }); - - // During open, we don't have a "counterparty channel reserve" to check against, so that - // requirement only comes into play on the open_channel handling side. - push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; - nodes[0].node.create_channel(node_b_id, 100_000, push_amt, 42, None, None).unwrap(); - let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - open_channel_msg.push_msat += 1; - nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); - - let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), 1); - match msg_events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { - assert_eq!(msg.data, "Insufficient funding amount for initial reserve"); - }, - _ => panic!("Unexpected event"), - } -} - #[xtest(feature = "_externalize_tests")] pub fn test_inbound_outbound_capacity_is_not_zero() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -9816,87 +8999,6 @@ pub fn test_non_final_funding_tx_within_headroom() { get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); } -#[xtest(feature = "_externalize_tests")] -pub fn accept_busted_but_better_fee() { - // If a peer sends us a fee update that is too low, but higher than our previous channel - // feerate, we should accept it. In the future we may want to consider closing the channel - // later, but for now we only accept the update. - let mut chanmon_cfgs = create_chanmon_cfgs(2); - let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - - let node_a_id = nodes[0].node.get_our_node_id(); - - create_chan_between_nodes(&nodes[0], &nodes[1]); - - // Set nodes[1] to expect 5,000 sat/kW. - { - let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = 5000; - } - - // If nodes[0] increases their feerate, even if its not enough, nodes[1] should accept it. - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = 1000; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); - commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); - }, - _ => panic!("Unexpected event"), - }; - - // If nodes[0] increases their feerate further, even if its not enough, nodes[1] should accept - // it. 
- { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = 2000; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); - commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); - }, - _ => panic!("Unexpected event"), - }; - - // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the - // channel. - { - let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); - *feerate_lock = 1000; - } - nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); - - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { - nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); - check_closed_event!(nodes[1], 1, ClosureReason::PeerFeerateTooLow { - peer_feerate_sat_per_kw: 1000, required_feerate_sat_per_kw: 5000, - }, [node_a_id], 100000); - check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); - }, - _ => panic!("Unexpected event"), - }; -} - fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash: bool) { let mut chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 7db23efcf0a..7b4795962b5 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -72,6 +72,9 @@ pub mod functional_tests; #[cfg(any(test, feature = "_externalize_tests"))] #[allow(unused_mut)] pub mod htlc_reserve_unit_tests; +#[cfg(any(test, feature = "_externalize_tests"))] +#[allow(unused_mut)] +pub mod update_fee_tests; #[cfg(all(test, splicing))] #[allow(unused_mut)] mod splicing_tests; diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs new file mode 100644 index 00000000000..93b052a02c7 --- /dev/null +++ b/lightning/src/ln/update_fee_tests.rs @@ -0,0 +1,915 @@ +//! Functional tests testing channel feerate handling. 
+ +use crate::events::{ClosureReason, Event}; +use crate::ln::functional_test_utils::*; +use crate::ln::chan_utils::{self, CommitmentTransaction, commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, HTLCOutputInCommitment}; +use crate::ln::channel::{Channel, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, CONCURRENT_INBOUND_HTLC_FEE_BUFFER}; +use crate::ln::channelmanager::PaymentId; +use crate::ln::outbound_payment::RecipientOnionFields; +use crate::ln::msgs::{self, BaseMessageHandler, ErrorAction, ChannelMessageHandler, MessageSendEvent}; +use crate::sign::ecdsa::EcdsaChannelSigner; +use crate::util::config::UserConfig; +use crate::util::errors::APIError; +use crate::types::features::ChannelTypeFeatures; + +use lightning_macros::xtest; + +use bitcoin::secp256k1::Secp256k1; + +#[xtest(feature = "_externalize_tests")] +pub fn test_async_inbound_update_fee() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + // A B + // update_fee -> + // send (1) commitment_signed -. + // <- update_add_htlc/commitment_signed + // send (2) RAA (awaiting remote revoke) -. + // (1) commitment_signed is delivered -> + // .- send (3) RAA (awaiting remote revoke) + // (2) RAA is delivered -> + // .- send (4) commitment_signed + // <- (3) RAA is delivered + // send (5) commitment_signed -. + // <- (4) commitment_signed is delivered + // send (6) RAA -. + // (5) commitment_signed is delivered -> + // <- RAA + // (6) RAA is delivered -> + + // First nodes[0] generates an update_fee + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { // (1) + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + + // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... 
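+	// (This deliberately crosses nodes[1]'s update_add_htlc with nodes[0]'s in-flight update_fee,
+	// exercising the state machine sketched in the diagram above.)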
+ let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); + nodes[1].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + check_added_monitors!(nodes[1], 1); + + let payment_event = { + let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_1.len(), 1); + SendEvent::from_event(events_1.remove(0)) + }; + assert_eq!(payment_event.node_id, node_a_id); + assert_eq!(payment_event.msgs.len(), 1); + + // ...now when the messages get delivered everyone should be happy + nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); // (2) + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[0], 1); + + // deliver(1), generate (3): + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[1], 1); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); // deliver (2) + let bs_update = get_htlc_update_msgs!(nodes[1], node_a_id); + assert!(bs_update.update_add_htlcs.is_empty()); // (4) + assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4) + assert!(bs_update.update_fail_htlcs.is_empty()); // (4) + assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4) + assert!(bs_update.update_fee.is_none()); // (4) + check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); // deliver (3) + let as_update = get_htlc_update_msgs!(nodes[0], node_b_id); + assert!(as_update.update_add_htlcs.is_empty()); // (5) + assert!(as_update.update_fulfill_htlcs.is_empty()); // (5) + assert!(as_update.update_fail_htlcs.is_empty()); // (5) + assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5) + assert!(as_update.update_fee.is_none()); // (5) + check_added_monitors!(nodes[0], 1); + + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_update.commitment_signed); // deliver (4) + let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // only (6) so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[0], 1); + + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update.commitment_signed); // deliver (5) + let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke); + check_added_monitors!(nodes[0], 1); + + let events_2 = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events_2.len(), 1); + match events_2[0] { + Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment + _ => panic!("Unexpected event"), + } + + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_revoke); // deliver (6) + check_added_monitors!(nodes[1], 1); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fee_unordered_raa() { + // Just the intro to the previous test followed by 
an out-of-order RAA (which caused a + // crash in an earlier version of the update_fee patch) + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + // First nodes[0] generates an update_fee + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let update_msg = match events_0[0] { // (1) + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { + update_fee.as_ref() + }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + + // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... + let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); + nodes[1].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + check_added_monitors!(nodes[1], 1); + + let payment_event = { + let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events_1.len(), 1); + SendEvent::from_event(events_1.remove(0)) + }; + assert_eq!(payment_event.node_id, node_a_id); + assert_eq!(payment_event.msgs.len(), 1); + + // ...now when the messages get delivered everyone should be happy + nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); // (2) + let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[0], 1); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_msg); // deliver (2) + check_added_monitors!(nodes[1], 1); + + // We can't continue, sadly, because our (1) now has a bogus signature +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_multi_flight_update_fee() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + + // A B + // update_fee/commitment_signed -> + // .- send (1) RAA and (2) commitment_signed + // update_fee (never committed) -> + // (3) update_fee -> + // We have to manually generate the above update_fee, it is allowed by the protocol but we + // don't track which updates correspond to which revoke_and_ack responses so we're in + // AwaitingRAA mode and will not generate the update_fee yet. + // <- (1) RAA delivered + // (3) is generated and send (4) CS -. 
+ // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't + // know the per_commitment_point to use for it. + // <- (2) commitment_signed delivered + // revoke_and_ack -> + // B should send no response here + // (4) commitment_signed delivered -> + // <- RAA/commitment_signed delivered + // revoke_and_ack -> + + // First nodes[0] generates an update_fee + let initial_feerate; + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + initial_feerate = *feerate_lock; + *feerate_lock = initial_feerate + 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1) + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { + (update_fee.as_ref().unwrap(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + + // Deliver first update_fee/commitment_signed pair, generating (1) and (2): + nodes[1].node.handle_update_fee(node_a_id, update_msg_1); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed_1); + let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); + check_added_monitors!(nodes[1], 1); + + // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment + // transaction: + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = initial_feerate + 40; + } + nodes[0].node.timer_tick_occurred(); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + // Create the (3) update_fee message that nodes[0] will generate before it does... 
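+	// (The protocol permits several update_fee messages to be in flight before a commitment is
+	// signed; an uncommitted update_fee is simply replaced by a later one, so we hand-craft the
+	// intermediate message that nodes[0] itself never gets around to sending.)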
+	let mut update_msg_2 = msgs::UpdateFee {
+		channel_id: update_msg_1.channel_id.clone(),
+		feerate_per_kw: (initial_feerate + 30) as u32,
+	};
+
+	nodes[1].node.handle_update_fee(node_a_id, &update_msg_2);
+
+	update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
+	// Deliver (3)
+	nodes[1].node.handle_update_fee(node_a_id, &update_msg_2);
+
+	// Deliver (1), generating (3) and (4)
+	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_msg);
+	let as_second_update = get_htlc_update_msgs!(nodes[0], node_b_id);
+	check_added_monitors!(nodes[0], 1);
+	assert!(as_second_update.update_add_htlcs.is_empty());
+	assert!(as_second_update.update_fulfill_htlcs.is_empty());
+	assert!(as_second_update.update_fail_htlcs.is_empty());
+	assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
+	// Check that the update_fee newly generated matches what we delivered:
+	assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
+	assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
+
+	// Deliver (2) commitment_signed
+	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed);
+	let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
+	check_added_monitors!(nodes[0], 1);
+	// No commitment_signed so get_event_msg's assert(len == 1) passes
+
+	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_msg);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[1], 1);
+
+	// Deliver (4)
+	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed);
+	let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], node_a_id);
+	check_added_monitors!(nodes[1], 1);
+
+	nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment);
+	let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
+	// No commitment_signed so get_event_msg's assert(len == 1) passes
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_revoke);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[1], 1);
+}
+
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_fee_vanilla() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	create_announced_chan_between_nodes(&nodes, 0, 1);
+
+	{
+		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock += 25;
+	}
+	nodes[0].node.timer_tick_occurred();
+	check_added_monitors!(nodes[0], 1);
+
+	let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events_0.len(), 1);
+	let (update_msg, commitment_signed) = match events_0[0] {
+		MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref 
commitment_signed } } => {
+			(update_fee.as_ref(), commitment_signed)
+		},
+		_ => panic!("Unexpected event"),
+	};
+	nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap());
+
+	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed);
+	let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id);
+	check_added_monitors!(nodes[1], 1);
+
+	nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed);
+	let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id);
+	// No commitment_signed so get_event_msg's assert(len == 1) passes
+	check_added_monitors!(nodes[0], 1);
+
+	nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[1], 1);
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_fee_that_funder_cannot_afford() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	let channel_value = 5000;
+	let push_sats = 700;
+	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000);
+	let channel_id = chan.2;
+	let secp_ctx = Secp256k1::new();
+	let default_config = UserConfig::default();
+	let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
+
+	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
+
+	// Calculate the maximum feerate that A can afford. Note that we won't send an update_fee
+	// unless we can still afford CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs on top of it, well
+	// before actually running out of local balance, so we calculate two different feerates here -
+	// the expected local limit as well as the expected remote limit.
+	let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32;
+	let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32;
+	{
+		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock = feerate;
+	}
+	nodes[0].node.timer_tick_occurred();
+	check_added_monitors!(nodes[0], 1);
+	let update_msg = get_htlc_update_msgs!(nodes[0], node_b_id);
+
+	nodes[1].node.handle_update_fee(node_a_id, &update_msg.update_fee.unwrap());
+
+	commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
+
+	// Confirm that the new fee based on the last local commitment txn is what we expected based on the feerate set above.
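+	// (With no HTLCs the expected fee is just feerate_per_kw * commitment_tx_base_weight / 1000
+	// sats, which must equal the channel value minus the sum of the two remaining outputs.)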
+	{
+		let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone();
+
+		// We made sure neither party's funds are below the dust limit and there are no HTLCs here
+		assert_eq!(commitment_tx.output.len(), 2);
+		let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000;
+		let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat());
+		actual_fee = channel_value - actual_fee;
+		assert_eq!(total_fee, actual_fee);
+	}
+
+	{
+		// Increment the feerate by a small constant, accounting for rounding errors
+		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock += 4;
+	}
+	nodes[0].node.timer_tick_occurred();
+	nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
+	check_added_monitors!(nodes[0], 0);
+
+	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
+
+	let remote_point = {
+		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
+		let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap();
+		let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap();
+		let chan_signer = remote_chan.get_signer();
+		chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap()
+	};
+
+	let res = {
+		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+		let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap();
+		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap();
+		let local_chan_signer = local_chan.get_signer();
+		let nondust_htlcs: Vec = vec![];
+		let commitment_tx = CommitmentTransaction::new(
+			INITIAL_COMMITMENT_NUMBER - 1,
+			&remote_point,
+			push_sats,
+			channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
+			non_buffer_feerate + 4,
+			nondust_htlcs,
+			&local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(),
+			&secp_ctx,
+		);
+		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(
+			&local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(),
+			Vec::new(), &secp_ctx,
+		).unwrap()
+	};
+
+	let commit_signed_msg = msgs::CommitmentSigned {
+		channel_id: chan.2,
+		signature: res.0,
+		htlc_signatures: res.1,
+		batch: None,
+		#[cfg(taproot)]
+		partial_signature_with_nonce: None,
+	};
+
+	let update_fee = msgs::UpdateFee {
+		channel_id: chan.2,
+		feerate_per_kw: non_buffer_feerate + 4,
+	};
+
+	nodes[1].node.handle_update_fee(node_a_id, &update_fee);
+
+	// While producing the commitment_signed response after handling a received update_fee request,
+	// the check that the funder (who sent the update_fee request) can afford the new fee
+	// (funder_balance >= fee + channel_reserve) should produce an error.
+	nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg);
+	nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3);
+	check_added_monitors!(nodes[1], 1);
+	check_closed_broadcast!(nodes[1], true);
+	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
+		[node_a_id], channel_value);
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn test_update_fee_that_saturates_subs() {
+	// Check that when a remote party sends us an `update_fee` message that results in a total fee
+	// on the commitment transaction that is greater than her balance, we saturate the subtractions,
+	// and force close the channel.
+
+	let mut default_config = test_default_channel_config();
+	let secp_ctx = Secp256k1::new();
+
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	let chan_id = create_chan_between_nodes_with_value(&nodes[0], &nodes[1], 10_000, 8_500_000).3;
+
+	const FEERATE: u32 = 250 * 10; // 10sat/vb
+
+	// Assert that the new feerate will completely exhaust the balance of node 0, and saturate the
+	// subtraction of the total fee from node 0's balance.
+	let total_fee_sat = chan_utils::commit_tx_fee_sat(FEERATE, 0, &ChannelTypeFeatures::empty());
+	assert!(total_fee_sat > 1500);
+
+	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
+
+	// We build a commitment transaction here only to pass node 1's check of node 0's signature
+	// in `commitment_signed`.
+
+	let remote_point = {
+		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
+		let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap();
+		let remote_chan = chan_lock.channel_by_id.get(&chan_id).and_then(Channel::as_funded).unwrap();
+		let chan_signer = remote_chan.get_signer();
+		chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).unwrap()
+	};
+
+	let res = {
+		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+		let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap();
+		let local_chan = local_chan_lock.channel_by_id.get(&chan_id).and_then(Channel::as_funded).unwrap();
+		let local_chan_signer = local_chan.get_signer();
+		let nondust_htlcs: Vec = vec![];
+		let commitment_tx = CommitmentTransaction::new(
+			INITIAL_COMMITMENT_NUMBER,
+			&remote_point,
+			8500,
+			// Set a zero balance here: this is the transaction that node 1 will expect a signature for, as
+			// he will do a saturating subtraction of the total fees from node 0's balance.
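+			// (Node 0's balance here is only 10_000 - 8_500 = 1_500 sats, and total_fee_sat was
+			// asserted above to exceed 1_500, so the saturating subtraction yields exactly 0.)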
+ 0, + FEERATE, + nondust_htlcs, + &local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(), + &secp_ctx, + ); + local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment( + &local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(), + Vec::new(), &secp_ctx, + ).unwrap() + }; + + let commit_signed_msg = msgs::CommitmentSigned { + channel_id: chan_id, + signature: res.0, + htlc_signatures: res.1, + batch: None, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }; + + let update_fee = msgs::UpdateFee { + channel_id: chan_id, + feerate_per_kw: FEERATE, + }; + + nodes[1].node.handle_update_fee(node_a_id, &update_fee); + + nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3); + check_added_monitors!(nodes[1], 1); + check_closed_broadcast!(nodes[1], true); + check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, + [node_a_id], 10_000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fee_with_fundee_update_add_htlc() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + + // balancing + send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); + check_added_monitors!(nodes[1], 1); + + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000); + + // nothing happens since node[1] is in AwaitingRemoteRevoke + nodes[1].node.send_payment_with_route(route, our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + { + let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); + assert_eq!(added_monitors.len(), 0); + added_monitors.clear(); + } + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + // node[1] has nothing to do + + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[0], 1); + + 
nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); + let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + check_added_monitors!(nodes[0], 1); + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); + check_added_monitors!(nodes[1], 1); + // AwaitingRemoteRevoke ends here + + let commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); + assert_eq!(commitment_update.update_add_htlcs.len(), 1); + assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0); + assert_eq!(commitment_update.update_fail_htlcs.len(), 0); + assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0); + assert_eq!(commitment_update.update_fee.is_none(), true); + + nodes[0].node.handle_update_add_htlc(node_b_id, &commitment_update.update_add_htlcs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); + check_added_monitors!(nodes[0], 1); + let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id); + + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke); + check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed); + check_added_monitors!(nodes[1], 1); + let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke); + check_added_monitors!(nodes[0], 1); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + expect_pending_htlcs_forwardable!(nodes[0]); + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentClaimable { .. } => { }, + _ => panic!("Unexpected event"), + }; + + claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage); + + send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); + send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); + close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_update_fee() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan = create_announced_chan_between_nodes(&nodes, 0, 1); + let channel_id = chan.2; + + // A B + // (1) update_fee/commitment_signed -> + // <- (2) revoke_and_ack + // .- send (3) commitment_signed + // (4) update_fee/commitment_signed -> + // .- send (5) revoke_and_ack (no CS as we're awaiting a revoke) + // <- (3) commitment_signed delivered + // send (6) revoke_and_ack -. + // <- (5) deliver revoke_and_ack + // (6) deliver revoke_and_ack -> + // .- send (7) commitment_signed in response to (4) + // <- (7) deliver commitment_signed + // revoke_and_ack -> + + // Create and deliver (1)... 
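+	// (As the channel funder, nodes[0] re-checks its fee estimator on each timer tick and sends
+	// an update_fee once the estimate has moved, hence the lock-and-bump pattern below.)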
+ let feerate; + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + feerate = *feerate_lock; + *feerate_lock = feerate + 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + + // Generate (2) and (3): + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], node_a_id); + check_added_monitors!(nodes[1], 1); + + // Deliver (2): + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[0], 1); + + // Create and deliver (4)... + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = feerate + 30; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events_0.len(), 1); + let (update_msg, commitment_signed) = match events_0[0] { + MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + (update_fee.as_ref(), commitment_signed) + }, + _ => panic!("Unexpected event"), + }; + + nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); + check_added_monitors!(nodes[1], 1); + // ... 
creating (5) + let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + + // Handle (3), creating (6): + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_0); + check_added_monitors!(nodes[0], 1); + let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + + // Deliver (5): + nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + check_added_monitors!(nodes[0], 1); + + // Deliver (6), creating (7): + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg_0); + let commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); + assert!(commitment_update.update_add_htlcs.is_empty()); + assert!(commitment_update.update_fulfill_htlcs.is_empty()); + assert!(commitment_update.update_fail_htlcs.is_empty()); + assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(commitment_update.update_fee.is_none()); + check_added_monitors!(nodes[1], 1); + + // Deliver (7) + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); + check_added_monitors!(nodes[0], 1); + let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + // No commitment_signed so get_event_msg's assert(len == 1) passes + + nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); + check_added_monitors!(nodes[1], 1); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); + assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); + close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); +} + +#[xtest(feature = "_externalize_tests")] +pub fn test_chan_init_feerate_unaffordability() { + // Test that we will reject channel opens which do not leave enough to pay for any HTLCs due to + // channel reserve and feerate requirements. + let mut chanmon_cfgs = create_chanmon_cfgs(2); + let feerate_per_kw = *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let default_config = UserConfig::default(); + let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + + // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single + // HTLC. 
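+	// (push_msat is chosen so that nodes[0] keeps back exactly the commitment fee required to
+	// afford MIN_AFFORDABLE_HTLC_COUNT HTLCs; pushing even one msat more must be rejected.)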
+	let mut push_amt = 100_000_000;
+	push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features);
+	assert_eq!(nodes[0].node.create_channel(node_b_id, 100_000, push_amt + 1, 42, None, None).unwrap_err(),
+		APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() });
+
+	// During open, we don't have a "counterparty channel reserve" to check against, so that
+	// requirement only comes into play on the open_channel handling side.
+	push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+	nodes[0].node.create_channel(node_b_id, 100_000, push_amt, 42, None, None).unwrap();
+	let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
+	open_channel_msg.push_msat += 1;
+	nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg);
+
+	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 1);
+	match msg_events[0] {
+		MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => {
+			assert_eq!(msg.data, "Insufficient funding amount for initial reserve");
+		},
+		_ => panic!("Unexpected event"),
+	}
+}
+
+#[xtest(feature = "_externalize_tests")]
+pub fn accept_busted_but_better_fee() {
+	// If a peer sends us a fee update that is too low, but higher than our previous channel
+	// feerate, we should accept it. In the future we may want to consider closing the channel
+	// later, but for now we only accept the update.
+	let mut chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+
+	create_chan_between_nodes(&nodes[0], &nodes[1]);
+
+	// Set nodes[1] to expect 5,000 sat/kW.
+	{
+		let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock = 5000;
+	}
+
+	// If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
+	{
+		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock = 1000;
+	}
+	nodes[0].node.timer_tick_occurred();
+	check_added_monitors!(nodes[0], 1);
+
+	let events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+			nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap());
+			commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	// If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should accept
+	// it.
+	{
+		let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+		*feerate_lock = 2000;
+	}
+	nodes[0].node.timer_tick_occurred();
+	check_added_monitors!(nodes[0], 1);
+
+	let events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. 
} => { + nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); + commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); + }, + _ => panic!("Unexpected event"), + }; + + // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the + // channel. + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = 1000; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { + nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); + check_closed_event!(nodes[1], 1, ClosureReason::PeerFeerateTooLow { + peer_feerate_sat_per_kw: 1000, required_feerate_sat_per_kw: 5000, + }, [node_a_id], 100000); + check_closed_broadcast!(nodes[1], true); + check_added_monitors!(nodes[1], 1); + }, + _ => panic!("Unexpected event"), + }; +} From 727d7820526c8e6a77ba7ce9acc6bd068e0d2eb3 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 28 Apr 2025 01:28:37 +0000 Subject: [PATCH 17/25] Cleanup `update_fee_tests` in anticipation of it being `rustfmt`'d --- lightning/src/ln/update_fee_tests.rs | 165 +++++++++++++++------------ 1 file changed, 95 insertions(+), 70 deletions(-) diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs index 93b052a02c7..84261efb3bf 100644 --- a/lightning/src/ln/update_fee_tests.rs +++ b/lightning/src/ln/update_fee_tests.rs @@ -1,12 +1,20 @@ //! Functional tests testing channel feerate handling. use crate::events::{ClosureReason, Event}; -use crate::ln::functional_test_utils::*; -use crate::ln::chan_utils::{self, CommitmentTransaction, commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, HTLCOutputInCommitment}; -use crate::ln::channel::{Channel, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, CONCURRENT_INBOUND_HTLC_FEE_BUFFER}; +use crate::ln::chan_utils::{ + self, commitment_tx_base_weight, CommitmentTransaction, HTLCOutputInCommitment, + COMMITMENT_TX_WEIGHT_PER_HTLC, +}; +use crate::ln::channel::{ + get_holder_selected_channel_reserve_satoshis, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, + MIN_AFFORDABLE_HTLC_COUNT, +}; use crate::ln::channelmanager::PaymentId; +use crate::ln::functional_test_utils::*; +use crate::ln::msgs::{ + self, BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent, +}; use crate::ln::outbound_payment::RecipientOnionFields; -use crate::ln::msgs::{self, BaseMessageHandler, ErrorAction, ChannelMessageHandler, MessageSendEvent}; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::util::config::UserConfig; use crate::util::errors::APIError; @@ -29,7 +37,7 @@ pub fn test_async_inbound_update_fee() { create_announced_chan_between_nodes(&nodes, 0, 1); // balancing - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1]], 8000000); // A B // update_fee -> @@ -69,8 +77,9 @@ pub fn test_async_inbound_update_fee() { // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... 
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); - nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[1], 1); let payment_event = { @@ -150,7 +159,7 @@ pub fn test_update_fee_unordered_raa() { create_announced_chan_between_nodes(&nodes, 0, 1); // balancing - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1]], 8000000); // First nodes[0] generates an update_fee { @@ -173,8 +182,9 @@ pub fn test_update_fee_unordered_raa() { // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); - nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[1], 1); let payment_event = { @@ -317,7 +327,6 @@ pub fn test_multi_flight_update_fee() { check_added_monitors!(nodes[1], 1); } - #[xtest(feature = "_externalize_tests")] pub fn test_update_fee_vanilla() { let chanmon_cfgs = create_chanmon_cfgs(2); @@ -340,7 +349,7 @@ pub fn test_update_fee_vanilla() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. 
} => {
 			(update_fee.as_ref(), commitment_signed)
 		},
 		_ => panic!("Unexpected event"),
@@ -381,7 +390,8 @@ pub fn test_update_fee_that_funder_cannot_afford() {
 	let channel_id = chan.2;
 	let secp_ctx = Secp256k1::new();
 	let default_config = UserConfig::default();
-	let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
+	let bs_channel_reserve_sats =
+		get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
 
 	let channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 
@@ -421,24 +431,29 @@ pub fn test_update_fee_that_funder_cannot_afford() {
 		*feerate_lock += 4;
 	}
 	nodes[0].node.timer_tick_occurred();
-	nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
+	let err = format!("Cannot afford to send new feerate at {}", feerate + 4);
+	nodes[0].logger.assert_log("lightning::ln::channel", err, 1);
 	check_added_monitors!(nodes[0], 0);
 
 	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
 
 	let remote_point = {
-		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
-		let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap();
-		let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap();
-		let chan_signer = remote_chan.get_signer();
-		chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx).unwrap()
+		let mut per_peer_lock;
+		let mut peer_state_lock;
+
+		let channel = get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, chan.2);
+		let chan_signer = channel.as_funded().unwrap().get_signer();
+		let point_number = INITIAL_COMMITMENT_NUMBER - 1;
+		chan_signer.as_ref().get_per_commitment_point(point_number, &secp_ctx).unwrap()
 	};
 
 	let res = {
-		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
-		let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap();
-		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(Channel::as_funded).unwrap();
-		let local_chan_signer = local_chan.get_signer();
+		let mut per_peer_lock;
+		let mut peer_state_lock;
+
+		let local_chan = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan.2);
+		let local_chan_signer = local_chan.as_funded().unwrap().get_signer();
+
 		let nondust_htlcs: Vec = vec![];
 		let commitment_tx = CommitmentTransaction::new(
 			INITIAL_COMMITMENT_NUMBER - 1,
@@ -447,13 +462,15 @@ pub fn test_update_fee_that_funder_cannot_afford() {
 			channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000,
 			non_buffer_feerate + 4,
 			nondust_htlcs,
-			&local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(),
+			&local_chan.funding().channel_transaction_parameters.as_counterparty_broadcastable(),
 			&secp_ctx,
 		);
-		local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment(
-			&local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(),
-			Vec::new(), &secp_ctx,
-		).unwrap()
+		let params = &local_chan.funding().channel_transaction_parameters;
+		local_chan_signer
+			.as_ecdsa()
+			.unwrap()
+			.sign_counterparty_commitment(params, &commitment_tx, Vec::new(), Vec::new(), &secp_ctx)
+			.unwrap()
 	};
 
 	let commit_signed_msg = msgs::CommitmentSigned {
@@ -476,11 +493,12 @@ pub fn test_update_fee_that_funder_cannot_afford() {
 	// the check that the funder (who sent the update_fee request) can afford the new fee
 	// (funder_balance >= fee + channel_reserve) should produce an error.
nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3); + let err = "Funding remote cannot afford proposed new fee"; + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", err, 3); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, - [node_a_id], channel_value); + let reason = ClosureReason::ProcessingError { err: err.to_string() }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], channel_value); } #[xtest(feature = "_externalize_tests")] @@ -498,7 +516,6 @@ pub fn test_update_fee_that_saturates_subs() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); - let node_b_id = nodes[1].node.get_our_node_id(); let chan_id = create_chan_between_nodes_with_value(&nodes[0], &nodes[1], 10_000, 8_500_000).3; @@ -515,18 +532,20 @@ ... // in `commitment_signed`. let remote_point = { - let per_peer_state = nodes[1].node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(&node_a_id).unwrap().lock().unwrap(); - let remote_chan = chan_lock.channel_by_id.get(&chan_id).and_then(Channel::as_funded).unwrap(); - let chan_signer = remote_chan.get_signer(); + let mut per_peer_lock; + let mut peer_state_lock; + + let channel = get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, chan_id); + let chan_signer = channel.as_funded().unwrap().get_signer(); chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).unwrap() }; let res = { - let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); - let local_chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); - let local_chan = local_chan_lock.channel_by_id.get(&chan_id).and_then(Channel::as_funded).unwrap(); - let local_chan_signer = local_chan.get_signer(); + let mut per_peer_lock; + let mut peer_state_lock; + + let local_chan = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); + let local_chan_signer = local_chan.as_funded().unwrap().get_signer(); let nondust_htlcs: Vec<HTLCOutputInCommitment> = vec![]; let commitment_tx = CommitmentTransaction::new( INITIAL_COMMITMENT_NUMBER, @@ -537,13 +556,15 @@ ... 0, FEERATE, nondust_htlcs, - &local_chan.funding.channel_transaction_parameters.as_counterparty_broadcastable(), + &local_chan.funding().channel_transaction_parameters.as_counterparty_broadcastable(), &secp_ctx, ); - local_chan_signer.as_ecdsa().unwrap().sign_counterparty_commitment( - &local_chan.funding.channel_transaction_parameters, &commitment_tx, Vec::new(), - Vec::new(), &secp_ctx, - ).unwrap() + let params = &local_chan.funding().channel_transaction_parameters; + local_chan_signer + .as_ecdsa() + .unwrap() + .sign_counterparty_commitment(params, &commitment_tx, Vec::new(), Vec::new(), &secp_ctx) + .unwrap() }; let commit_signed_msg = msgs::CommitmentSigned { @@ -563,11 +584,12 @@ ... nodes[1].node.handle_update_fee(node_a_id, &update_fee); nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot
afford proposed new fee"; + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", err, 3); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, - [node_a_id], 10_000); + let reason = ClosureReason::ProcessingError { err: err.to_string() }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 10_000); } #[xtest(feature = "_externalize_tests")] @@ -583,7 +605,7 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); // balancing - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1]], 8000000); { let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); @@ -595,7 +617,7 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), @@ -608,13 +630,10 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000); // nothing happens since node[1] is in AwaitingRemoteRevoke - nodes[1].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - { - let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap(); - assert_eq!(added_monitors.len(), 0); - added_monitors.clear(); - } + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors(&nodes[1], 0); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); // node[1] has nothing to do @@ -665,13 +684,15 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { _ => panic!("Unexpected event"), }; - claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage); + claim_payment(&nodes[1], &[&nodes[0]], our_payment_preimage); - send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); - send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); + send_payment(&nodes[1], &[&nodes[0]], 800000); + send_payment(&nodes[0], &[&nodes[1]], 800000); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); + let node_a_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, node_a_reason, [node_b_id], 100000); + let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, node_b_reason, [node_a_id], 100000); 
} #[xtest(feature = "_externalize_tests")] @@ -714,7 +735,7 @@ pub fn test_update_fee() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), @@ -741,7 +762,7 @@ pub fn test_update_fee() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { node_id:_, channel_id: _, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { (update_fee.as_ref(), commitment_signed) }, _ => panic!("Unexpected event"), @@ -788,8 +809,10 @@ pub fn test_update_fee() { assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); + let node_a_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, node_a_reason, [node_b_id], 100000); + let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, node_b_reason, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -826,7 +849,7 @@ pub fn test_chan_init_feerate_unaffordability() { let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); match msg_events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, .. } => { assert_eq!(msg.data, "Insufficient funding amount for initial reserve"); }, _ => panic!("Unexpected event"), @@ -904,9 +927,11 @@ pub fn accept_busted_but_better_fee() { match events[0] { MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. 
} => { nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); - check_closed_event!(nodes[1], 1, ClosureReason::PeerFeerateTooLow { - peer_feerate_sat_per_kw: 1000, required_feerate_sat_per_kw: 5000, - }, [node_a_id], 100000); + let reason = ClosureReason::PeerFeerateTooLow { + peer_feerate_sat_per_kw: 1000, + required_feerate_sat_per_kw: 5000, + }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); }, From d5b6a7a1a4cacb9edc26a3068bf33507ed513164 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 28 Apr 2025 14:10:43 +0000 Subject: [PATCH 18/25] De-macro `check_added_monitors` in `update_fee_tests.rs` --- lightning/src/ln/update_fee_tests.rs | 110 +++++++++++++-------------- 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs index 84261efb3bf..78ebe649dcc 100644 --- a/lightning/src/ln/update_fee_tests.rs +++ b/lightning/src/ln/update_fee_tests.rs @@ -62,7 +62,7 @@ pub fn test_async_inbound_update_fee() { *feerate_lock += 20; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); @@ -80,7 +80,7 @@ pub fn test_async_inbound_update_fee() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let payment_event = { let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -95,13 +95,13 @@ pub fn test_async_inbound_update_fee() { nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); // (2) let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // deliver(1), generate (3): nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); // deliver (2) let bs_update = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -110,7 +110,7 @@ pub fn test_async_inbound_update_fee() { assert!(bs_update.update_fail_htlcs.is_empty()); // (4) assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4) assert!(bs_update.update_fee.is_none()); // (4) - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); // deliver (3) let as_update = get_htlc_update_msgs!(nodes[0], node_b_id); @@ -119,19 +119,19 @@ pub fn test_async_inbound_update_fee() { assert!(as_update.update_fail_htlcs.is_empty()); // (5) assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5) assert!(as_update.update_fee.is_none()); // (5) - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, 
&bs_update.commitment_signed); // deliver (4) let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // only (6) so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update.commitment_signed); // deliver (5) let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events_2 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_2.len(), 1); @@ -141,7 +141,7 @@ pub fn test_async_inbound_update_fee() { } nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_revoke); // deliver (6) - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } #[xtest(feature = "_externalize_tests")] @@ -167,7 +167,7 @@ pub fn test_update_fee_unordered_raa() { *feerate_lock += 20; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); @@ -185,7 +185,7 @@ pub fn test_update_fee_unordered_raa() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let payment_event = { let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -200,10 +200,10 @@ pub fn test_update_fee_unordered_raa() { nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); // (2) let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_msg); // deliver (2) - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // We can't continue, sadly, because our (1) now has a bogus signature } @@ -247,7 +247,7 @@ pub fn test_multi_flight_update_fee() { *feerate_lock = initial_feerate + 20; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); @@ -262,7 +262,7 @@ pub fn test_multi_flight_update_fee() { nodes[1].node.handle_update_fee(node_a_id, update_msg_1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed_1); let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment // transaction: @@ -289,7 +289,7 @@ pub fn test_multi_flight_update_fee() { // Deliver (1), generating (3) and (4) nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_msg); let as_second_update = get_htlc_update_msgs!(nodes[0], node_b_id); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(as_second_update.update_add_htlcs.is_empty()); 
assert!(as_second_update.update_fulfill_htlcs.is_empty()); assert!(as_second_update.update_fail_htlcs.is_empty()); @@ -301,30 +301,30 @@ // Deliver (2) commitment_signed nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // No commitment_signed so get_event_msg's assert(len == 1) passes nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Deliver (4) nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment); let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_revoke); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } #[xtest(feature = "_externalize_tests")] @@ -344,7 +344,7 @@ pub fn test_update_fee_vanilla() { *feerate_lock += 25; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); @@ -358,20 +358,20 @@ pub fn test_update_fee_vanilla() { nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } #[xtest(feature = "_externalize_tests")] @@ -406,7 +406,7 @@ pub fn
test_update_fee_that_funder_cannot_afford() { nodes[0].node.timer_tick_occurred(); let err = format!("Cannot afford to send new feerate at {}", feerate + 4); nodes[0].logger.assert_log("lightning::ln::channel", err, 1); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654; @@ -495,7 +495,7 @@ pub fn test_update_fee_that_funder_cannot_afford() { nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); let err = "Funding remote cannot afford proposed new fee"; nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", err, 3); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::ProcessingError { err: err.to_string() }; check_closed_event!(nodes[1], 1, reason, [node_a_id], channel_value); @@ -586,7 +586,7 @@ pub fn test_update_fee_that_saturates_subs() { nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); let err = "Funding remote cannot afford proposed new fee"; nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", err, 3); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::ProcessingError { err: err.to_string() }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 10_000); @@ -612,7 +612,7 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { *feerate_lock += 20; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); @@ -625,7 +625,7 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000); @@ -640,14 +640,14 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // AwaitingRemoteRevoke ends here let commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -659,20 +659,20 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { nodes[0].node.handle_update_add_htlc(node_b_id, &commitment_update.update_add_htlcs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke); - 
check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); expect_pending_htlcs_forwardable!(nodes[0]); @@ -730,7 +730,7 @@ pub fn test_update_fee() { *feerate_lock = feerate + 20; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); @@ -745,12 +745,12 @@ pub fn test_update_fee() { // Generate (2) and (3): nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Deliver (2): nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Create and deliver (4)... { @@ -758,7 +758,7 @@ pub fn test_update_fee() { *feerate_lock = feerate + 30; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { @@ -770,21 +770,21 @@ pub fn test_update_fee() { nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // ... 
creating (5) let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes // Handle (3), creating (6): nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes // Deliver (5): nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Deliver (6), creating (7): nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg_0); @@ -794,16 +794,16 @@ pub fn test_update_fee() { assert!(commitment_update.update_fail_htlcs.is_empty()); assert!(commitment_update.update_fail_malformed_htlcs.is_empty()); assert!(commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Deliver (7) nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); @@ -882,7 +882,7 @@ pub fn accept_busted_but_better_fee() { *feerate_lock = 1000; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -901,7 +901,7 @@ pub fn accept_busted_but_better_fee() { *feerate_lock = 2000; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -920,7 +920,7 @@ pub fn accept_busted_but_better_fee() { *feerate_lock = 1000; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -933,7 +933,7 @@ pub fn accept_busted_but_better_fee() { }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); }, _ => panic!("Unexpected event"), }; From 415b6a15f31818268ce9d72d057b94ad6ccf5690 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 28 Apr 2025 01:37:11 +0000 Subject: [PATCH 19/25] Run rustfmt on `update_fee_tests` --- lightning/src/ln/update_fee_tests.rs | 160 +++++++++++++++++---------- 1 file changed, 104 insertions(+), 56 deletions(-) diff --git a/lightning/src/ln/update_fee_tests.rs b/lightning/src/ln/update_fee_tests.rs index 78ebe649dcc..284f56cab3f 100644 --- a/lightning/src/ln/update_fee_tests.rs +++ b/lightning/src/ln/update_fee_tests.rs @@ -16,9 +16,9 @@ use crate::ln::msgs::{ }; use crate::ln::outbound_payment::RecipientOnionFields; use 
crate::sign::ecdsa::EcdsaChannelSigner; +use crate::types::features::ChannelTypeFeatures; use crate::util::config::UserConfig; use crate::util::errors::APIError; -use crate::types::features::ChannelTypeFeatures; use lightning_macros::xtest; @@ -66,17 +66,20 @@ pub fn test_async_inbound_update_fee() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); - let (update_msg, commitment_signed) = match events_0[0] { // (1) - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - (update_fee.as_ref(), commitment_signed) - }, + let (update_msg, commitment_signed) = match events_0[0] { + // (1) + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), _ => panic!("Unexpected event"), }; nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 40000); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); @@ -136,7 +139,7 @@ pub fn test_async_inbound_update_fee() { let events_2 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_2.len(), 1); match events_2[0] { - Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment + Event::PendingHTLCsForwardable { .. } => {}, // If we actually processed we'd receive the payment _ => panic!("Unexpected event"), } @@ -171,17 +174,20 @@ pub fn test_update_fee_unordered_raa() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); - let update_msg = match events_0[0] { // (1) - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { - update_fee.as_ref() - }, + let update_msg = match events_0[0] { + // (1) + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, .. }, + .. + } => update_fee.as_ref(), _ => panic!("Unexpected event"), }; nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]... - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 40000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 40000); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[1].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); @@ -251,10 +257,12 @@ pub fn test_multi_flight_update_fee() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); - let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1) - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. 
} => { (update_fee.as_ref().unwrap(), commitment_signed) }, + let (update_msg_1, commitment_signed_1) = match events_0[0] { + // (1) + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref().unwrap(), commitment_signed), _ => panic!("Unexpected event"), }; @@ -296,7 +304,10 @@ pub fn test_multi_flight_update_fee() { assert!(as_second_update.update_fail_malformed_htlcs.is_empty()); // Check that the update_fee newly generated matches what we delivered: assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id); - assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw); + assert_eq!( + as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, + update_msg_2.feerate_per_kw + ); // Deliver (2) commitment_signed nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); @@ -309,7 +320,9 @@ pub fn test_multi_flight_update_fee() { check_added_monitors(&nodes[1], 1); // Deliver (4) - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], node_a_id); check_added_monitors(&nodes[1], 1); @@ -349,9 +362,10 @@ pub fn test_update_fee_vanilla() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - (update_fee.as_ref(), commitment_signed) - }, + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), _ => panic!("Unexpected event"), }; nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); @@ -386,7 +400,13 @@ pub fn test_update_fee_that_funder_cannot_afford() { let channel_value = 5000; let push_sats = 700; - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000); + let chan = create_announced_chan_between_nodes_with_value( + &nodes, + 0, + 1, + channel_value, + push_sats * 1000, + ); let channel_id = chan.2; let secp_ctx = Secp256k1::new(); let default_config = UserConfig::default(); @@ -399,8 +419,12 @@ pub fn test_update_fee_that_funder_cannot_afford() { // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we // calculate two different feerates here - the expected local limit as well as the expected // remote limit.
- let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32; - let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32; + let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 + / (commitment_tx_base_weight(&channel_type_features) + + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) + as u32; + let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 + / commitment_tx_base_weight(&channel_type_features)) as u32; { let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); *feerate_lock = feerate; @@ -420,7 +444,8 @@ pub fn test_update_fee_that_funder_cannot_afford() { //We made sure neither party's funds are below the dust limit and there are no HTLCs here assert_eq!(commitment_tx.output.len(), 2); let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000; - let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat()); + let mut actual_fee = + commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat()); actual_fee = channel_value - actual_fee; assert_eq!(total_fee, actual_fee); } @@ -451,7 +476,8 @@ pub fn test_update_fee_that_funder_cannot_afford() { let mut per_peer_lock; let mut peer_state_lock; - let local_chan = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan.2); + let local_chan = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan.2); let local_chan_signer = local_chan.as_funded().unwrap().get_signer(); let nondust_htlcs: Vec<HTLCOutputInCommitment> = vec![]; @@ -459,7 +485,9 @@ pub fn test_update_fee_that_funder_cannot_afford() { INITIAL_COMMITMENT_NUMBER - 1, &remote_point, push_sats, - channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000, + channel_value + - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) + / 1000, non_buffer_feerate + 4, nondust_htlcs, &local_chan.funding().channel_transaction_parameters.as_counterparty_broadcastable(), &secp_ctx, @@ -482,10 +510,7 @@ pub fn test_update_fee_that_funder_cannot_afford() { partial_signature_with_nonce: None, }; - let update_fee = msgs::UpdateFee { - channel_id: chan.2, - feerate_per_kw: non_buffer_feerate + 4, - }; + let update_fee = msgs::UpdateFee { channel_id: chan.2, feerate_per_kw: non_buffer_feerate + 4 }; nodes[1].node.handle_update_fee(node_a_id, &update_fee); @@ -512,7 +537,8 @@ pub fn test_update_fee_that_saturates_subs() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -544,7 +570,8 @@ pub fn test_update_fee_that_saturates_subs() { let mut per_peer_lock; let mut peer_state_lock; - let local_chan = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); + let local_chan = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); let local_chan_signer = 
local_chan.as_funded().unwrap().get_signer(); let nondust_htlcs: Vec<HTLCOutputInCommitment> = vec![]; let commitment_tx = CommitmentTransaction::new( @@ -576,10 +603,7 @@ pub fn test_update_fee_that_saturates_subs() { partial_signature_with_nonce: None, }; - let update_fee = msgs::UpdateFee { - channel_id: chan_id, - feerate_per_kw: FEERATE, - }; + let update_fee = msgs::UpdateFee { channel_id: chan_id, feerate_per_kw: FEERATE }; nodes[1].node.handle_update_fee(node_a_id, &update_fee); @@ -617,9 +641,10 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); let (update_msg, commitment_signed) = match events_0[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - (update_fee.as_ref(), commitment_signed) - }, + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), _ => panic!("Unexpected event"), }; nodes[1].node.handle_update_fee(node_a_id, update_msg.unwrap()); @@ -627,7 +652,8 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); check_added_monitors(&nodes[1], 1); - let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 800000); + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 800000); // nothing happens since node[1] is in AwaitingRemoteRevoke let onion = RecipientOnionFields::secret_only(our_payment_secret); @@ -658,7 +684,9 @@ pub fn test_update_fee_with_fundee_update_add_htlc() { assert_eq!(commitment_update.update_fee.is_none(), true); nodes[0].node.handle_update_add_htlc(node_b_id, &commitment_update.update_add_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); check_added_monitors(&nodes[0], 1); let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke);
} => { - (update_fee.as_ref(), commitment_signed) - }, + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), _ => panic!("Unexpected event"), }; @@ -797,7 +827,9 @@ pub fn test_update_fee() { check_added_monitors(&nodes[1], 1); // Deliver (7) - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &commitment_update.commitment_signed); check_added_monitors(&nodes[0], 1); let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes @@ -834,7 +866,11 @@ pub fn test_chan_init_feerate_unaffordability() { // Set the push_msat amount such that nodes[0] will not be able to afford to add even a single // HTLC. let mut push_amt = 100_000_000; - push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, &channel_type_features); + push_amt -= commit_tx_fee_msat( + feerate_per_kw, + MIN_AFFORDABLE_HTLC_COUNT as u64, + &channel_type_features, + ); assert_eq!(nodes[0].node.create_channel(node_b_id, 100_000, push_amt + 1, 42, None, None).unwrap_err(), APIError::APIMisuseError { err: "Funding amount (356) can't even pay fee for initial commitment transaction fee of 357.".to_string() }); @@ -842,14 +878,17 @@ pub fn test_chan_init_feerate_unaffordability() { // requirement only comes into play on the open_channel handling side. push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000; nodes[0].node.create_channel(node_b_id, 100_000, push_amt, 42, None, None).unwrap(); - let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + let mut open_channel_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); open_channel_msg.push_msat += 1; nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg); let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); match msg_events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, .. } => { + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, .. + } => { assert_eq!(msg.data, "Insufficient funding amount for initial reserve"); }, _ => panic!("Unexpected event"), @@ -887,7 +926,10 @@ pub fn accept_busted_but_better_fee() { let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => { nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); }, @@ -906,7 +948,10 @@ pub fn accept_busted_but_better_fee() { let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. 
+ } => { nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false); }, @@ -925,7 +970,10 @@ pub fn accept_busted_but_better_fee() { let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, .. }, + .. + } => { nodes[1].node.handle_update_fee(node_a_id, update_fee.as_ref().unwrap()); let reason = ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw: 1000, From 7587580bda7147416a5e4e15e2bf68c9830b7b5b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 28 Apr 2025 14:35:06 +0000 Subject: [PATCH 20/25] Remove some useless tests in functional_tests and reject huge chans In general we shouldn't have tests lying around that are useless, as they don't genuinely test anything and may break due to harmless protocol changes. Here, we drop a few useless tests. Further, we remove a test that channels of 2^24 (i.e. 26, since `^` is XOR in Rust, not exponentiation) sats fail to open, which was intended to test pre-WUMBO channel rejection but actually tested too-small channels (and we have other tests for WUMBO channels). --- lightning/src/ln/functional_tests.rs | 23 ++--------------------- 1 file changed, 2 insertions(+), 21 deletions(-) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 70fc54ed6eb..bd7b830cf31 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -4667,27 +4667,15 @@ pub fn bolt2_open_channel_sending_node_checks_part2() { let node_b_id = nodes[1].node.get_our_node_id(); - // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis - let channel_value_satoshis=2^24; - let push_msat=10001; - assert!(nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).is_err()); - // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis let channel_value_satoshis=10000; // Test when push_msat is one more than 1000 * funding_satoshis.
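// Worked numbers for the check below (an illustrative aside, not in the original test):
// with funding_satoshis = 10_000 the BOLT #2 cap is 1000 * 10_000 = 10_000_000 msat, so
// the 10_000_001 msat push_msat below must make create_channel return an error.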
let push_msat=1000*channel_value_satoshis+1; assert!(nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).is_err()); - // BOLT #2 spec: Sending node must set set channel_reserve_satoshis greater than or equal to dust_limit_satoshis - let channel_value_satoshis=10000; - let push_msat=10001; - assert!(nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).is_ok()); //Create a valid channel - let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); - assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.common_fields.dust_limit_satoshis); + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); - // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0 - // Only the least-significant bit of channel_flags is currently defined resulting in channel_flags only having one of two possible states 0 or 1 - assert!(node0_to_1_send_open_channel.common_fields.channel_flags<=1); + let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver. assert!(BREAKDOWN_TIMEOUT>0); @@ -4696,13 +4684,6 @@ pub fn bolt2_open_channel_sending_node_checks_part2() { // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within. let chain_hash = ChainHash::using_genesis_block(Network::Testnet); assert_eq!(node0_to_1_send_open_channel.common_fields.chain_hash, chain_hash); - - // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys. - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.funding_pubkey.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.revocation_basepoint.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.htlc_basepoint.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.payment_basepoint.serialize()).is_ok()); - assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.common_fields.delayed_payment_basepoint.serialize()).is_ok()); } #[xtest(feature = "_externalize_tests")] From 4a5be89821740609ddaf0ab23a17e5f4ee97faf7 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 28 Apr 2025 02:02:15 +0000 Subject: [PATCH 21/25] Clean up `functional_tests.rs` in anticipation of `rustfmt`ing it --- lightning/src/ln/functional_tests.rs | 1284 ++++++++++++++++---------- rustfmt_excluded_files | 1 - 2 files changed, 788 insertions(+), 497 deletions(-) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index bd7b830cf31..1da6c7ae0df 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -94,9 +94,12 @@ fn test_channel_resumption_fail_post_funding() { // After ddf75afd16 we'd panic on reconnection if we exchanged funding info, so test that // explicitly here. 
- nodes[0].node.peer_connected(node_b_id, &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new()); } @@ -129,7 +132,8 @@ pub fn test_insane_channel_opens() { // Test helper that asserts we get the correct error string given a mutator // that supposedly makes the channel open message insane let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| { - nodes[1].node.handle_open_channel(node_a_id, &message_mutator(open_channel_message.clone())); + let open_channel_mutated = message_mutator(open_channel_message.clone()); + nodes[1].node.handle_open_channel(node_a_id, &open_channel_mutated); let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); let expected_regex = regex::Regex::new(expected_error_str).unwrap(); @@ -181,7 +185,8 @@ pub fn test_funding_exceeds_no_wumbo_limit() { match nodes[0].node.create_channel(node_b_id, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) { Err(APIError::APIMisuseError { err }) => { - assert_eq!(format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1), err); + let exp_err = format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1); + assert_eq!(err, exp_err); }, _ => panic!() } @@ -306,30 +311,30 @@ pub fn fake_network_test() { let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3); // Rebalance the network a bit by relaying one payment through all the channels... 
- send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 8000000); // Send some more payments - send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000); - send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000); - send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000); + send_payment(&nodes[1], &[&nodes[2], &nodes[3]], 1000000); + send_payment(&nodes[3], &[&nodes[2], &nodes[1], &nodes[0]], 1000000); + send_payment(&nodes[3], &[&nodes[2], &nodes[1]], 1000000); // Test failure packets - let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1; - fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1); + let payment_hash_1 = route_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 1000000).1; + fail_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], payment_hash_1); // Add a new channel that skips 3 let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000); - send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000); - send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000); - send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000); - send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000); - send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000); - send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[3]], 1000000); + send_payment(&nodes[2], &[&nodes[3]], 1000000); + send_payment(&nodes[1], &[&nodes[3]], 8000000); + send_payment(&nodes[1], &[&nodes[3]], 8000000); + send_payment(&nodes[1], &[&nodes[3]], 8000000); + send_payment(&nodes[1], &[&nodes[3]], 8000000); + send_payment(&nodes[1], &[&nodes[3]], 8000000); // Do some rebalance loop payments, simultaneously let mut hops = Vec::with_capacity(3); @@ -366,9 +371,9 @@ pub fn fake_network_test() { node_b_id, TEST_FINAL_CLTV ).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1000000); - let payment_preimage_1 = send_along_route(&nodes[1], - Route { paths: vec![Path { hops, blinded_tail: None }], route_params: Some(route_params.clone()) }, - &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0; + let route = Route { paths: vec![Path { hops, blinded_tail: None }], route_params: Some(route_params.clone()) }; + let path: &[_] = &[&nodes[2], &nodes[3], &nodes[1]]; + let payment_preimage_1 = send_along_route(&nodes[1], route, path, 1000000).0; let mut hops = Vec::with_capacity(3); hops.push(RouteHop { @@ -400,27 +405,38 @@ pub fn fake_network_test() { }); hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; - let payment_hash_2 = 
send_along_route(&nodes[1], - Route { paths: vec![Path { hops, blinded_tail: None }], route_params: Some(route_params) }, - &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1; + let route = Route { paths: vec![Path { hops, blinded_tail: None }], route_params: Some(route_params) }; + let path: &[_] = &[&nodes[3], &nodes[2], &nodes[1]]; + let payment_hash_2 = send_along_route(&nodes[1], route, path, 1000000).1; // Claim the rebalances... - fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2); - claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1); + fail_payment(&nodes[1], &[&nodes[3], &nodes[2], &nodes[1]], payment_hash_2); + claim_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[1]], payment_preimage_1); // Close down the channels... close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); + let node_a_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, node_a_reason, [node_b_id], 100000); + let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, node_b_reason, [node_a_id], 100000); + close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_c_id], 100000); - check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); + let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, node_b_reason, [node_c_id], 100000); + let node_c_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[2], 1, node_c_reason, [node_b_id], 100000); + close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true); - check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_d_id], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_c_id], 100000); + let node_c_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[2], 1, node_c_reason, [node_d_id], 100000); + let node_d_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[3], 1, node_d_reason, [node_c_id], 100000); + close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_d_id], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); + let node_b_reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, node_b_reason, [node_d_id], 100000); + let node_d_reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[3], 1, node_d_reason, [node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -439,17 +455,17 @@ pub fn duplicate_htlc_test() { create_announced_chan_between_nodes(&nodes, 3, 4); create_announced_chan_between_nodes(&nodes, 3, 5); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000); + let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[3], &nodes[4]], 1000000); *nodes[0].network_payment_count.borrow_mut() -= 1; - assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage); + assert_eq!(route_payment(&nodes[1], &[&nodes[3]], 1000000).0, payment_preimage); *nodes[0].network_payment_count.borrow_mut() -= 1; - assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage); + assert_eq!(route_payment(&nodes[2], &[&nodes[3], &nodes[5]], 1000000).0, payment_preimage); - claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage); - fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash); - claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[3], &nodes[4]], payment_preimage); + fail_payment(&nodes[2], &[&nodes[3], &nodes[5]], payment_hash); + claim_payment(&nodes[1], &[&nodes[3]], payment_preimage); } #[xtest(feature = "_externalize_tests")] @@ -471,9 +487,9 @@ pub fn test_duplicate_htlc_different_direction_onchain() { let payment_value_msats = payment_value_sats * 1000; // balancing - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1]], 8000000); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000); + let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 900_000); let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_value_msats); let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap(); @@ -534,7 +550,7 @@ pub fn test_duplicate_htlc_different_direction_onchain() { assert_eq!(node_id, node_b_id); assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain."); }, - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. }, .. 
} => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -640,8 +656,8 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); - expect_payment_failed_conditions(&nodes[0], payment_hash, false, - PaymentFailedConditions::new().blamed_chan_closed(true)); + let conditions = PaymentFailedConditions::new().blamed_chan_closed(true); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, conditions); // Make sure we handle possible duplicate fails or extra messages after failing back match post_fail_back_action { @@ -667,7 +683,8 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS); check_closed_broadcast!(nodes[2], true); - check_closed_event(&nodes[2], 1, ClosureReason::HTLCsTimedOut, false, &[node_b_id], 100_000); + let reason = ClosureReason::HTLCsTimedOut; + check_closed_event(&nodes[2], 1, reason, false, &[node_b_id], 100_000); check_added_monitors!(nodes[2], 1); mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment @@ -732,17 +749,18 @@ pub fn channel_monitor_network_test() { connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1); // Rebalance the network a bit by relaying one payment through all the channels... - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], 8000000); // Simple case with no pending HTLCs: - let error_message = "Channel force-closed"; - nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, error_message.to_string()).unwrap(); + let err = "Channel force-closed".to_string(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, err).unwrap(); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_a_id], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); { let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE); assert_eq!(node_txn.len(), 1); @@ -780,7 +798,8 @@ pub fn channel_monitor_network_test() { check_closed_broadcast!(nodes[2], true); assert_eq!(nodes[1].node.list_channels().len(), 0); assert_eq!(nodes[2].node.list_channels().len(), 1); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_c_id], 100000); + let node_b_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + 
check_closed_event!(nodes[1], 1, node_b_reason, [node_c_id], 100000); check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); macro_rules! claim_funds { @@ -806,8 +825,8 @@ pub fn channel_monitor_network_test() { // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2] // HTLC-Timeout and a nodes[3] claim against it (+ its own announces) - let error_message = "Channel force-closed"; - nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &node_d_id, error_message.to_string()).unwrap(); + let err = "Channel force-closed".to_string(); + nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &node_d_id, err).unwrap(); check_added_monitors!(nodes[2], 1); check_closed_broadcast!(nodes[2], true); let node2_commitment_txid; @@ -826,7 +845,8 @@ pub fn channel_monitor_network_test() { check_closed_broadcast!(nodes[3], true); assert_eq!(nodes[2].node.list_channels().len(), 0); assert_eq!(nodes[3].node.list_channels().len(), 1); - check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_d_id], 100000); + let node_c_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[2], 1, node_c_reason, [node_d_id], 100000); check_closed_event!(nodes[3], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); // Drop the ChannelMonitor for the previous channel to avoid it broadcasting transactions and @@ -933,7 +953,7 @@ pub fn test_justice_tx_htlc_timeout() { let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1); // A pending HTLC which will be revoked: - let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let payment_preimage_3 = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; // Get the will-be-revoked local txn from nodes[0] let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_5.2); assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx @@ -944,7 +964,7 @@ pub fn test_justice_tx_htlc_timeout() { assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].compute_txid()); assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout // Revoke the old state - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); { mine_transaction(&nodes[1], &revoked_local_txn[0]); @@ -964,6 +984,7 @@ pub fn test_justice_tx_htlc_timeout() { mine_transaction(&nodes[0], &revoked_local_txn[0]); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires + // Verify broadcast of revoked HTLC-timeout let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT); check_added_monitors!(nodes[0], 1); @@ -1004,15 +1025,16 @@ pub fn test_justice_tx_htlc_success() { let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1); // A pending HTLC which will be revoked: - let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let payment_preimage_4 = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; // Get the will-be-revoked local txn from B let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_6.2); assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.compute_txid()); 
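 // i.e. B's commitment tx spends the chan_6 funding output directly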
assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to A are present
+
 // Revoke the old state
- claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_4);
 {
 mine_transaction(&nodes[0], &revoked_local_txn[0]);
 {
@@ -1058,7 +1080,7 @@ pub fn revoked_output_claim() {
 // Only output is the full channel value back to nodes[0]:
 assert_eq!(revoked_local_txn[0].output.len(), 1);
 // Send a payment through, updating everyone's latest commitment txn
- send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
+ send_payment(&nodes[0], &[&nodes[1]], 5000000);
 // Inform nodes[1] that nodes[0] broadcast a stale tx
 mine_transaction(&nodes[1], &revoked_local_txn[0]);
@@ -1103,7 +1125,7 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment:
 if !broadcast_initial_commitment {
 // Send a payment to move the channel forward
- send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
+ send_payment(&nodes[0], &[&nodes[1]], 5_000_000);
 }
 // node[0] is going to revoke an old state, thus node[1] should be able to claim the revoked output.
@@ -1113,7 +1135,7 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment:
 let revoked_commitment_tx = &revoked_local_txn[0];
 // Send another payment, now revoking the previous commitment tx
- send_payment(&nodes[0], &vec!(&nodes[1])[..], 5_000_000);
+ send_payment(&nodes[0], &[&nodes[1]], 5_000_000);
 let justice_tx = persisters[1].justice_tx(channel_id, &revoked_commitment_tx.compute_txid()).unwrap();
 check_spends!(justice_tx, revoked_commitment_tx);
@@ -1122,13 +1144,13 @@
 mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]);
 check_added_monitors!(nodes[1], 1);
- check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
- &[node_a_id], 100_000);
+ let reason = ClosureReason::CommitmentTxConfirmed;
+ check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100_000);
 get_announce_close_broadcast_events(&nodes, 1, 0);
 check_added_monitors!(nodes[0], 1);
- check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false,
- &[node_b_id], 100_000);
+ let reason = ClosureReason::CommitmentTxConfirmed;
+ check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100_000);
 // Check that the justice tx has sent the revoked output value to nodes[1]
 let monitor = get_monitor!(nodes[1], channel_id);
@@ -1177,7 +1199,7 @@ pub fn claim_htlc_outputs() {
 check_spends!(revoked_local_txn[1], revoked_local_txn[0]);
 // Revoke the old state.
- claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); { mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -1234,8 +1256,8 @@ pub fn test_multiple_package_conflicts() { user_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; user_cfg.manually_accept_inbound_channels = true; - let node_chanmgrs = - create_node_chanmgrs(3, &node_cfgs, &[Some(user_cfg.clone()), Some(user_cfg.clone()), Some(user_cfg)]); + let configs = [Some(user_cfg.clone()), Some(user_cfg.clone()), Some(user_cfg)]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -1516,8 +1538,8 @@ pub fn test_htlc_on_chain_success() { connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); // Rebalance the network a bit by relaying one payment through all the channels... - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); @@ -1554,7 +1576,8 @@ pub fn test_htlc_on_chain_success() { assert_eq!(node_txn[1].lock_time, LockTime::ZERO); // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()])); + let txn = vec![commitment_tx[0].clone(), node_txn[0].clone(), node_txn[1].clone()]; + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn)); connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); @@ -1607,12 +1630,12 @@ pub fn test_htlc_on_chain_success() { let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut events); match nodes_2_event { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {}, + MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, .. } => {}, _ => panic!("Unexpected event"), } match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. }, .. 
} => {
 assert!(update_add_htlcs.is_empty());
 assert!(update_fail_htlcs.is_empty());
 assert_eq!(update_fulfill_htlcs.len(), 1);
@@ -1688,11 +1711,13 @@ pub fn test_htlc_on_chain_success() {
 assert_eq!(commitment_spend.input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 assert_eq!(commitment_spend.lock_time.to_consensus_u32(), nodes[1].best_block_info().1);
 assert!(commitment_spend.output[0].script_pubkey.is_p2wpkh()); // direct payment
+
 // We don't bother to check that B can claim the HTLC output on its commitment tx here as
 // we already checked the same situation with A.
 // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
- connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()]));
+ let txn = vec![node_a_commitment_tx[0].clone(), commitment_spend.clone()];
+ connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, txn));
 connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires
 check_closed_broadcast!(nodes[0], true);
 check_added_monitors!(nodes[0], 1);
@@ -1745,10 +1770,10 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
 // Rebalance the network a bit by relaying one payment through all the channels...
- send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
- send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
+ send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000);
+ send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000);
- let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+ let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
 // Broadcast legit commitment tx from C on B's chain
 let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
@@ -1761,7 +1786,7 @@
 let events = nodes[2].node.get_and_clear_pending_msg_events();
 assert_eq!(events.len(), 1);
 match events[0] {
- MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. }, .. } => {
 assert!(update_add_htlcs.is_empty());
 assert!(!update_fail_htlcs.is_empty());
 assert!(update_fulfill_htlcs.is_empty());
@@ -1780,8 +1805,8 @@
 // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain
 // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and react backward in consequence
 mine_transaction(&nodes[1], &commitment_tx[0]);
- check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false,
- &[node_c_id], 100000);
+ let reason = ClosureReason::CommitmentTxConfirmed;
+ check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100000);
 let htlc_expiry = get_monitor!(nodes[1], chan_2.2).get_claimable_balances().iter().filter_map(|bal|
 if let Balance::MaybeTimeoutClaimableHTLC { claimable_height, ..
} = bal { Some(*claimable_height) @@ -1815,7 +1840,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. }, .. } => { assert!(update_add_htlcs.is_empty()); assert!(!update_fail_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); @@ -1885,7 +1910,7 @@ pub fn test_simple_commitment_revoked_fail_backward() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. }, .. } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); assert!(update_fulfill_htlcs.is_empty()); @@ -1894,7 +1919,8 @@ pub fn test_simple_commitment_revoked_fail_backward() { nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true); - expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_2.0.contents.short_channel_id, true); + let scid = chan_2.0.contents.short_channel_id; + expect_payment_failed_with_update!(nodes[0], payment_hash, false, scid, true); }, _ => panic!("Unexpected event"), } @@ -1929,7 +1955,8 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 }); + let amt = if no_to_remote { 10_000 } else { 3_000_000 }; + let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], amt); // Get the will-be-revoked local txn from nodes[2] let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2); assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 }); @@ -1939,8 +1966,11 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let value = if use_dust { // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as // well, so HTLCs at exactly the dust limit will not be included in commitment txn. - nodes[2].node.per_peer_state.read().unwrap().get(&node_b_id) - .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000 + let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[2], nodes[1], per_peer_state_lock, peer_state_lock, chan_2.2); + chan.context().holder_dust_limit_satoshis * 1000 } else { 3000000 }; let (_, first_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); @@ -1957,7 +1987,8 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fee.is_none()); nodes[1].node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); - let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true); + let cs = updates.commitment_signed; + let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], cs, false, true, false, true); // Drop the last RAA from 3 -> 2 nodes[2].node.fail_htlc_backwards(&second_payment_hash); @@ -1999,8 +2030,9 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting // on nodes[2]'s RAA. let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000); - nodes[1].node.send_payment_with_route(route, fourth_payment_hash, - RecipientOnionFields::secret_only(fourth_payment_secret), PaymentId(fourth_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(fourth_payment_secret); + let id = PaymentId(fourth_payment_hash.0); + nodes[1].node.send_payment_with_route(route, fourth_payment_hash, onion, id).unwrap(); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); check_added_monitors!(nodes[1], 0); @@ -2055,7 +2087,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use if deliver_bs_raa { let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events); match nodes_2_event { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. }, .. } => { assert_eq!(node_c_id, *node_id); assert_eq!(update_add_htlcs.len(), 1); assert!(update_fulfill_htlcs.is_empty()); @@ -2068,7 +2100,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events); match nodes_2_event { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, node_id: _ } => { + MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, .. } => { assert_eq!(channel_id, chan_2.2); assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain."); }, @@ -2077,7 +2109,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut events); match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. 
} } => { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. }, .. } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 3); assert!(update_fulfill_htlcs.is_empty()); @@ -2178,8 +2210,9 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack. { let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); - nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let payment_event = { @@ -2194,8 +2227,9 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack. let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); { - nodes[0].node.send_payment_with_route(route, failed_payment_hash, - RecipientOnionFields::secret_only(failed_payment_secret), PaymentId(failed_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(failed_payment_secret); + let id = PaymentId(failed_payment_hash.0); + nodes[0].node.send_payment_with_route(route, failed_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -2277,7 +2311,8 @@ pub fn test_htlc_ignore_latest_remote_commitment() { connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_b_id], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); assert_eq!(node_txn.len(), 2); @@ -2313,8 +2348,9 @@ pub fn test_force_close_fail_back() { let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); let mut payment_event = { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -2346,7 +2382,9 @@ pub fn test_force_close_fail_back() { nodes[2].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); - check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_b_id], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + 
check_closed_event!(nodes[2], 1, reason, [node_b_id], 100000); + let commitment_tx = { let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't @@ -2491,19 +2529,19 @@ pub fn test_simple_peer_disconnect() { reconnect_args.send_channel_ready = (true, true); reconnect_nodes(reconnect_args); - let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; - let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; - fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1); + let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).0; + let payment_hash_2 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).1; + fail_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_hash_2); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_1); nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000); - let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0; - let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; - let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1; + let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); + let payment_preimage_4 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).0; + let payment_hash_5 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).1; + let payment_hash_6 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).1; nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); @@ -2548,8 +2586,8 @@ pub fn test_simple_peer_disconnect() { } check_added_monitors(&nodes[0], 1); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4); - fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_4); + fail_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_hash_6); } fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken_lnd: bool) { @@ -2578,8 +2616,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); let payment_event = { - nodes[0].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -2949,8 +2988,9 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { // Now try to send a second payment which will fail to send let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - nodes[0].node.send_payment_with_route(route, 
payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -2967,7 +3007,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); match events_2[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed }, .. } => { assert_eq!(*node_id, node_a_id); assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -2997,14 +3037,16 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); - nodes[0].node.peer_connected(node_b_id, &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(node_a_id, &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); @@ -3109,8 +3151,10 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { // indicates there are more HTLCs coming. let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. 
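 // Register the pending payment manually so only the first part of the MPP is sent below.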
let payment_id = PaymentId([42; 32]); - let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, - RecipientOnionFields::secret_only(payment_secret), payment_id, &route).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let session_privs = nodes[0].node + .test_add_new_pending_payment(our_payment_hash, onion, payment_id, &route).unwrap(); + nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap(); @@ -3149,7 +3193,8 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { // 100_000 msat as u64, followed by the height at which we failed back above let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec(); expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes()); - expect_payment_failed!(nodes[0], our_payment_hash, true, LocalHTLCFailureReason::IncorrectPaymentDetails, &expected_failure_data[..]); + let reason = LocalHTLCFailureReason::IncorrectPaymentDetails; + expect_payment_failed!(nodes[0], our_payment_hash, true, reason, &expected_failure_data[..]); } #[xtest(feature = "_externalize_tests")] @@ -3179,16 +3224,19 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { // Route a first payment to get the 1 -> 2 channel in awaiting_raa... let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); - nodes[1].node.send_payment_with_route(route, first_payment_hash, - RecipientOnionFields::secret_only(first_payment_secret), PaymentId(first_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(first_payment_secret); + let id = PaymentId(first_payment_hash.0); + nodes[1].node.send_payment_with_route(route, first_payment_hash, onion, id).unwrap(); assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1); check_added_monitors!(nodes[1], 1); // Now attempt to route a second payment, which should be placed in the holding cell let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] }; let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000); - sending_node.node.send_payment_with_route(route, second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(second_payment_secret); + let id = PaymentId(second_payment_hash.0); + sending_node.node.send_payment_with_route(route, second_payment_hash, onion, id).unwrap(); + if forwarded_htlc { check_added_monitors!(nodes[0], 1); let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -3204,7 +3252,9 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { connect_blocks(&nodes[1], 1); if forwarded_htlc { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]); + let fail_type = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); check_added_monitors!(nodes[1], 1); let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); @@ -3215,7 +3265,8 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { }, _ => unreachable!(), } - 
expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, chan_2.0.contents.short_channel_id, false); + let scid = chan_2.0.contents.short_channel_id; + expect_payment_failed_with_update!(nodes[0], second_payment_hash, false, scid, false); } else { expect_payment_failed!(nodes[1], second_payment_hash, false); } @@ -3238,7 +3289,9 @@ macro_rules! check_spendable_outputs { match event { Event::SpendableOutputs { mut outputs, channel_id: _ } => { for outp in outputs.drain(..) { - txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx).unwrap()); + let script = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(); + let tx = $keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), script, 253, None, &secp_ctx); + txn.push(tx.unwrap()); all_outputs.push(outp); } }, @@ -3266,11 +3319,13 @@ pub fn test_claim_sizeable_push_msat() { let node_a_id = nodes[0].node.get_our_node_id(); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000); - let error_message = "Channel force-closed"; - nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &node_a_id, error_message.to_string()).unwrap(); + let err = "Channel force-closed".to_string(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &node_a_id, err).unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_a_id], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan.3); @@ -3298,13 +3353,14 @@ pub fn test_claim_on_remote_sizeable_push_msat() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - let error_message = "Channel force-closed"; + let err = "Channel force-closed".to_string(); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000); - nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &node_b_id, error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &node_b_id, err).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_b_id], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); @@ -3335,12 +3391,12 @@ pub fn test_claim_on_remote_revoked_sizeable_push_msat() { let node_a_id = nodes[0].node.get_our_node_id(); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2); assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); 
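 // Claiming the payment revokes the commitment state captured in revoked_local_txn above.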
- claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); @@ -3419,9 +3475,9 @@ pub fn test_static_spendable_outputs_timeout_tx() { let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); // Rebalance the network a bit by relaying one payment through all the channels ... - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1]], 8000000); - let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000); + let (_, our_payment_hash, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000); let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2); assert_eq!(commitment_tx[0].input.len(), 1); @@ -3440,7 +3496,7 @@ pub fn test_static_spendable_outputs_timeout_tx() { // Check B's monitor was able to send back output descriptor event for timeout tx on A's commitment tx let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); // ChannelMonitor: timeout tx - check_spends!(node_txn[0], commitment_tx[0].clone()); + check_spends!(node_txn[0], commitment_tx[0].clone()); assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); mine_transaction(&nodes[1], &node_txn[0]); @@ -3466,12 +3522,12 @@ fn do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(split_tx: b // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.compute_txid()); - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); if split_tx { connect_blocks(&nodes[1], TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE + 1); @@ -3524,12 +3580,12 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.compute_txid()); - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); // A will generate HTLC-Timeout from revoked commitment tx mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -3550,7 +3606,8 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { // locktime. 
connect_blocks(&nodes[1], TEST_FINAL_CLTV); // B will generate justice tx from A's revoked commitment/HTLC tx - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); + let txn = vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]; + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn)); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); @@ -3597,7 +3654,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan_1.2); assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.compute_txid()); @@ -3605,7 +3662,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { // The to-be-revoked commitment tx should have one HTLC and one to_remote output assert_eq!(revoked_local_txn[0].output.len(), 2); - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); // B will generate HTLC-Success from revoked commitment tx mine_transaction(&nodes[1], &revoked_local_txn[0]); @@ -3624,7 +3681,8 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { assert_eq!(revoked_local_txn[0].output[unspent_local_txn_output].script_pubkey.len(), 2 + 20); // P2WPKH // A will generate justice tx from B's revoked commitment/HTLC tx - connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); + let txn = vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]; + connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, txn)); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); @@ -3687,8 +3745,8 @@ pub fn test_onchain_to_onchain_claim() { connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); // Rebalance the network a bit by relaying one payment through all the channels ... - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); - send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2); @@ -3715,7 +3773,8 @@ pub fn test_onchain_to_onchain_claim() { assert_eq!(c_txn[0].lock_time, LockTime::ZERO); // Success tx // So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![commitment_tx[0].clone(), c_txn[0].clone()])); + let txn = vec![commitment_tx[0].clone(), c_txn[0].clone()]; + connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn)); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -3747,7 +3806,7 @@ pub fn test_onchain_to_onchain_claim() { } match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => { + MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. }, .. } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -3791,8 +3850,15 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // It is now fixed, so we simply set the fee to the expected value here. let mut config = test_default_channel_config(); config.channel_config.forwarding_fee_base_msat = 196; - let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, - &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]); + + let configs = [ + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + ]; + let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &configs); let mut nodes = create_network(5, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3813,13 +3879,14 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { connect_blocks(&nodes[3], node_max_height * 2 - nodes[3].best_block_info().1); connect_blocks(&nodes[4], node_max_height * 2 - nodes[4].best_block_info().1); - let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 900_000); + let (our_payment_preimage, dup_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 900_000); - let payment_secret = nodes[4].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap(); + let payment_secret = nodes[4].node.create_inbound_payment_for_hash(dup_payment_hash, None, 7200, None).unwrap(); let payment_params = PaymentParameters::from_node_id(node_e_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[4].node.bolt11_invoice_features()).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[4], payment_params, 800_000); - send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[2], &nodes[4]]], 800_000, duplicate_payment_hash, payment_secret); + let path: &[&[_]] = &[&[&nodes[1], &nodes[2], &nodes[4]]]; + send_along_route_with_secret(&nodes[0], route, path, 800_000, dup_payment_hash, payment_secret); // Now mine C's commitment transaction on node B and mine enough blocks to get the HTLC timeout // transaction (which we'll split in two so that we can resolve the HTLCs differently). @@ -3862,7 +3929,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // Now give node E the payment preimage and pass it back to C. nodes[4].node.claim_funds(our_payment_preimage); - expect_payment_claimed!(nodes[4], duplicate_payment_hash, 800_000); + expect_payment_claimed!(nodes[4], dup_payment_hash, 800_000); check_added_monitors!(nodes[4], 1); let updates = get_htlc_update_msgs!(nodes[4], node_c_id); nodes[2].node.handle_update_fulfill_htlc(node_e_id, &updates.update_fulfill_htlcs[0]); @@ -3914,7 +3981,8 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_updates.update_fail_htlcs[0]); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true); - expect_payment_failed_with_update!(nodes[0], duplicate_payment_hash, false, chan_2.0.contents.short_channel_id, true); + let failing_scid = chan_2.0.contents.short_channel_id; + expect_payment_failed_with_update!(nodes[0], dup_payment_hash, false, failing_scid, true); // Finally, give node B the HTLC success transaction and ensure it extracts the preimage to // provide to node A. @@ -4007,8 +4075,16 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno // It is now fixed, so we simply set the fee to the expected value here. 
let mut config = test_default_channel_config(); config.channel_config.forwarding_fee_base_msat = 196; - let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, - &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]); + + let configs = [ + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + Some(config.clone()), + ]; + let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &configs); let nodes = create_network(6, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -4029,38 +4105,62 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000); assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2); - let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&node_c_id) - .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis; - // 0th HTLC: - let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee - // 1st HTLC: - let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee + let ds_dust_limit = { + let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[3], nodes[2], per_peer_state_lock, peer_state_lock, chan_2_3.2); + chan.context().holder_dust_limit_satoshis + }; + + // 0th HTLC (not added - smaller than dust limit + HTLC tx fee): + let path_4: &[_] = &[&nodes[2], &nodes[3], &nodes[4]]; + let (_, hash_1, ..) = route_payment(&nodes[0], path_4, ds_dust_limit*1000); + + // 1st HTLC (not added - smaller than dust limit + HTLC tx fee): + let (_, hash_2, ..) = route_payment(&nodes[0], path_4, ds_dust_limit*1000); let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000); - // 2nd HTLC: - send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee - // 3rd HTLC: - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee + + // 2nd HTLC (not added - smaller than dust limit + HTLC tx fee): + let path_5: &[&[_]] = &[&[&nodes[2], &nodes[3], &nodes[5]]]; + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_1, None, 7200, None).unwrap(); + let route_2 = route.clone(); + send_along_route_with_secret(&nodes[1], route_2, path_5, ds_dust_limit*1000, hash_1, secret); + + // 3rd HTLC (not added - smaller than dust limit + HTLC tx fee): + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_2, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route, path_5, ds_dust_limit*1000, hash_2, secret); + // 4th HTLC: - let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); + let (_, hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); + // 5th HTLC: - let (_, payment_hash_4, ..) 
= route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); + let (_, hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); + // 6th HTLC: - send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap()); + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_3, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route.clone(), path_5, 1000000, hash_3, secret); + // 7th HTLC: - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap()); + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_4, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route, path_5, 1000000, hash_4, secret); // 8th HTLC: - let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); - // 9th HTLC: + let (_, hash_5, ..) = route_payment(&nodes[0], path_4, 1000000); + + // 9th HTLC (not added - smaller than dust limit + HTLC tx fee): let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_5, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route, path_5, ds_dust_limit*1000, hash_5, secret); + + // 10th HTLC (not added - smaller than dust limit + HTLC tx fee): + let (_, hash_6, ..) = route_payment(&nodes[0], path_4, ds_dust_limit*1000); - // 10th HTLC: - let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee // 11th HTLC: let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap()); + let secret = nodes[5].node.create_inbound_payment_for_hash(hash_6, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route, path_5, 1000000, hash_6, secret); // Double-check that six of the new HTLC were added // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie, @@ -4070,17 +4170,17 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go. 
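The over/under-dust split exercised here follows from BOLT 3's trimmed-HTLC rule: an HTLC only gets its own output on the commitment transaction when its value covers the holder's dust limit plus the fee of the HTLC claim transaction. A simplified sketch of that check (the real rule distinguishes offered from received HTLCs, whose claim transactions have different weights; the numbers below are illustrative):

    // Simplified: does this HTLC appear as an output on the commitment tx,
    // or is it "trimmed" to fees (as the sub-dust HTLCs above are)?
    fn htlc_gets_output(amount_sat: u64, dust_limit_sat: u64, claim_tx_fee_sat: u64) -> bool {
        amount_sat >= dust_limit_sat + claim_tx_fee_sat
    }

    fn main() {
        // e.g. an illustrative 354 sat dust limit and 663 sat claim-tx fee
        // trim a 1000 sat HTLC: it is paid out as fee, not as an output.
        assert!(!htlc_gets_output(1000, 354, 663));
    }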
// Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs - nodes[4].node.fail_htlc_backwards(&payment_hash_1); - nodes[4].node.fail_htlc_backwards(&payment_hash_3); - nodes[4].node.fail_htlc_backwards(&payment_hash_5); - nodes[4].node.fail_htlc_backwards(&payment_hash_6); + nodes[4].node.fail_htlc_backwards(&hash_1); + nodes[4].node.fail_htlc_backwards(&hash_3); + nodes[4].node.fail_htlc_backwards(&hash_5); + nodes[4].node.fail_htlc_backwards(&hash_6); check_added_monitors!(nodes[4], 0); let failed_destinations = vec![ - HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }, - HTLCHandlingFailureType::Receive { payment_hash: payment_hash_3 }, - HTLCHandlingFailureType::Receive { payment_hash: payment_hash_5 }, - HTLCHandlingFailureType::Receive { payment_hash: payment_hash_6 }, + HTLCHandlingFailureType::Receive { payment_hash: hash_1 }, + HTLCHandlingFailureType::Receive { payment_hash: hash_3 }, + HTLCHandlingFailureType::Receive { payment_hash: hash_5 }, + HTLCHandlingFailureType::Receive { payment_hash: hash_6 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations); check_added_monitors!(nodes[4], 1); @@ -4093,13 +4193,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false); // Fail 3rd below-dust and 7th above-dust HTLCs - nodes[5].node.fail_htlc_backwards(&payment_hash_2); - nodes[5].node.fail_htlc_backwards(&payment_hash_4); + nodes[5].node.fail_htlc_backwards(&hash_2); + nodes[5].node.fail_htlc_backwards(&hash_4); check_added_monitors!(nodes[5], 0); let failed_destinations_2 = vec![ - HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }, - HTLCHandlingFailureType::Receive { payment_hash: payment_hash_4 }, + HTLCHandlingFailureType::Receive { payment_hash: hash_2 }, + HTLCHandlingFailureType::Receive { payment_hash: hash_4 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2); check_added_monitors!(nodes[5], 1); @@ -4132,7 +4232,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno if deliver_last_raa { commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false); } else { - let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true); + let cs = six_removes.commitment_signed; + commitment_signed_dance!(nodes[2], nodes[3], cs, false, true, false, true); } // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're @@ -4189,7 +4290,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let mut a_done = false; for msg in cs_msgs { match msg { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates, .. } => { // Both under-dust HTLCs and the one above-dust HTLC that we had already failed // should be failed-backwards here. let target = if *node_id == node_a_id { @@ -4232,7 +4333,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno for event in as_events.iter() { if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. 
} = event { assert!(as_faileds.insert(*payment_hash)); - if *payment_hash != payment_hash_2 { + if *payment_hash != hash_2 { assert_eq!(*payment_failed_permanently, deliver_last_raa); } else { assert!(!payment_failed_permanently); @@ -4243,13 +4344,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno } else if let &Event::PaymentFailed { .. } = event { } else { panic!("Unexpected event"); } } - assert!(as_faileds.contains(&payment_hash_1)); - assert!(as_faileds.contains(&payment_hash_2)); + assert!(as_faileds.contains(&hash_1)); + assert!(as_faileds.contains(&hash_2)); if announce_latest { - assert!(as_faileds.contains(&payment_hash_3)); - assert!(as_faileds.contains(&payment_hash_5)); + assert!(as_faileds.contains(&hash_3)); + assert!(as_faileds.contains(&hash_5)); } - assert!(as_faileds.contains(&payment_hash_6)); + assert!(as_faileds.contains(&hash_6)); let bs_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(bs_events.len(), if announce_latest { 8 } else { 6 }); @@ -4258,7 +4359,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno for event in bs_events.iter() { if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event { assert!(bs_faileds.insert(*payment_hash)); - if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 { + if *payment_hash != hash_1 && *payment_hash != hash_5 { assert_eq!(*payment_failed_permanently, deliver_last_raa); } else { assert!(!payment_failed_permanently); @@ -4269,12 +4370,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno } else if let &Event::PaymentFailed { .. } = event { } else { panic!("Unexpected event"); } } - assert!(bs_faileds.contains(&payment_hash_1)); - assert!(bs_faileds.contains(&payment_hash_2)); + assert!(bs_faileds.contains(&hash_1)); + assert!(bs_faileds.contains(&hash_2)); if announce_latest { - assert!(bs_faileds.contains(&payment_hash_4)); + assert!(bs_faileds.contains(&hash_4)); } - assert!(bs_faileds.contains(&payment_hash_5)); + assert!(bs_faileds.contains(&hash_5)); // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to @@ -4313,7 +4414,7 @@ pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { // Create some initial channels let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); - let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000); + let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9000000); let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); assert_eq!(local_txn[0].input.len(), 1); check_spends!(local_txn[0], chan_1.3); @@ -4391,7 +4492,7 @@ pub fn test_key_derivation_params() { connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); - let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000); + let (_, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 9000000); let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2); let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2); assert_eq!(local_txn_1[0].input.len(), 1); @@ -4453,11 +4554,12 @@ pub fn test_static_output_closing_tx() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); + send_payment(&nodes[0], &[&nodes[1]], 8000000); let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2; mine_transaction(&nodes[0], &closing_tx); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [node_b_id], 100000); + let reason = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); @@ -4465,7 +4567,8 @@ pub fn test_static_output_closing_tx() { check_spends!(spend_txn[0], closing_tx); mine_transaction(&nodes[1], &closing_tx); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [node_a_id], 100000); + let reason = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -4508,7 +4611,8 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { connect_block(&nodes[1], &block); block.header.prev_blockhash = block.block_hash(); } - test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }); + let htlc_type = if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }; + test_txn_broadcast(&nodes[1], &chan, None, htlc_type); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [node_a_id], 100000); @@ -4525,8 +4629,9 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 }); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let _as_update = get_htlc_update_msgs!(nodes[0], node_b_id); @@ -4696,9 +4801,9 @@ pub fn bolt2_open_channel_sane_dust_limit() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - let channel_value_satoshis=1000000; + let value_sats = 1000000; let push_msat=10001; - nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).unwrap(); + nodes[0].node.create_channel(node_b_id, value_sats, push_msat, 42, None, None).unwrap(); let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547; node0_to_1_send_open_channel.channel_reserve_satoshis = 100001; @@ -4706,7 +4811,7 @@ pub fn bolt2_open_channel_sane_dust_limit() { 
nodes[1].node.handle_open_channel(node_a_id, &node0_to_1_send_open_channel); let events = nodes[1].node.get_and_clear_pending_msg_events(); let err_msg = match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, .. } => { msg.clone() }, _ => panic!("Unexpected event"), @@ -4760,8 +4865,9 @@ pub fn test_fail_holding_cell_htlc_upon_free() { let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); // Send a payment which passes reserve checks but gets stuck in the holding cell. - nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap(); chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send); @@ -4846,13 +4952,15 @@ pub fn test_free_and_fail_holding_cell_htlcs() { let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2); // Send 2 payments which pass reserve checks but get stuck in the holding cell. - nodes[0].node.send_payment_with_route(route_1, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id_1 = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route_1, payment_hash_1, onion, id_1).unwrap(); chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1); - let payment_id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes()); - nodes[0].node.send_payment_with_route(route_2.clone(), payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), payment_id_2).unwrap(); + + let id_2 = PaymentId(nodes[0].keys_manager.get_secure_random_bytes()); + let onion = RecipientOnionFields::secret_only(payment_secret_2); + nodes[0].node.send_payment_with_route(route_2.clone(), payment_hash_2, onion, id_2).unwrap(); chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2); @@ -4876,7 +4984,7 @@ pub fn test_free_and_fail_holding_cell_htlcs() { assert_eq!(events.len(), 2); match &events[0] { &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. 
} => { - assert_eq!(payment_id_2, *payment_id.as_ref().unwrap()); + assert_eq!(id_2, *payment_id.as_ref().unwrap()); assert_eq!(payment_hash_2.clone(), *payment_hash); assert_eq!(*payment_failed_permanently, false); assert_eq!(*short_channel_id, Some(route_2.paths[0].hops[0].short_channel_id)); @@ -4978,8 +5086,9 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send); let payment_event = { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -5086,8 +5195,9 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ //First hop let mut payment_event = { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -5116,7 +5226,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ assert_eq!(events_3.len(), 1); let update_msg : (msgs::UpdateFailMalformedHTLC, Vec<msgs::CommitmentSigned>) = { match events_3[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed }, .. } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); @@ -5138,7 +5248,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_ //Confirm that handling the update_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route match events_4[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. }, ..
} => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); @@ -5169,8 +5279,9 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { // First hop let mut payment_event = { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); SendEvent::from_node(&nodes[0]) }; @@ -5260,8 +5371,13 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&node_a_id) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis; + let bs_dust_limit = { + let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[1], nodes[0], per_peer_state_lock, peer_state_lock, chan.2); + chan.context().holder_dust_limit_satoshis + }; // We route 2 dust-HTLCs between A and B let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); @@ -5352,13 +5468,17 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&node_a_id) - .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis; + let bs_dust_limit = { + let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[1], nodes[0], per_peer_state_lock, peer_state_lock, chan.2); + chan.context().holder_dust_limit_satoshis + }; let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000); @@ -5369,7 +5489,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { // We revoked bs_commitment_tx if revoked { let (payment_preimage_3, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 1000000); - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); } let mut timeout_tx = Vec::new(); @@ -5476,7 +5596,9 @@ pub fn test_user_configurable_csv_delay() { // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Chanel::accept_channel() nodes[0].node.create_channel(node_b_id, 1000000, 1000000, 42, None, None).unwrap(); - nodes[1].node.handle_open_channel(node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id)); + let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + nodes[1].node.handle_open_channel(node_a_id, &open_channel); + let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); accept_channel.common_fields.to_self_delay = 200; nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); @@ -5490,7 +5612,8 @@ pub fn test_user_configurable_csv_delay() { _ => { panic!(); } } } else { panic!(); } - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg }, [node_b_id], 1000000); + let reason = ClosureReason::ProcessingError { err: reason_msg }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 1000000); // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new() nodes[1].node.create_channel(node_a_id, 1000000, 1000000, 42, None, None).unwrap(); @@ -5534,10 +5657,12 @@ pub fn test_check_htlc_underpaying() { let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000); let route = get_route(&node_a_id, &route_params, &nodes[0].network_graph.read_only(), None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); + let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]); let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap(); - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -5558,7 +5683,7 @@ pub fn test_check_htlc_underpaying() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let (update_fail_htlc, commitment_signed) = match events[0] { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed }, .. 
} => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); @@ -5576,7 +5701,8 @@ pub fn test_check_htlc_underpaying() { // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32 let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec(); expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes()); - expect_payment_failed!(nodes[0], our_payment_hash, true, LocalHTLCFailureReason::IncorrectPaymentDetails, &expected_failure_data[..]); + let reason = LocalHTLCFailureReason::IncorrectPaymentDetails; + expect_payment_failed!(nodes[0], our_payment_hash, true, reason, &expected_failure_data[..]); } #[xtest(feature = "_externalize_tests")] @@ -5622,14 +5748,15 @@ pub fn test_announce_disable_channels() { } } // Reconnect peers - nodes[0].node.peer_connected(node_b_id, &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 3); - nodes[1].node.peer_connected(node_a_id, &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 3); @@ -5688,11 +5815,11 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0; + let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; let payment_params = PaymentParameters::from_node_id(node_a_id, TEST_FINAL_CLTV) .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap(); let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000); - send_along_route(&nodes[1], route, &vec!(&nodes[0])[..], 3000000); + send_along_route(&nodes[1], route, &[&nodes[0]], 3000000); let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2); // Revoked commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC @@ -5704,7 +5831,7 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() { let header_114 = connect_blocks(&nodes[1], 14); // Actually revoke tx by claiming a HTLC - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()])); check_added_monitors!(nodes[1], 1); @@ -5806,7 +5933,7 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); // Revoke local commitment tx - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()])); @@ -5951,7 +6078,7 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { let chan = 
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value_a_msats); - route_payment(&nodes[1], &vec!(&nodes[0])[..], htlc_value_b_msats); + route_payment(&nodes[1], &[&nodes[0]], htlc_value_b_msats); // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC let remote_txn = get_local_commitment_txn!(nodes[0], chan.2); @@ -5965,6 +6092,7 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { mine_transaction(&nodes[1], &remote_txn[0]); check_added_monitors!(nodes[1], 2); connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires + // depending on the block connection style, node 1 may have broadcast either 3 or 10 txs remote_txn @@ -6081,22 +6209,25 @@ pub fn test_counterparty_raa_skip_no_crash() { keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1).unwrap(); keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; - next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(), - &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2).unwrap()).unwrap()); - } - - nodes[1].node.handle_revoke_and_ack(node_a_id, - &msgs::RevokeAndACK { - channel_id, - per_commitment_secret, - next_per_commitment_point, - #[cfg(taproot)] - next_local_nonce: None, - }); + let sec = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2).unwrap(); + let key = SecretKey::from_slice(&sec).unwrap(); + next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(), &key); + } + + let raa = msgs::RevokeAndACK { + channel_id, + per_commitment_secret, + next_per_commitment_point, + #[cfg(taproot)] + next_local_nonce: None, + }; + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack"); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() } - , [node_a_id], 100000); + let reason = ClosureReason::ProcessingError { + err: "Received an unexpected revoke_and_ack".to_string(), + }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -6113,15 +6244,15 @@ pub fn test_bump_txn_sanitize_tracking_maps() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); // Lock HTLC in both directions - let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000); - let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000); + let (payment_preimage_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000); + let (_, payment_hash_2, ..) 
= route_payment(&nodes[1], &[&nodes[0]], 9_000_000); let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2); assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); // Revoke local commitment tx - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); // Broadcast set of revoked txn on A connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH); @@ -6258,7 +6389,13 @@ pub fn test_channel_update_has_correct_htlc_maximum_msat() { let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[Some(config_30_percent), Some(config_50_percent), Some(config_95_percent), Some(config_100_percent)]); + let configs = [ + Some(config_30_percent), + Some(config_50_percent), + Some(config_95_percent), + Some(config_100_percent), + ]; + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &configs); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); let channel_value_satoshis = 100000; @@ -6331,7 +6468,11 @@ pub fn test_manually_accept_inbound_channel_request() { let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { - nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 23, Some(config_overrides)).unwrap(); + let config = Some(config_overrides); + nodes[1] + .node + .accept_inbound_channel(&temporary_channel_id, &node_a_id, 23, config) + .unwrap(); } _ => panic!("Unexpected event"), } @@ -6354,8 +6495,12 @@ pub fn test_manually_accept_inbound_channel_request() { // Continue channel opening process until channel update messages are sent. nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); - let (temporary_channel_id, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); - nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, node_b_id, funding_outpoint).unwrap(); + let (temp_channel_id, tx, funding_outpoint) = + create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); + nodes[0] + .node + .unsafe_manual_funding_transaction_generated(temp_channel_id, node_b_id, funding_outpoint) + .unwrap(); check_added_monitors!(nodes[0], 0); let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); @@ -6420,11 +6565,14 @@ pub fn test_manually_reject_inbound_channel_request() { // Assert that `nodes[1]` has no `MessageSendEvent::SendAcceptChannel` in `msg_events` before // rejecting the inbound channel request. assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - let error_message = "Channel force-closed"; + let err = "Channel force-closed".to_string(); let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. 
} => { - nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &node_a_id, error_message.to_string()).unwrap(); + nodes[1] + .node + .force_close_broadcasting_latest_txn(&temporary_channel_id, &node_a_id, err) + .unwrap(); } _ => panic!("Unexpected event"), } @@ -6536,7 +6684,7 @@ pub fn test_onion_value_mpp_set_calculation() { let total_msat = 100_000; let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; - let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat); + let (mut route, hash, preimage, secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat); let sample_path = route.paths.pop().unwrap(); let mut path_1 = sample_path.clone(); @@ -6556,11 +6704,17 @@ pub fn test_onion_value_mpp_set_calculation() { route.paths.push(path_2); // Send payment - let payment_id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); - let onion_session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap(); - nodes[0].node.test_send_payment_internal(&route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap(); + let id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); + let onion = RecipientOnionFields::secret_only(secret); + let onion_session_privs = nodes[0] + .node + .test_add_new_pending_payment(hash, onion.clone(), id, &route) + .unwrap(); + let amt = Some(total_msat); + nodes[0] + .node + .test_send_payment_internal(&route, hash, onion, None, id, amt, onion_session_privs) + .unwrap(); check_added_monitors!(nodes[0], expected_paths.len()); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -6579,7 +6733,7 @@ pub fn test_onion_value_mpp_set_calculation() { let height = nodes[0].best_block_info().1; let session_priv = SecretKey::from_slice(&session_priv).unwrap(); let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv); - let recipient_onion_fields = RecipientOnionFields::secret_only(our_payment_secret); + let recipient_onion_fields = RecipientOnionFields::secret_only(secret); let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000, &recipient_onion_fields, height + 1, &None, None, None).unwrap(); // Edit amt_to_forward to simulate the sender having set @@ -6589,7 +6743,9 @@ pub fn test_onion_value_mpp_set_calculation() { } = onion_payloads[1] { *sender_intended_htlc_amt_msat = 99_000; } else { panic!() } - let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap(); + let new_onion_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &hash) + .unwrap(); payment_event.msgs[0].onion_routing_packet = new_onion_packet; } @@ -6614,10 +6770,10 @@ pub fn test_onion_value_mpp_set_calculation() { // Second path let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events); - pass_along_path(&nodes[0], expected_paths[1], 101_000, our_payment_hash.clone(), Some(our_payment_secret), ev, true, None); + pass_along_path(&nodes[0], expected_paths[1], 101_000, hash, Some(secret), ev, true, None); claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], expected_paths, our_payment_preimage) + 
ClaimAlongRouteArgs::new(&nodes[0], expected_paths, preimage) ); } @@ -6639,9 +6795,12 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ let mut dst_chan_ids = Vec::with_capacity(routing_node_count); for i in 0..routing_node_count { let routing_node = 2 + i; - let src_chan_id = create_announced_chan_between_nodes(&nodes, src_idx, routing_node).0.contents.short_channel_id; + let src_chan = create_announced_chan_between_nodes(&nodes, src_idx, routing_node); + let src_chan_id = src_chan.0.contents.short_channel_id; src_chan_ids.push(src_chan_id); - let dst_chan_id = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx).0.contents.short_channel_id; + + let dst_chan = create_announced_chan_between_nodes(&nodes, routing_node, dst_idx); + let dst_chan_id = dst_chan.0.contents.short_channel_id; dst_chan_ids.push(dst_chan_id); let path = vec![&nodes[routing_node], &nodes[dst_idx]]; expected_paths.push(path); @@ -6650,7 +6809,7 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ // Create a route for each amount let example_amount = 100000; - let (mut route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount); + let (mut route, hash, preimage, secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount); let sample_path = route.paths.pop().unwrap(); for i in 0..routing_node_count { let routing_node = 2 + i; @@ -6664,11 +6823,18 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ } // Send payment with manually set total_msat - let payment_id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes()); - let onion_session_privs = nodes[src_idx].node.test_add_new_pending_payment(our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), payment_id, &route).unwrap(); - nodes[src_idx].node.test_send_payment_internal(&route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), None, payment_id, Some(total_msat), onion_session_privs).unwrap(); + let id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes()); + let onion = RecipientOnionFields::secret_only(secret); + let onion_session_privs = nodes[src_idx] + .node + .test_add_new_pending_payment(hash, onion, id, &route) + .unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + let amt = Some(total_msat); + nodes[src_idx] + .node + .test_send_payment_internal(&route, hash, onion, None, id, amt, onion_session_privs) + .unwrap(); check_added_monitors!(nodes[src_idx], expected_paths.len()); let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events(); @@ -6680,11 +6846,11 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ let current_path_amount = msat_amounts[path_idx]; amount_received += current_path_amount; let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat; - pass_along_path(&nodes[src_idx], expected_path, amount_received, our_payment_hash.clone(), Some(our_payment_secret), ev, became_claimable_now, None); + pass_along_path(&nodes[src_idx], expected_path, amount_received, hash.clone(), Some(secret), ev, became_claimable_now, None); } claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, our_payment_preimage) + ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, preimage) ); } @@ -6719,9 +6885,10 @@ pub fn test_simple_mpp() { route.paths[1].hops[0].pubkey = node_c_id; 
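These MPP tests all hinge on the final-hop onion fields: every shard carries its own path amount but repeats the same total_msat and payment_secret, and the recipient only releases the preimage once the shards it has received sum to total_msat. A rough sketch of that bookkeeping (the struct and its field names are illustrative, not the onion wire format):

    // Illustrative final-hop data for one MPP shard.
    struct MppShard {
        amt_this_path_msat: u64,  // what this shard alone delivers
        total_msat: u64,          // whole-payment value, identical on every shard
        payment_secret: [u8; 32], // identical on every shard; gates against probing
    }

    // The recipient holds shards until together they cover the advertised total.
    fn claimable(shards: &[MppShard], total_msat: u64) -> bool {
        shards.iter().map(|s| s.amt_this_path_msat).sum::<u64>() >= total_msat
    }

    fn main() {
        let secret = [7u8; 32];
        let shards = vec![
            MppShard { amt_this_path_msat: 2_000_000, total_msat: 4_000_000, payment_secret: secret },
            MppShard { amt_this_path_msat: 2_000_000, total_msat: 4_000_000, payment_secret: secret },
        ];
        assert!(claimable(&shards, 4_000_000));
    }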
route.paths[1].hops[0].short_channel_id = chan_2_id; route.paths[1].hops[1].short_channel_id = chan_4_id; - send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret); + let paths: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + send_along_route_with_secret(&nodes[0], route, paths, 200_000, payment_hash, payment_secret); claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage) + ClaimAlongRouteArgs::new(&nodes[0], paths, payment_preimage) ); } @@ -6740,8 +6907,10 @@ pub fn test_preimage_storage() { { let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); let mut payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -6779,8 +6948,8 @@ pub fn test_bad_secret_hash() { create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; - let random_payment_hash = PaymentHash([42; 32]); - let random_payment_secret = PaymentSecret([43; 32]); + let random_hash = PaymentHash([42; 32]); + let random_secret = PaymentSecret([43; 32]); let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); @@ -6812,27 +6981,28 @@ pub fn test_bad_secret_hash() { } } - let expected_error_code = LocalHTLCFailureReason::IncorrectPaymentDetails; + let expected_err_code = LocalHTLCFailureReason::IncorrectPaymentDetails; // Error data is the HTLC value (100,000) and current block height - let expected_error_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8]; + let expected_err_data = [0, 0, 0, 0, 0, 1, 0x86, 0xa0, 0, 0, 0, CHAN_CONFIRM_DEPTH as u8]; // Send a payment with the right payment hash but the wrong payment secret - nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, - RecipientOnionFields::secret_only(random_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(random_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap(); handle_unknown_invalid_payment_data!(our_payment_hash); - expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data); + expect_payment_failed!(nodes[0], our_payment_hash, true, expected_err_code, expected_err_data); // Send a payment with a random payment hash, but the right payment secret - nodes[0].node.send_payment_with_route(route.clone(), random_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(random_payment_hash.0)).unwrap(); - handle_unknown_invalid_payment_data!(random_payment_hash); - expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data); + let onion = 
RecipientOnionFields::secret_only(our_payment_secret); + nodes[0].node.send_payment_with_route(route.clone(), random_hash, onion, id).unwrap(); + handle_unknown_invalid_payment_data!(random_hash); + expect_payment_failed!(nodes[0], random_hash, true, expected_err_code, expected_err_data); // Send a payment with a random payment hash and random payment secret - nodes[0].node.send_payment_with_route(route, random_payment_hash, - RecipientOnionFields::secret_only(random_payment_secret), PaymentId(random_payment_hash.0)).unwrap(); - handle_unknown_invalid_payment_data!(random_payment_hash); - expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data); + let onion = RecipientOnionFields::secret_only(random_secret); + nodes[0].node.send_payment_with_route(route, random_hash, onion, id).unwrap(); + handle_unknown_invalid_payment_data!(random_hash); + expect_payment_failed!(nodes[0], random_hash, true, expected_err_code, expected_err_data); } #[xtest(feature = "_externalize_tests")] @@ -6858,7 +7028,7 @@ pub fn test_update_err_monitor_lockdown() { let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); // Rebalance the network to generate htlc in the two directions - send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); + send_payment(&nodes[0], &[&nodes[1]], 10_000_000); // Route a HTLC from node 0 to node 1 (but don't settle) let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000); @@ -6894,9 +7064,11 @@ pub fn test_update_err_monitor_lockdown() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() { + let mut per_peer_lock; + let mut peer_state_lock; + let chan_ref = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1.2); + if let Some(channel) = chan_ref.as_funded_mut() { assert_eq!(updates.commitment_signed.len(), 1); if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) { assert_eq!(watchtower.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::InProgress); @@ -6931,10 +7103,10 @@ pub fn test_concurrent_monitor_claim() { let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); // Rebalance the network to generate htlc in the two directions - send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); + send_payment(&nodes[0], &[&nodes[1]], 10_000_000); // Route a HTLC from node 0 to node 1 (but don't settle) - route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0; + route_payment(&nodes[0], &[&nodes[1]], 9_000_000).0; // Copy ChainMonitor to simulate watchtower Alice and update block height her ChannelMonitor timeout HTLC onchain let chain_source = test_utils::TestChainSource::new(Network::Testnet); @@ -6959,7 +7131,8 @@ pub fn test_concurrent_monitor_claim() { // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time // requirements here. 
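The constant computed just below encodes when the holder will claim a timed-out HTLC on chain: the HTLC's CLTV expiry (channel confirmation depth plus the final CLTV delta) plus a grace period for block-delivery latency. A sketch of the arithmetic with assumed stand-in values; the real constants live in LDK's test utilities and are not restated here:

    // Stand-in values, assumed for illustration only.
    const CHAN_CONFIRM_DEPTH: u32 = 10;
    const TEST_FINAL_CLTV: u32 = 70;
    const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;

    // Broadcast height = HTLC expiry plus a latency grace period.
    const HTLC_TIMEOUT_BROADCAST: u32 =
        CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS;

    fn main() {
        assert_eq!(HTLC_TIMEOUT_BROADCAST, 84);
    }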
const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS; - alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, (block.clone(), HTLC_TIMEOUT_BROADCAST)); + let next_block = (block.clone(), HTLC_TIMEOUT_BROADCAST); + alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, next_block); watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST); // Watchtower Alice should have broadcast a commitment/HTLC-timeout @@ -6987,21 +7160,25 @@ pub fn test_concurrent_monitor_claim() { assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); watchtower }; - watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1); + let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); + watchtower_bob.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST - 1); // Route another payment to generate another update with still previous HTLC pending let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000); - nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(updates.update_add_htlcs.len(), 1); nodes[0].node.handle_update_add_htlc(node_b_id, &updates.update_add_htlcs[0]); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() { + let mut per_peer_lock; + let mut peer_state_lock; + let chan_ref = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1.2); + if let Some(channel) = chan_ref.as_funded_mut() { assert_eq!(updates.commitment_signed.len(), 1); if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update @@ -7071,17 +7248,22 @@ pub fn test_pre_lockin_no_chan_closed_update() { nodes[0].node.handle_accept_channel(node_b_id, &accept_chan_msg); // Move the first channel through the funding flow... 
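The lines below drive v1 channel funding by hand; for orientation, this is the message order the handlers used throughout this file implement. A sketch of the handshake as a plain enumeration (the type is illustrative, not an LDK API):

    // The v1 funding handshake, in the order the handlers drive it here.
    #[derive(Debug)]
    enum FundingStep {
        OpenChannel,    // opener: create_channel
        AcceptChannel,  // acceptor: handle_open_channel
        FundingCreated, // opener: funding_transaction_generated
        FundingSigned,  // acceptor: handle_funding_created
        Broadcast,      // opener: handle_funding_signed -> tx hits the mempool
    }

    fn main() {
        use FundingStep::*;
        // This test stops after FundingCreated and delivers an error instead,
        // then asserts that no ChannelMonitor was ever installed.
        let handshake = [OpenChannel, AcceptChannel, FundingCreated, FundingSigned, Broadcast];
        println!("{:?}", handshake);
    }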
- let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42); + let (temp_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42); - nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).unwrap(); + nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).unwrap(); check_added_monitors!(nodes[0], 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }); - nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() }); + + let err_msg = msgs::ErrorMessage { channel_id, data: "Hi".to_owned() }; + nodes[0].node.handle_error(node_b_id, &err_msg); + assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty()); - check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true, - [node_b_id], 100000); + let reason = ClosureReason::CounterpartyForceClosed { + peer_msg: UntrustedString("Hi".to_string()), + }; + check_closed_event!(nodes[0], 2, reason, true, [node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -7103,8 +7285,8 @@ pub fn test_htlc_no_detection() { // Create some initial channels let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); - send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000); - let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000); + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (_, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 2_000_000); let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); assert_eq!(local_txn[0].input.len(), 1); assert_eq!(local_txn[0].output.len(), 3); @@ -7185,11 +7367,17 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain force_closing_node = 1; counterparty_node = 0; } - let error_message = "Channel force-closed"; - nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id(), error_message.to_string()).unwrap(); + let err = "Channel force-closed".to_string(); + let counterparty_node_id = nodes[counterparty_node].node.get_our_node_id(); + nodes[force_closing_node] + .node + .force_close_broadcasting_latest_txn(&chan_ab.2, &counterparty_node_id, err) + .unwrap(); check_closed_broadcast!(nodes[force_closing_node], true); check_added_monitors!(nodes[force_closing_node], 1); - check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[counterparty_node].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[force_closing_node], 1, reason, [counterparty_node_id], 100000); + if go_onchain_before_fulfill { let txn_to_broadcast = match broadcast_alice { true => alice_txn.clone(), @@ -7199,7 +7387,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain if broadcast_alice { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } } @@ -7219,7 +7408,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain nodes[1].node.handle_update_fulfill_htlc(node_c_id, &carol_updates.update_fulfill_htlcs[0]); let went_onchain = go_onchain_before_fulfill || force_closing_node == 1; - expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false); + let fee = if went_onchain { None } else { Some(1000) }; + expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], fee, went_onchain, false); // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage. 
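The preimage handoff being tested works because an on-chain HTLC-success spend must reveal the payment preimage in its witness, so a watching node can lift it straight out. A simplified sketch, assuming only that the preimage is the witness's lone 32-byte push; real monitors match the witness against the known HTLC script template instead of guessing by length:

    // Scan a claim input's witness items for the 32-byte payment preimage.
    fn preimage_from_witness(witness: &[Vec<u8>]) -> Option<[u8; 32]> {
        witness.iter().find(|item| item.len() == 32).map(|item| {
            let mut preimage = [0u8; 32];
            preimage.copy_from_slice(item);
            preimage
        })
    }

    fn main() {
        let witness = vec![vec![0u8; 71], vec![42u8; 32], vec![1u8; 5]];
        assert_eq!(preimage_from_witness(&witness), Some([42u8; 32]));
    }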
if !go_onchain_before_fulfill && broadcast_alice { let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -7280,7 +7470,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain if broadcast_alice { check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); if broadcast_alice { @@ -7410,20 +7601,28 @@ pub fn test_peer_funding_sidechannel() { _ => panic!("Unexpected event {:?}", cs_funding_events), } - nodes[1].node.funding_transaction_generated_unchecked(temp_chan_id_ca, node_a_id, tx.clone(), funding_output.index).unwrap(); + let output_idx = funding_output.index; + nodes[1] + .node + .funding_transaction_generated_unchecked(temp_chan_id_ca, node_a_id, tx.clone(), output_idx) + .unwrap(); let funding_created_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_a_id); nodes[0].node.handle_funding_created(node_b_id, &funding_created_msg); get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, node_b_id); expect_channel_pending_event(&nodes[0], &node_b_id); check_added_monitors!(nodes[0], 1); - let res = nodes[0].node.funding_transaction_generated(temp_chan_id_ab, node_b_id, tx.clone()); + let res = nodes[0].node.funding_transaction_generated(temp_chan_id_ab, node_b_id, tx); let err_msg = format!("{:?}", res.unwrap_err()); assert!(err_msg.contains("An existing channel using ID")); assert!(err_msg.contains("is open with peer")); + let channel_id = ChannelId::v1_from_funding_outpoint(funding_output); - let reason = ClosureReason::ProcessingError { err: format!("An existing channel using ID {} is open with peer {}", channel_id, node_b_id), }; - check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_chan_id_ab, true, reason)]); + let err = + format!("An existing channel using ID {} is open with peer {}", channel_id, node_b_id); + let reason = ClosureReason::ProcessingError { err }; + let close_event = ExpectedCloseEvent::from_id_reason(temp_chan_id_ab, true, reason); + check_closed_events(&nodes[0], &[close_event]); get_err_msg(&nodes[0], &node_b_id); } @@ -7467,8 +7666,10 @@ pub fn test_duplicate_conflicting_funding_from_second_peer() { // watch_channel call which failed), but zero monitor updates. check_added_monitors!(nodes[0], 1); get_err_msg(&nodes[0], &node_b_id); - let err_reason = ClosureReason::ProcessingError { err: "Channel ID was a duplicate".to_owned() }; - check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_chan_id, true, err_reason)]); + + let reason = ClosureReason::ProcessingError { err: "Channel ID was a duplicate".to_owned() }; + let close_event = ExpectedCloseEvent::from_id_reason(temp_chan_id, true, reason); + check_closed_events(&nodes[0], &[close_event]); } #[xtest(feature = "_externalize_tests")] @@ -7501,9 +7702,9 @@ pub fn test_duplicate_funding_err_in_funding() { // Now that we have a second channel with the same funding txo, send a bogus funding message // and let nodes[1] remove the inbound channel. 
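The collision engineered below is possible because a v1 channel_id is fully determined by the funding outpoint: BOLT 2 defines it as the funding txid with its last two bytes XORed with the big-endian funding output index. A standalone sketch of that derivation:

    // BOLT 2: channel_id = funding_txid XOR funding_output_index,
    // with the index folded into the two low bytes of the serialized txid.
    fn v1_channel_id(funding_txid: [u8; 32], funding_output_index: u16) -> [u8; 32] {
        let mut id = funding_txid;
        id[30] ^= (funding_output_index >> 8) as u8;
        id[31] ^= (funding_output_index & 0xff) as u8;
        id
    }

    fn main() {
        let txid = [0xab_u8; 32];
        let id = v1_channel_id(txid, 1);
        assert_eq!(id[30], 0xab);
        assert_eq!(id[31], 0xab ^ 0x01);
    }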
- let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &node_b_id, 100_000, 42); + let (_, fund_tx, _) = create_funding_transaction(&nodes[2], &node_b_id, 100_000, 42); - nodes[2].node.funding_transaction_generated(node_c_temp_chan_id, node_b_id, funding_tx).unwrap(); + nodes[2].node.funding_transaction_generated(node_c_temp_chan_id, node_b_id, fund_tx).unwrap(); let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, node_b_id); funding_created_msg.temporary_channel_id = real_channel_id; @@ -7560,9 +7761,9 @@ pub fn test_duplicate_chan_id() { } // Move the first channel through the funding flow... - let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42); + let (temp_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42); - nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).unwrap(); + nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).unwrap(); check_added_monitors!(nodes[0], 0); let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); @@ -7620,7 +7821,9 @@ pub fn test_duplicate_chan_id() { // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we // try to create another channel. Instead, we drop the channel entirely here (leaving the // channelmanager in a possibly nonsense state instead). - let mut channel = a_peer_state.channel_by_id.remove(&open_chan_2_msg.common_fields.temporary_channel_id).unwrap(); + let chan_id = open_chan_2_msg.common_fields.temporary_channel_id; + let mut channel = a_peer_state.channel_by_id.remove(&chan_id).unwrap(); + if let Some(mut chan) = channel.as_unfunded_outbound_v1_mut() { let logger = test_utils::TestLogger::new(); chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap() @@ -7634,11 +7837,12 @@ pub fn test_duplicate_chan_id() { // without trying to persist the `ChannelMonitor`. check_added_monitors!(nodes[1], 0); - check_closed_events(&nodes[1], &[ - ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError { - err: "Already had channel with the new channel_id".to_owned() - }) - ]); + let reason = ClosureReason::ProcessingError { + err: "Already had channel with the new channel_id".to_owned() + }; + let close_event = + ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, reason); + check_closed_events(&nodes[1], &[close_event]); // ...still, nodes[1] will reject the duplicate channel. 
{ @@ -7709,8 +7913,11 @@ pub fn test_error_chans_closed() { nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() }); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], false); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }, - [node_b_id], 100000); + + let reason = + ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); + assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); assert_eq!(nodes[0].node.list_usable_channels().len(), 2); assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2); @@ -7720,8 +7927,11 @@ pub fn test_error_chans_closed() { let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() }); check_added_monitors!(nodes[0], 2); - check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }, - [node_b_id; 2], 100000); + + let reason = + ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) }; + check_closed_event!(nodes[0], 2, reason, [node_b_id; 2], 100000); + let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); match events[0] { @@ -7801,8 +8011,10 @@ pub fn test_invalid_funding_tx() { let expected_err = "funding tx had wrong script/value or output index"; confirm_transaction_at(&nodes[1], &tx, 1); - check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: expected_err.to_string() }, - [node_a_id], 100000); + + let reason = ClosureReason::ProcessingError { err: expected_err.to_string() }; + check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); + check_added_monitors!(nodes[1], 1); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); @@ -7863,9 +8075,9 @@ pub fn test_coinbase_funding_tx() { nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); // Create the coinbase funding transaction. - let (temporary_channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &node_b_id, 100000, 42); + let (channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &node_b_id, 100000, 42); - nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).unwrap(); + nodes[0].node.funding_transaction_generated(channel_id, node_b_id, tx.clone()).unwrap(); check_added_monitors!(nodes[0], 0); let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); @@ -7933,10 +8145,13 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t let (_, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); nodes[1].node.peer_disconnected(node_c_id); nodes[2].node.peer_disconnected(node_b_id); - let error_message = "Channel force-closed"; - nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &node_c_id, error_message.to_string()).unwrap(); + + let err = "Channel force-closed".to_string(); + nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &node_c_id, err).unwrap(); + check_closed_broadcast!(nodes[1], true); - check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [node_c_id], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[1], 1, reason, [node_c_id], 100000); check_added_monitors!(nodes[1], 1); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); @@ -7979,7 +8194,9 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t assert!(updates.update_fee.is_none()); nodes[0].node.handle_update_fail_htlc(node_b_id, &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true); - expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true); + + let failed_scid = chan_announce.contents.short_channel_id; + expect_payment_failed_with_update!(nodes[0], payment_hash, false, failed_scid, true); // We should also generate a SpendableOutputs event with the to_self output (once the // timelock is up). @@ -8019,8 +8236,9 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]); { - nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -8033,8 +8251,9 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { { // Note that we use a different PaymentId here to allow us to duplicativly pay - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_secret.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_secret.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -8085,7 +8304,8 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false); - expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new()); + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, conditions); claim_payment(&nodes[0], &[&nodes[1]], 
our_payment_preimage); } @@ -8136,33 +8356,44 @@ pub fn test_inconsistent_mpp_params() { core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } }); - let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]); + let (preimage, hash, secret) = get_payment_preimage_hash!(&nodes[3]); let cur_height = nodes[0].best_block_info().1; - let payment_id = PaymentId([42; 32]); + let id = PaymentId([42; 32]); let session_privs = { // We create a fake route here so that we start with three pending HTLCs, which we'll // ultimately have, just not right away. let mut dup_route = route.clone(); dup_route.paths.push(route.paths[1].clone()); - nodes[0].node.test_add_new_pending_payment(our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), payment_id, &dup_route).unwrap() + let onion = RecipientOnionFields::secret_only(secret); + nodes[0].node.test_add_new_pending_payment(hash, onion, id, &dup_route).unwrap() }; - nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id, - &None, session_privs[0]).unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + let path_a = &route.paths[0]; + let real_amt = 15_000_000; + let priv_a = session_privs[0]; + nodes[0] + .node + .test_send_payment_along_path(path_a, &hash, onion, real_amt, cur_height, id, &None, priv_a) + .unwrap(); check_added_monitors!(nodes[0], 1); - { - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None); - } + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let path_a = &[&nodes[1], &nodes[3]]; + let event = events.pop().unwrap(); + pass_along_path(&nodes[0], path_a, real_amt, hash, Some(secret), event, false, None); assert!(nodes[3].node.get_and_clear_pending_events().is_empty()); - nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), 14_000_000, cur_height, payment_id, &None, session_privs[1]).unwrap(); + let path_b = &route.paths[1]; + let onion = RecipientOnionFields::secret_only(secret); + let amt_b = 14_000_000; + let priv_b = session_privs[1]; + nodes[0] + .node + .test_send_payment_along_path(path_b, &hash, onion, amt_b, cur_height, id, &None, priv_b) + .unwrap(); check_added_monitors!(nodes[0], 1); { @@ -8190,7 +8421,8 @@ pub fn test_inconsistent_mpp_params() { } expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], [fail_type]); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -8206,21 +8438,28 @@ pub fn test_inconsistent_mpp_params() { nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false); - expect_payment_failed_conditions(&nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain()); + let 
conditions = PaymentFailedConditions::new().mpp_parts_remain(); + expect_payment_failed_conditions(&nodes[0], hash, true, conditions); - nodes[0].node.test_send_payment_along_path(&route.paths[1], &our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), 15_000_000, cur_height, payment_id, - &None, session_privs[2]).unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + let path_b = &route.paths[1]; + let priv_c = session_privs[2]; + nodes[0] + .node + .test_send_payment_along_path(path_b, &hash, onion, real_amt, cur_height, id, &None, priv_c) + .unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None); + let event = events.pop().unwrap(); + let path_b = &[&nodes[2], &nodes[3]]; + pass_along_path(&nodes[0], path_b, real_amt, hash, Some(secret), event, true, None); do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], our_payment_preimage) + ClaimAlongRouteArgs::new(&nodes[0], &[path_a, path_b], preimage) ); - expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true); + expect_payment_sent(&nodes[0], preimage, Some(None), true, true); } #[xtest(feature = "_externalize_tests")] @@ -8241,7 +8480,7 @@ pub fn test_double_partial_claim() { create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000); + let (mut route, payment_hash, payment_preimage, secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000); assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first @@ -8249,7 +8488,8 @@ pub fn test_double_partial_claim() { core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } }); - send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret); + let paths: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; + send_along_route_with_secret(&nodes[0], route.clone(), paths, 15_000_000, payment_hash, secret); // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant) // amount of time to respond to. @@ -8263,17 +8503,20 @@ pub fn test_double_partial_claim() { ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations); - pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash, PaymentFailureReason::RecipientRejected); + let reason = PaymentFailureReason::RecipientRejected; + pass_failed_payment_back(&nodes[0], paths, false, payment_hash, reason); // nodes[1] now retries one of the two paths... 
- nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 2); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); - let node_1_msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, false, None); + let msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); + let path = &[&nodes[1], &nodes[3]]; + pass_along_path(&nodes[0], path, 15_000_000, payment_hash, Some(secret), msgs, false, None); // At this point nodes[3] has received one half of the payment, and the user goes to handle // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim. @@ -8355,12 +8598,12 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); - let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); + let (chan_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); if on_holder_tx { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - let channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id); + let mut per_peer_lock; + let mut peer_state_lock; + let channel = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); if let Some(mut chan) = channel.as_unfunded_outbound_v1_mut() { chan.context.holder_dust_limit_satoshis = 546; } else { @@ -8368,7 +8611,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e } } - nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).unwrap(); + nodes[0].node.funding_transaction_generated(chan_id, node_b_id, tx.clone()).unwrap(); nodes[1].node.handle_funding_created(node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id)); check_added_monitors!(nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); @@ -8452,19 +8695,30 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 }; // With default dust exposure: 5000 sats if on_holder_tx { - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), true, APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {}); } else { - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), true, APIError::ChannelUnavailable { .. 
}, {}); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + let res = nodes[0].node.send_payment_with_route(route, payment_hash, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {}); } } else if exposure_breach_event == ExposureEvent::AtHTLCReception { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 4 }); - nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let amount_msats = if on_holder_tx { + dust_inbound_htlc_on_holder_tx_msat + } else { + dust_htlc_on_counterparty_tx_msat + 4 + }; + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], amount_msats); + + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[1], 1); + let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.remove(0)); @@ -8491,9 +8745,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e // we need to add a lot of HTLCs that will become dust at the new feerate // to cross the threshold. for _ in 0..AT_FEE_OUTBOUND_HTLCS { - let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None); - nodes[0].node.send_payment_with_route(route.clone(), payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let (_, hash, payment_secret) = get_payment_preimage_hash(&nodes[1], Some(1_000), None); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment_with_route(route.clone(), hash, onion, id).unwrap(); } { let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); @@ -8627,8 +8882,9 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { // Send an additional non-dust htlc from 1 to 0, and check the complaint let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_limit * 2); - nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -8659,9 +8915,10 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { assert_eq!(nodes[1].node.list_channels()[0].pending_outbound_htlcs.len(), 0); // Send an additional non-dust htlc from 0 to 1 using the pre-calculated route above, and check the immediate complaint - unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route_0_1, payment_hash_0_1, - RecipientOnionFields::secret_only(payment_secret_0_1), PaymentId(payment_hash_0_1.0) - ), true, APIError::ChannelUnavailable { .. 
}, {}); + let onion = RecipientOnionFields::secret_only(payment_secret_0_1); + let id = PaymentId(payment_hash_0_1.0); + let res = nodes[0].node.send_payment_with_route(route_0_1, payment_hash_0_1, onion, id); + unwrap_send_err!(nodes[0], res, true, APIError::ChannelUnavailable { .. }, {}); nodes[0].logger.assert_log("lightning::ln::outbound_payment", format!("Failed to send along path due to error: Channel unavailable: Cannot send more than our next-HTLC maximum - {} msat", 2325000), 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -8695,7 +8952,8 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { let fail = get_htlc_update_msgs(&nodes[0], &node_c_id); nodes[2].node.handle_update_fail_htlc(node_a_id, &fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[0], fail.commitment_signed, false); - expect_payment_failed_conditions(&nodes[2], payment_hash, false, PaymentFailedConditions::new()); + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[2], payment_hash, false, conditions); } fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) { @@ -8789,8 +9047,9 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) // Send an additional non-dust htlc from 0 to 1, and check the complaint let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], NON_DUST_HTLC_MSAT); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -8824,10 +9083,11 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) assert_eq!(nodes[1].node.list_channels()[0].pending_inbound_htlcs.len(), DUST_HTLC_COUNT); // Set node 1's max dust htlc exposure equal to the `expected_dust_exposure_msat` - nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &ChannelConfigUpdate { + let config = ChannelConfigUpdate { max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat)), ..ChannelConfigUpdate::default() - }).unwrap(); + }; + nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &config).unwrap(); // Check a successful payment send_payment(&nodes[0], &[&nodes[1]], NON_DUST_HTLC_MSAT); @@ -8850,15 +9110,18 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) } // Set node 1's max dust htlc exposure to 1msat below `expected_dust_exposure_msat` - nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &ChannelConfigUpdate { + let update = ChannelConfigUpdate { max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat - 1)), ..ChannelConfigUpdate::default() - }).unwrap(); + }; + nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &update).unwrap(); // Send an additional non-dust htlc from 1 to 0 using the pre-calculated route above, and check the immediate complaint - unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route_1_0, payment_hash_1_0, - RecipientOnionFields::secret_only(payment_secret_1_0), PaymentId(payment_hash_1_0.0) - ), true, 
APIError::ChannelUnavailable { .. }, {}); + let onion = RecipientOnionFields::secret_only(payment_secret_1_0); + let id = PaymentId(payment_hash_1_0.0); + let res = nodes[1].node.send_payment_with_route(route_1_0, payment_hash_1_0, onion, id); + unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. }, {}); + let dust_limit = if features == ChannelTypeFeatures::only_static_remote_key() { MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 + htlc_success_tx_weight(&features) * node_1_dust_buffer_feerate / 1000 * 1000 } else { @@ -8877,10 +9140,11 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) assert_eq!(nodes[1].node.list_channels()[0].pending_inbound_htlcs.len(), DUST_HTLC_COUNT); // Set node 1's max dust htlc exposure equal to `expected_dust_exposure_msat` - nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &ChannelConfigUpdate { + let update = ChannelConfigUpdate { max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat)), ..ChannelConfigUpdate::default() - }).unwrap(); + }; + nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &update).unwrap(); // Check a successful payment send_payment(&nodes[1], &[&nodes[0]], NON_DUST_HTLC_MSAT); @@ -8939,7 +9203,9 @@ pub fn test_non_final_funding_tx() { _ => panic!() } let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned(); - check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(temp_channel_id, false, ClosureReason::ProcessingError { err })]); + let reason = ClosureReason::ProcessingError { err }; + let event = ExpectedCloseEvent::from_id_reason(temp_channel_id, false, reason); + check_closed_events(&nodes[0], &[event]); assert_eq!(get_err_msg(&nodes[0], &node_b_id).data, "Failed to fund channel"); } @@ -8976,7 +9242,7 @@ pub fn test_non_final_funding_tx_within_headroom() { }; // Transaction should be accepted if it's in a +1 headroom from best block. 
- assert!(nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).is_ok()); + nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).unwrap(); get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); } @@ -8989,25 +9255,30 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - let min_final_cltv_expiry_delta = 120; - let final_cltv_expiry_delta = if valid_delta { min_final_cltv_expiry_delta + 2 } else { - min_final_cltv_expiry_delta - 2 }; + let min_cltv_expiry_delta = 120; + let final_cltv_expiry_delta = + if valid_delta { min_cltv_expiry_delta + 2 } else { min_cltv_expiry_delta - 2 }; let recv_value = 100_000; create_chan_between_nodes(&nodes[0], &nodes[1]); - let payment_parameters = PaymentParameters::from_node_id(node_b_id, final_cltv_expiry_delta as u32); - let (payment_hash, payment_preimage, payment_secret) = if use_user_hash { - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[1], - Some(recv_value), Some(min_final_cltv_expiry_delta)); - (payment_hash, payment_preimage, payment_secret) + let payment_parameters = + PaymentParameters::from_node_id(node_b_id, final_cltv_expiry_delta as u32); + let (hash, payment_preimage, secret) = if use_user_hash { + let (payment_preimage, hash, secret) = + get_payment_preimage_hash!(nodes[1], Some(recv_value), Some(min_cltv_expiry_delta)); + (hash, payment_preimage, secret) } else { - let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(recv_value), 7200, Some(min_final_cltv_expiry_delta)).unwrap(); - (payment_hash, nodes[1].node.get_payment_preimage(payment_hash, payment_secret).unwrap(), payment_secret) + let (hash, secret) = + nodes[1] + .node + .create_inbound_payment(Some(recv_value), 7200, Some(min_cltv_expiry_delta)) + .unwrap(); + (hash, nodes[1].node.get_payment_preimage(hash, secret).unwrap(), secret) }; let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap(); - nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let onion = RecipientOnionFields::secret_only(secret); + nodes[0].node.send_payment_with_route(route, hash, onion, PaymentId(hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -9017,12 +9288,13 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash expect_pending_htlcs_forwardable!(nodes[1]); if valid_delta { - expect_payment_claimable!(nodes[1], payment_hash, payment_secret, recv_value, if use_user_hash { - None } else { Some(payment_preimage) }, node_b_id); + let preimage = if use_user_hash { None } else { Some(payment_preimage) }; + expect_payment_claimable!(nodes[1], hash, secret, recv_value, preimage, node_b_id); - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); } else { - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash }]); + let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash }; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); check_added_monitors!(nodes[1], 1); @@ -9030,7 +9302,7 @@ fn 
do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], fail_updates.commitment_signed, false, true); - expect_payment_failed!(nodes[0], payment_hash, true); + expect_payment_failed!(nodes[0], hash, true); } } @@ -9225,7 +9497,8 @@ pub fn test_remove_expired_outbound_unfunded_channels() { }, _ => panic!("Unexpected event"), } - check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[node_b_id], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -9279,7 +9552,8 @@ pub fn test_remove_expired_inbound_unfunded_channels() { }, _ => panic!("Unexpected event"), } - check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, false, &[node_a_id], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } #[xtest(feature = "_externalize_tests")] @@ -9316,7 +9590,8 @@ pub fn test_channel_close_when_not_timely_accepted() { // Since we disconnected from peer and did not connect back within time, // we should have forced-closed the channel by now. - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [node_b_id], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); assert_eq!(nodes[0].node.list_channels().len(), 0); { @@ -9356,12 +9631,13 @@ pub fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() { assert_eq!(nodes[1].node.list_channels().len(), 0); // The peers now reconnect - nodes[0].node.peer_connected(node_b_id, &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - nodes[1].node.peer_connected(node_a_id, &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + let init_msg = msgs::Init { + features: nodes[0].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); // Make sure the SendOpenChannel message is added to node_0 pending message events let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -9384,7 +9660,7 @@ fn do_test_multi_post_event_actions(do_reload: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let (persister, chain_monitor); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); - let nodes_0_deserialized; + let node_a_reload; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -9408,17 +9684,20 @@ fn do_test_multi_post_event_actions(do_reload: bool) { expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000); for dest in &[1, 2] { - let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], node_a_id); - nodes[0].node.handle_update_fulfill_htlc(nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); - commitment_signed_dance!(nodes[0], 
nodes[*dest], htlc_fulfill_updates.commitment_signed, false); + let htlc_fulfill = get_htlc_update_msgs!(nodes[*dest], node_a_id); + let dest_node_id = nodes[*dest].node.get_our_node_id(); + nodes[0] + .node + .handle_update_fulfill_htlc(dest_node_id, &htlc_fulfill.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill.commitment_signed, false); check_added_monitors(&nodes[0], 0); } let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000); let payment_id = PaymentId(payment_hash_3.0); - nodes[1].node.send_payment_with_route(route, payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret_3); + nodes[1].node.send_payment_with_route(route, payment_hash_3, onion, payment_id).unwrap(); check_added_monitors(&nodes[1], 1); let send_event = SendEvent::from_node(&nodes[1]); @@ -9427,10 +9706,12 @@ fn do_test_multi_post_event_actions(do_reload: bool) { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); if do_reload { - let nodes_0_serialized = nodes[0].node.encode(); + let node_ser = nodes[0].node.encode(); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); let chan_1_monitor_serialized = get_monitor!(nodes[0], chan_id_2).encode(); - reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized); + let mons = [&chan_0_monitor_serialized[..], &chan_1_monitor_serialized[..]]; + let config = test_default_channel_config(); + reload_node!(nodes[0], config, &node_ser, &mons, persister, chain_monitor, node_a_reload); nodes[1].node.peer_disconnected(node_a_id); nodes[2].node.peer_disconnected(node_a_id); @@ -9569,8 +9850,8 @@ pub fn test_close_in_funding_batch() { let funding_txo_2 = OutPoint { txid: tx.compute_txid(), index: 1 }; let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1); let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2); - let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &node_b_id, error_message.to_string()).unwrap(); + let err = "Channel force-closed".to_string(); + nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &node_b_id, err).unwrap(); // The monitor should become closed. 
check_added_monitors(&nodes[0], 1); @@ -9663,8 +9944,8 @@ pub fn test_batch_funding_close_after_funding_signed() { let funding_txo_2 = OutPoint { txid: tx.compute_txid(), index: 1 }; let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1); let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2); - let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &node_b_id, error_message.to_string()).unwrap(); + let err = "Channel force-closed".to_string(); + nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &node_b_id, err).unwrap(); check_added_monitors(&nodes[0], 2); { let mut monitor_updates = nodes[0].chain_monitor.monitor_updates.lock().unwrap(); @@ -9735,8 +10016,11 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen } else { (&nodes[0], &nodes[1]) }; - let error_message = "Channel force-closed"; - closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node.node.get_our_node_id(), error_message.to_string()).unwrap(); + let closing_node_id = closing_node.node.get_our_node_id(); + let other_node_id = other_node.node.get_our_node_id(); + + let err = "Channel force-closed".to_string(); + closing_node.node.force_close_broadcasting_latest_txn(&chan_id, &other_node_id, err).unwrap(); let mut msg_events = closing_node.node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); match msg_events.pop().unwrap() { @@ -9744,7 +10028,8 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen _ => panic!("Unexpected event"), } check_added_monitors(closing_node, 1); - check_closed_event(closing_node, 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[other_node.node.get_our_node_id()], 1_000_000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(closing_node, 1, reason, false, &[other_node_id], 1_000_000); let commitment_tx = { let mut txn = closing_node.tx_broadcaster.txn_broadcast(); @@ -9759,7 +10044,8 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen check_closed_broadcast(other_node, 1, true); check_added_monitors(other_node, 1); - check_closed_event(other_node, 1, ClosureReason::CommitmentTxConfirmed, false, &[closing_node.node.get_our_node_id()], 1_000_000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(other_node, 1, reason, false, &[closing_node_id], 1_000_000); assert!(nodes[0].node.list_channels().is_empty()); assert!(nodes[1].node.list_channels().is_empty()); @@ -9826,8 +10112,11 @@ pub fn test_manual_funding_abandon() { let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); - let (temporary_channel_id, _tx, funding_outpoint) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); - nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, node_b_id, funding_outpoint).unwrap(); + let (temp_channel_id, _tx, funding_outpoint) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); + nodes[0] + .node + .unsafe_manual_funding_transaction_generated(temp_channel_id, node_b_id, funding_outpoint) + .unwrap(); check_added_monitors!(nodes[0], 0); let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); @@ -9871,8 +10160,11 @@ pub fn test_funding_signed_event() { let accept_channel = 
get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); - let (temporary_channel_id, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); - nodes[0].node.unsafe_manual_funding_transaction_generated(temporary_channel_id, node_b_id, funding_outpoint).unwrap(); + let (temp_channel_id, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); + nodes[0] + .node + .unsafe_manual_funding_transaction_generated(temp_channel_id, node_b_id, funding_outpoint) + .unwrap(); check_added_monitors!(nodes[0], 0); let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); diff --git a/rustfmt_excluded_files b/rustfmt_excluded_files index 4f4804f2a30..34316d2c2f9 100644 --- a/rustfmt_excluded_files +++ b/rustfmt_excluded_files @@ -13,7 +13,6 @@ lightning/src/ln/chanmon_update_fail_tests.rs lightning/src/ln/channel.rs lightning/src/ln/channelmanager.rs lightning/src/ln/functional_test_utils.rs -lightning/src/ln/functional_tests.rs lightning/src/ln/max_payment_path_len_tests.rs lightning/src/ln/mod.rs lightning/src/ln/monitor_tests.rs From 8465d8a96ca9e5199b6f4a028fa38c3452a5b1ff Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 1 May 2025 16:56:04 +0000 Subject: [PATCH 22/25] f drop unecessary \n --- lightning/src/ln/functional_tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 1da6c7ae0df..6ba8268b429 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -611,7 +611,6 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac *node.fee_estimator.sat_per_kw.lock().unwrap() = 2000; } - let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); let node_c_id = nodes[2].node.get_our_node_id(); From 791fcb51b3ba671e49bede363795383ce69b9635 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 12 May 2025 22:11:30 +0000 Subject: [PATCH 23/25] f re-add the payment_ in payment_secret, even where it wasn't --- lightning/src/ln/functional_tests.rs | 111 +++++++++++++++------------ 1 file changed, 60 insertions(+), 51 deletions(-) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 6ba8268b429..8b93149508a 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -4104,31 +4104,34 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000); assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2); - let ds_dust_limit = { + let dust_limit_msat = { let per_peer_state_lock; let mut peer_state_lock; let chan = get_channel_ref!(nodes[3], nodes[2], per_peer_state_lock, peer_state_lock, chan_2_3.2); - chan.context().holder_dust_limit_satoshis + chan.context().holder_dust_limit_satoshis * 1000 }; // 0th HTLC (not added - smaller than dust limit + HTLC tx fee): let path_4: &[_] = &[&nodes[2], &nodes[3], &nodes[4]]; - let (_, hash_1, ..) = route_payment(&nodes[0], path_4, ds_dust_limit*1000); + let (_, hash_1, ..) = route_payment(&nodes[0], path_4, dust_limit_msat); // 1st HTLC (not added - smaller than dust limit + HTLC tx fee): - let (_, hash_2, ..) 
= route_payment(&nodes[0], path_4, ds_dust_limit*1000); - let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000); + let (_, hash_2, ..) = route_payment(&nodes[0], path_4, dust_limit_msat); + let (route_to_5, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], dust_limit_msat); // 2nd HTLC (not added - smaller than dust limit + HTLC tx fee): let path_5: &[&[_]] = &[&[&nodes[2], &nodes[3], &nodes[5]]]; - let secret = nodes[5].node.create_inbound_payment_for_hash(hash_1, None, 7200, None).unwrap(); - let route_2 = route.clone(); - send_along_route_with_secret(&nodes[1], route_2, path_5, ds_dust_limit*1000, hash_1, secret); + let payment_secret = + nodes[5].node.create_inbound_payment_for_hash(hash_1, None, 7200, None).unwrap(); + let route = route_to_5.clone(); + send_along_route_with_secret(&nodes[1], route, path_5, dust_limit_msat, hash_1, payment_secret); // 3rd HTLC (not added - smaller than dust limit + HTLC tx fee): - let secret = nodes[5].node.create_inbound_payment_for_hash(hash_2, None, 7200, None).unwrap(); - send_along_route_with_secret(&nodes[1], route, path_5, ds_dust_limit*1000, hash_2, secret); + let payment_secret = + nodes[5].node.create_inbound_payment_for_hash(hash_2, None, 7200, None).unwrap(); + let route = route_to_5; + send_along_route_with_secret(&nodes[1], route, path_5, dust_limit_msat, hash_2, payment_secret); // 4th HTLC: let (_, hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000); @@ -4138,28 +4141,32 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); // 6th HTLC: - let secret = nodes[5].node.create_inbound_payment_for_hash(hash_3, None, 7200, None).unwrap(); - send_along_route_with_secret(&nodes[1], route.clone(), path_5, 1000000, hash_3, secret); + let payment_secret = + nodes[5].node.create_inbound_payment_for_hash(hash_3, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route.clone(), path_5, 1000000, hash_3, payment_secret); // 7th HTLC: - let secret = nodes[5].node.create_inbound_payment_for_hash(hash_4, None, 7200, None).unwrap(); - send_along_route_with_secret(&nodes[1], route, path_5, 1000000, hash_4, secret); + let payment_secret = + nodes[5].node.create_inbound_payment_for_hash(hash_4, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route, path_5, 1000000, hash_4, payment_secret); // 8th HTLC: let (_, hash_5, ..) = route_payment(&nodes[0], path_4, 1000000); // 9th HTLC (not added - smaller than dust limit + HTLC tx fee): - let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000); - let secret = nodes[5].node.create_inbound_payment_for_hash(hash_5, None, 7200, None).unwrap(); - send_along_route_with_secret(&nodes[1], route, path_5, ds_dust_limit*1000, hash_5, secret); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], dust_limit_msat); + let payment_secret = + nodes[5].node.create_inbound_payment_for_hash(hash_5, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route, path_5, dust_limit_msat, hash_5, payment_secret); // 10th HTLC (not added - smaller than dust limit + HTLC tx fee): - let (_, hash_6, ..) = route_payment(&nodes[0], path_4, ds_dust_limit*1000); + let (_, hash_6, ..) 
= route_payment(&nodes[0], path_4, dust_limit_msat); // 11th HTLC: let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000); - let secret = nodes[5].node.create_inbound_payment_for_hash(hash_6, None, 7200, None).unwrap(); - send_along_route_with_secret(&nodes[1], route, path_5, 1000000, hash_6, secret); + let payment_secret = + nodes[5].node.create_inbound_payment_for_hash(hash_6, None, 7200, None).unwrap(); + send_along_route_with_secret(&nodes[1], route, path_5, 1000000, hash_6, payment_secret); // Double-check that six of the new HTLC were added // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie, @@ -6683,7 +6690,8 @@ pub fn test_onion_value_mpp_set_calculation() { let total_msat = 100_000; let expected_paths: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; - let (mut route, hash, preimage, secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat); + let (mut route, hash, preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[3], total_msat); let sample_path = route.paths.pop().unwrap(); let mut path_1 = sample_path.clone(); @@ -6704,7 +6712,7 @@ pub fn test_onion_value_mpp_set_calculation() { // Send payment let id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); let onion_session_privs = nodes[0] .node .test_add_new_pending_payment(hash, onion.clone(), id, &route) @@ -6732,7 +6740,7 @@ pub fn test_onion_value_mpp_set_calculation() { let height = nodes[0].best_block_info().1; let session_priv = SecretKey::from_slice(&session_priv).unwrap(); let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv); - let recipient_onion_fields = RecipientOnionFields::secret_only(secret); + let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000, &recipient_onion_fields, height + 1, &None, None, None).unwrap(); // Edit amt_to_forward to simulate the sender having set @@ -6769,7 +6777,8 @@ pub fn test_onion_value_mpp_set_calculation() { // Second path let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events); - pass_along_path(&nodes[0], expected_paths[1], 101_000, hash, Some(secret), ev, true, None); + let payment_secret = Some(payment_secret); + pass_along_path(&nodes[0], expected_paths[1], 101_000, hash, payment_secret, ev, true, None); claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], expected_paths, preimage) @@ -6808,7 +6817,7 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ // Create a route for each amount let example_amount = 100000; - let (mut route, hash, preimage, secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount); + let (mut route, hash, preimage, payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount); let sample_path = route.paths.pop().unwrap(); for i in 0..routing_node_count { let routing_node = 2 + i; @@ -6823,12 +6832,12 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ // Send payment with manually set total_msat let id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes()); - let onion = RecipientOnionFields::secret_only(secret); + let onion = 
RecipientOnionFields::secret_only(payment_secret); let onion_session_privs = nodes[src_idx] .node .test_add_new_pending_payment(hash, onion, id, &route) .unwrap(); - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); let amt = Some(total_msat); nodes[src_idx] .node @@ -6845,7 +6854,7 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ let current_path_amount = msat_amounts[path_idx]; amount_received += current_path_amount; let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat; - pass_along_path(&nodes[src_idx], expected_path, amount_received, hash.clone(), Some(secret), ev, became_claimable_now, None); + pass_along_path(&nodes[src_idx], expected_path, amount_received, hash.clone(), Some(payment_secret), ev, became_claimable_now, None); } claim_payment_along_route( @@ -8355,7 +8364,7 @@ pub fn test_inconsistent_mpp_params() { core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } }); - let (preimage, hash, secret) = get_payment_preimage_hash!(&nodes[3]); + let (preimage, hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]); let cur_height = nodes[0].best_block_info().1; let id = PaymentId([42; 32]); @@ -8365,10 +8374,10 @@ pub fn test_inconsistent_mpp_params() { // ultimately have, just not right away. let mut dup_route = route.clone(); dup_route.paths.push(route.paths[1].clone()); - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); nodes[0].node.test_add_new_pending_payment(hash, onion, id, &dup_route).unwrap() }; - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); let path_a = &route.paths[0]; let real_amt = 15_000_000; let priv_a = session_privs[0]; @@ -8382,11 +8391,11 @@ pub fn test_inconsistent_mpp_params() { assert_eq!(events.len(), 1); let path_a = &[&nodes[1], &nodes[3]]; let event = events.pop().unwrap(); - pass_along_path(&nodes[0], path_a, real_amt, hash, Some(secret), event, false, None); + pass_along_path(&nodes[0], path_a, real_amt, hash, Some(payment_secret), event, false, None); assert!(nodes[3].node.get_and_clear_pending_events().is_empty()); let path_b = &route.paths[1]; - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); let amt_b = 14_000_000; let priv_b = session_privs[1]; nodes[0] @@ -8440,7 +8449,7 @@ pub fn test_inconsistent_mpp_params() { let conditions = PaymentFailedConditions::new().mpp_parts_remain(); expect_payment_failed_conditions(&nodes[0], hash, true, conditions); - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); let path_b = &route.paths[1]; let priv_c = session_privs[2]; nodes[0] @@ -8453,7 +8462,7 @@ pub fn test_inconsistent_mpp_params() { assert_eq!(events.len(), 1); let event = events.pop().unwrap(); let path_b = &[&nodes[2], &nodes[3]]; - pass_along_path(&nodes[0], path_b, real_amt, hash, Some(secret), event, true, None); + pass_along_path(&nodes[0], path_b, real_amt, hash, Some(payment_secret), event, true, None); do_claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[path_a, path_b], preimage) @@ -8479,7 +8488,7 @@ pub fn test_double_partial_claim() { create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); - 
let (mut route, payment_hash, payment_preimage, secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000); + let (mut route, hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000); assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first @@ -8488,7 +8497,7 @@ pub fn test_double_partial_claim() { }); let paths: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; - send_along_route_with_secret(&nodes[0], route.clone(), paths, 15_000_000, payment_hash, secret); + send_along_route_with_secret(&nodes[0], route.clone(), paths, 15_000_000, hash, payment_secret); // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant) // amount of time to respond to. @@ -8497,25 +8506,25 @@ pub fn test_double_partial_claim() { connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later let failed_destinations = vec![ - HTLCHandlingFailureType::Receive { payment_hash }, - HTLCHandlingFailureType::Receive { payment_hash }, + HTLCHandlingFailureType::Receive { payment_hash: hash }, + HTLCHandlingFailureType::Receive { payment_hash: hash }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations); let reason = PaymentFailureReason::RecipientRejected; - pass_failed_payment_back(&nodes[0], paths, false, payment_hash, reason); + pass_failed_payment_back(&nodes[0], paths, false, hash, reason); // nodes[1] now retries one of the two paths... - let onion = RecipientOnionFields::secret_only(secret); - let id = PaymentId(payment_hash.0); - nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 2); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); let msgs = remove_first_msg_event_to_node(&node_b_id, &mut events); let path = &[&nodes[1], &nodes[3]]; - pass_along_path(&nodes[0], path, 15_000_000, payment_hash, Some(secret), msgs, false, None); + pass_along_path(&nodes[0], path, 15_000_000, hash, Some(payment_secret), msgs, false, None); // At this point nodes[3] has received one half of the payment, and the user goes to handle // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim. 
@@ -9263,20 +9272,20 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash let payment_parameters = PaymentParameters::from_node_id(node_b_id, final_cltv_expiry_delta as u32); - let (hash, payment_preimage, secret) = if use_user_hash { - let (payment_preimage, hash, secret) = + let (hash, payment_preimage, payment_secret) = if use_user_hash { + let (payment_preimage, hash, payment_secret) = get_payment_preimage_hash!(nodes[1], Some(recv_value), Some(min_cltv_expiry_delta)); - (hash, payment_preimage, secret) + (hash, payment_preimage, payment_secret) } else { - let (hash, secret) = + let (hash, payment_secret) = nodes[1] .node .create_inbound_payment(Some(recv_value), 7200, Some(min_cltv_expiry_delta)) .unwrap(); - (hash, nodes[1].node.get_payment_preimage(hash, secret).unwrap(), secret) + (hash, nodes[1].node.get_payment_preimage(hash, payment_secret).unwrap(), payment_secret) }; let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap(); - let onion = RecipientOnionFields::secret_only(secret); + let onion = RecipientOnionFields::secret_only(payment_secret); nodes[0].node.send_payment_with_route(route, hash, onion, PaymentId(hash.0)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -9288,7 +9297,7 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash if valid_delta { let preimage = if use_user_hash { None } else { Some(payment_preimage) }; - expect_payment_claimable!(nodes[1], hash, secret, recv_value, preimage, node_b_id); + expect_payment_claimable!(nodes[1], hash, payment_secret, recv_value, preimage, node_b_id); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); } else { From 398b28080005328ed4574631fb148a3644b9af95 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 28 Apr 2025 14:11:30 +0000 Subject: [PATCH 24/25] De-macro `check_added_monitors` in `functional_tests.rs` --- lightning/src/ln/functional_tests.rs | 498 +++++++++++++-------------- 1 file changed, 249 insertions(+), 249 deletions(-) diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 8b93149508a..4e305ab5a51 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -227,7 +227,7 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) { if steps & 0x0f == 3 { return; } nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); let channel_id = ChannelId::v1_from_funding_txid( @@ -498,7 +498,7 @@ pub fn test_duplicate_htlc_different_direction_onchain() { // Provide preimage to node 0 by claiming payment nodes[0].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[0], payment_hash, payment_value_msats); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Broadcast node 1 commitment txn let remote_txn = get_local_commitment_txn!(nodes[1], chan_1.2); @@ -515,7 +515,7 @@ pub fn test_duplicate_htlc_different_direction_onchain() { assert_eq!(has_both_htlcs, 2); mine_transaction(&nodes[0], &remote_txn[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires 
@@ -649,7 +649,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac node_id: Some(node_c_id), channel_id: chan_2.2 }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let htlc_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } = htlc_updates; @@ -676,7 +676,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac PostFailBackAction::ClaimOnChain => { nodes[2].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); get_htlc_update_msgs(&nodes[2], &node_b_id); connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); @@ -684,7 +684,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac check_closed_broadcast!(nodes[2], true); let reason = ClosureReason::HTLCsTimedOut; check_closed_event(&nodes[2], 1, reason, false, &[node_b_id], 100_000); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success @@ -695,7 +695,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash }]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let commitment_update = get_htlc_update_msgs(&nodes[2], &node_b_id); let update_fail = commitment_update.update_fail_htlcs[0].clone(); @@ -707,7 +707,7 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac PostFailBackAction::ClaimOffChain => { nodes[2].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let commitment_update = get_htlc_update_msgs(&nodes[2], &node_b_id); let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone(); @@ -756,7 +756,7 @@ pub fn channel_monitor_network_test() { // Simple case with no pending HTLCs: let err = "Channel force-closed".to_string(); nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &node_a_id, err).unwrap(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); @@ -769,7 +769,7 @@ pub fn channel_monitor_network_test() { } mine_transaction(&nodes[0], &node_txn[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); test_txn_broadcast(&nodes[0], &chan_1, Some(node_txn[0].clone()), HTLCType::NONE); } check_closed_broadcast!(nodes[0], true); @@ -785,13 +785,13 @@ pub fn channel_monitor_network_test() { let error_message = "Channel force-closed"; nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &node_c_id, error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); { let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE); connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1); 
test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); mine_transaction(&nodes[2], &node_txn[0]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); test_txn_broadcast(&nodes[2], &chan_2, Some(node_txn[0].clone()), HTLCType::NONE); } check_closed_broadcast!(nodes[2], true); @@ -806,7 +806,7 @@ pub fn channel_monitor_network_test() { { $node.node.claim_funds($preimage); expect_payment_claimed!($node, $payment_hash, 3_000_000); - check_added_monitors!($node, 1); + check_added_monitors(&$node, 1); let events = $node.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -826,7 +826,7 @@ pub fn channel_monitor_network_test() { // HTLC-Timeout and a nodes[3] claim against it (+ its own announces) let err = "Channel force-closed".to_string(); nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &node_d_id, err).unwrap(); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); check_closed_broadcast!(nodes[2], true); let node2_commitment_txid; { @@ -838,7 +838,7 @@ pub fn channel_monitor_network_test() { // Claim the payment on nodes[3], giving it knowledge of the preimage claim_funds!(nodes[3], nodes[2], payment_preimage_1, payment_hash_1); mine_transaction(&nodes[3], &node_txn[0]); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); check_preimage_claim(&nodes[3], &node_txn); } check_closed_broadcast!(nodes[3], true); @@ -873,7 +873,7 @@ pub fn channel_monitor_network_test() { }, _ => panic!("Unexpected event"), } - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); // Clear bumped claiming txn spending node 2 commitment tx. Bumped txn are generated after reaching some height timer. { @@ -905,7 +905,7 @@ pub fn channel_monitor_network_test() { }, _ => panic!("Unexpected event"), } - check_added_monitors!(nodes[4], 1); + check_added_monitors(&nodes[4], 1); test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS); check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [node_d_id], 100000); @@ -977,7 +977,7 @@ pub fn test_justice_tx_htlc_timeout() { assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output); node_txn.clear(); } - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); test_txn_broadcast(&nodes[1], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::NONE); @@ -986,7 +986,7 @@ pub fn test_justice_tx_htlc_timeout() { // Verify broadcast of revoked HTLC-timeout let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); // Broadcast revoked HTLC-timeout on node 1 mine_transaction(&nodes[1], &node_txn[1]); @@ -1044,13 +1044,13 @@ pub fn test_justice_tx_htlc_success() { check_spends!(node_txn[0], revoked_local_txn[0]); node_txn.swap_remove(0); } - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); test_txn_broadcast(&nodes[0], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::NONE); mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS); - 
check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); mine_transaction(&nodes[0], &node_txn[1]); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone()); @@ -1083,7 +1083,7 @@ pub fn revoked_output_claim() { // Inform nodes[1] that nodes[0] broadcast a stale tx mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 1); // ChannelMonitor: justice tx against revoked to_local output @@ -1093,7 +1093,7 @@ pub fn revoked_output_claim() { // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan mine_transaction(&nodes[0], &revoked_local_txn[0]); get_announce_close_broadcast_events(&nodes, 0, 1); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); } @@ -1142,12 +1142,12 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]); mine_transactions(&nodes[0], &[revoked_commitment_tx, &justice_tx]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100_000); get_announce_close_broadcast_events(&nodes, 1, 0); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100_000); @@ -1202,10 +1202,10 @@ pub fn claim_htlc_outputs() { { mine_transaction(&nodes[0], &revoked_local_txn[0]); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -1552,7 +1552,7 @@ pub fn test_htlc_on_chain_success() { expect_payment_claimed!(nodes[2], payment_hash_1, 3_000_000); nodes[2].node.claim_funds(our_payment_preimage_2); expect_payment_claimed!(nodes[2], payment_hash_2, 3_000_000); - check_added_monitors!(nodes[2], 2); + check_added_monitors(&nodes[2], 2); let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); @@ -1561,7 +1561,7 @@ pub fn test_htlc_on_chain_success() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 2 (2 * HTLC-Success tx) assert_eq!(node_txn.len(), 2); @@ -1684,7 +1684,7 @@ pub fn test_htlc_on_chain_success() { 
check_spends!(node_a_commitment_tx[0], chan_1.3); mine_transaction(&nodes[1], &node_a_commitment_tx[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert!(node_txn.len() == 1 || node_txn.len() == 2); // HTLC-Success, RBF bump of above aggregated HTLC txn @@ -1719,7 +1719,7 @@ pub fn test_htlc_on_chain_success() { connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, txn)); connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 5); let mut first_claimed = false; @@ -1778,9 +1778,9 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2); check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.fail_htlc_backwards(&payment_hash); - check_added_monitors!(nodes[2], 0); + check_added_monitors(&nodes[2], 0); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1796,7 +1796,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { }; mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert_eq!(node_txn.len(), 0); @@ -1829,13 +1829,13 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { assert_eq!(nodes[1].tx_broadcaster.txn_broadcast().len(), 0); mine_transaction(&nodes[1], &timeout_tx); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { @@ -1857,7 +1857,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { connect_blocks(&nodes[0], TEST_FINAL_CLTV + MIN_CLTV_EXPIRY_DELTA as u32); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx assert_eq!(node_txn.len(), 1); @@ -1901,11 +1901,11 @@ pub fn test_simple_commitment_revoked_fail_backward() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, 
[node_c_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { @@ -1978,7 +1978,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use nodes[2].node.fail_htlc_backwards(&first_payment_hash); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -1992,7 +1992,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use nodes[2].node.fail_htlc_backwards(&second_payment_hash); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: second_payment_hash }]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -2001,15 +2001,15 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert!(updates.update_fee.is_none()); nodes[1].node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Note that nodes[1] is in AwaitingRAA, so won't send a CS let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); nodes[2].node.fail_htlc_backwards(&third_payment_hash); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: third_payment_hash }]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); @@ -2020,11 +2020,11 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // At this point first_payment_hash has dropped out of the latest two commitment // transactions that nodes[1] is tracking... nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting // on nodes[2]'s RAA. 
@@ -2034,13 +2034,13 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use nodes[1].node.send_payment_with_route(route, fourth_payment_hash, onion, id).unwrap(); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); if deliver_bs_raa { nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_raa); // One monitor for the new revocation preimage, no second one as we won't generate a new // commitment transaction for nodes[0] until process_pending_htlc_forwards(). - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { @@ -2059,7 +2059,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); mine_transaction(&nodes[1], &revoked_local_txn[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let events = nodes[1].node.get_and_clear_pending_events(); @@ -2078,7 +2078,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use ))); nodes[1].node.process_pending_htlc_forwards(); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 }); @@ -2212,7 +2212,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let payment_event = { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -2229,7 +2229,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { let onion = RecipientOnionFields::secret_only(failed_payment_secret); let id = PaymentId(failed_payment_hash.0); nodes[0].node.send_payment_with_route(route, failed_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 0); + check_added_monitors(&nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -2282,7 +2282,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { _ => panic!("Unexpected event {:?}", events[1]), } check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } #[xtest(feature = "_externalize_tests")] @@ -2309,7 +2309,7 @@ pub fn test_htlc_ignore_latest_remote_commitment() { nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &node_b_id, error_message.to_string()).unwrap(); connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); @@ -2321,7 +2321,7 @@ pub fn test_htlc_ignore_latest_remote_commitment() { let block = create_dummy_block(nodes[1].best_block_hash(), 42, vec![node_txn[0].clone()]); connect_block(&nodes[1], &block); check_closed_broadcast!(nodes[1], true); - 
check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); // Duplicate the connect_block call since this may happen due to other listeners @@ -2350,7 +2350,7 @@ pub fn test_force_close_fail_back() { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2367,10 +2367,10 @@ pub fn test_force_close_fail_back() { payment_event = SendEvent::from_event(events_2.remove(0)); assert_eq!(payment_event.msgs.len(), 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let (_, _) = get_revoke_commit_msgs!(nodes[2], node_b_id); // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous @@ -2380,7 +2380,7 @@ pub fn test_force_close_fail_back() { let channel_id = payment_event.commitment_msg[0].channel_id; nodes[2].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, error_message.to_string()).unwrap(); check_closed_broadcast!(nodes[2], true); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; check_closed_event!(nodes[2], 1, reason, [node_b_id], 100000); @@ -2397,7 +2397,7 @@ pub fn test_force_close_fail_back() { // Note no UpdateHTLCs event here from nodes[1] to nodes[0]! check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. 
@@ -2441,7 +2441,7 @@ pub fn test_dup_events_on_peer_disconnect() { nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, 1_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let claim_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[0].node.handle_update_fulfill_htlc(node_b_id, &claim_msgs.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); @@ -2618,7 +2618,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let onion = RecipientOnionFields::secret_only(payment_secret_1); let id = PaymentId(payment_hash_1.0); nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2632,24 +2632,24 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); if messages_delivered >= 3 { nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); if messages_delivered >= 4 { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); if messages_delivered >= 5 { nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); if messages_delivered >= 6 { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } } } @@ -2742,7 +2742,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken } nodes[1].node.claim_funds(payment_preimage_1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); let events_3 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -2775,24 +2775,24 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken if messages_delivered >= 2 { nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id); if messages_delivered >= 3 { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); if messages_delivered >= 4 { nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + 
check_added_monitors(&nodes[1], 1); if messages_delivered >= 5 { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } } } @@ -2990,7 +2990,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events_1 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_1.len(), 1); @@ -3001,7 +3001,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { nodes[1].node.claim_funds(payment_preimage_1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); @@ -3028,7 +3028,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { nodes[0].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); }, _ => panic!("Unexpected event"), } @@ -3071,7 +3071,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_resp.2.as_ref().unwrap().commitment_signed); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, as_resp.1.as_ref().unwrap()); let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -3080,7 +3080,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty()); assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty()); assert!(bs_second_commitment_signed.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); let as_commitment_signed = get_htlc_update_msgs!(nodes[0], node_b_id); @@ -3089,21 +3089,21 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(as_commitment_signed.update_fail_htlcs.is_empty()); assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty()); assert!(as_commitment_signed.update_fee.is_none()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment_signed.commitment_signed); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed.commitment_signed); let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes - 
check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); @@ -3125,7 +3125,7 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); expect_payment_path_successful!(nodes[0]); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); @@ -3157,7 +3157,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); // Now do the relevant commitment_signed/RAA dances along the path, noting that the final @@ -3180,7 +3180,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(htlc_timeout_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_timeout_updates.update_fail_htlcs.len(), 1); @@ -3227,7 +3227,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { let id = PaymentId(first_payment_hash.0); nodes[1].node.send_payment_with_route(route, first_payment_hash, onion, id).unwrap(); assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); // Now attempt to route a second payment, which should be placed in the holding cell let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] }; @@ -3237,13 +3237,13 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { sending_node.node.send_payment_with_route(route, second_payment_hash, onion, id).unwrap(); if forwarded_htlc { - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); } - check_added_monitors!(nodes[1], 0); + check_added_monitors(&nodes[1], 0); connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -3254,7 +3254,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { let fail_type = HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); match fail_commit[0] { @@ 
-3321,7 +3321,7 @@ pub fn test_claim_sizeable_push_msat() { let err = "Channel force-closed".to_string(); nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &node_a_id, err).unwrap(); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); @@ -3357,7 +3357,7 @@ pub fn test_claim_on_remote_sizeable_push_msat() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000); nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &node_b_id, err).unwrap(); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); @@ -3368,7 +3368,7 @@ pub fn test_claim_on_remote_sizeable_push_msat() { mine_transaction(&nodes[1], &node_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); @@ -3398,7 +3398,7 @@ pub fn test_claim_on_remote_revoked_sizeable_push_msat() { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); @@ -3433,9 +3433,9 @@ pub fn test_static_spendable_outputs_preimage_tx() { // Settle A's commitment tx on B's chain nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, 3_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); mine_transaction(&nodes[1], &commitment_tx[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); match events[0] { MessageSendEvent::UpdateHTLCs { .. } => {}, @@ -3484,7 +3484,7 @@ pub fn test_static_spendable_outputs_timeout_tx() { // Settle A's commitment tx on B's chain mine_transaction(&nodes[1], &commitment_tx[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); match events[1] { MessageSendEvent::BroadcastChannelUpdate { .. 
} => {}, @@ -3534,7 +3534,7 @@ fn do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(split_tx: b mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); // If the HTLC expires in more than COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE blocks, we'll @@ -3589,7 +3589,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { // A will generate HTLC-Timeout from revoked commitment tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires @@ -3608,7 +3608,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { let txn = vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]; connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn)); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); // There will be 2 justice transactions: @@ -3666,7 +3666,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { // B will generate HTLC-Success from revoked commitment tx mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); @@ -3683,7 +3683,7 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { let txn = vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()]; connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, txn)); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); // There will be 2 justice transactions, one on the revoked HTLC output on the commitment @@ -3752,7 +3752,7 @@ pub fn test_onchain_to_onchain_claim() { check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); @@ -3761,7 +3761,7 @@ pub fn test_onchain_to_onchain_claim() { mine_transaction(&nodes[2], &commitment_tx[0]); check_closed_broadcast!(nodes[2], true); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx) @@ -3774,7 +3774,7 @@ pub fn test_onchain_to_onchain_claim() { // So we broadcast C's commitment tx and HTLC-Success on B's chain, we 
should successfully be able to extract preimage and update downstream monitor let txn = vec![commitment_tx[0].clone(), c_txn[0].clone()]; connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, txn)); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { @@ -3793,7 +3793,7 @@ pub fn test_onchain_to_onchain_claim() { }, _ => panic!("Unexpected event"), } - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 3); let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut msg_events); @@ -3834,7 +3834,7 @@ pub fn test_onchain_to_onchain_claim() { assert_eq!(b_txn[0].lock_time.to_consensus_u32(), nodes[1].best_block_info().1); // Success tx check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); } #[xtest(feature = "_externalize_tests")] @@ -3896,7 +3896,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { mine_transaction(&nodes[1], &commitment_txn[0]); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); // Confirm blocks until both HTLCs expire and get a transaction which times out one HTLC. @@ -3929,19 +3929,19 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { // Now give node E the payment preimage and pass it back to C. nodes[4].node.claim_funds(our_payment_preimage); expect_payment_claimed!(nodes[4], dup_payment_hash, 800_000); - check_added_monitors!(nodes[4], 1); + check_added_monitors(&nodes[4], 1); let updates = get_htlc_update_msgs!(nodes[4], node_c_id); nodes[2].node.handle_update_fulfill_htlc(node_e_id, &updates.update_fulfill_htlcs[0]); let _cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); expect_payment_forwarded!(nodes[2], nodes[1], nodes[4], Some(196), false, false); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); commitment_signed_dance!(nodes[2], nodes[4], &updates.commitment_signed, false); // Mine the commitment transaction on node C and get the HTLC success transactions it will // generate (note that the ChannelMonitor doesn't differentiate between HTLCs once it has the // preimage). 
mine_transaction(&nodes[2], &commitment_txn[0]); - check_added_monitors!(nodes[2], 1); + check_added_monitors(&nodes[2], 1); check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); check_closed_broadcast(&nodes[2], 1, true); @@ -3975,7 +3975,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { let first_htlc_id = htlc_updates.update_fail_htlcs[0].htlc_id; assert!(htlc_updates.update_fulfill_htlcs.is_empty()); assert!(htlc_updates.update_fail_malformed_htlcs.is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_update_fail_htlc(node_b_id, &htlc_updates.update_fail_htlcs[0]); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -3993,7 +3993,7 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { assert_eq!(updates.update_fulfill_htlcs.len(), 1); assert_ne!(updates.update_fulfill_htlcs[0].htlc_id, first_htlc_id); assert!(updates.update_fail_malformed_htlcs.is_empty()); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); @@ -4021,10 +4021,10 @@ pub fn test_dynamic_spendable_outputs_local_htlc_success_tx() { // Give B knowledge of preimage to be able to generate a local HTLC-Success Tx nodes[1].node.claim_funds(payment_preimage); expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); mine_transaction(&nodes[1], &local_txn[0]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let events = nodes[1].node.get_and_clear_pending_msg_events(); match events[0] { @@ -4180,7 +4180,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno nodes[4].node.fail_htlc_backwards(&hash_3); nodes[4].node.fail_htlc_backwards(&hash_5); nodes[4].node.fail_htlc_backwards(&hash_6); - check_added_monitors!(nodes[4], 0); + check_added_monitors(&nodes[4], 0); let failed_destinations = vec![ HTLCHandlingFailureType::Receive { payment_hash: hash_1 }, @@ -4189,7 +4189,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno HTLCHandlingFailureType::Receive { payment_hash: hash_6 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations); - check_added_monitors!(nodes[4], 1); + check_added_monitors(&nodes[4], 1); let four_removes = get_htlc_update_msgs!(nodes[4], node_d_id); nodes[3].node.handle_update_fail_htlc(node_e_id, &four_removes.update_fail_htlcs[0]); @@ -4201,14 +4201,14 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno // Fail 3rd below-dust and 7th above-dust HTLCs nodes[5].node.fail_htlc_backwards(&hash_2); nodes[5].node.fail_htlc_backwards(&hash_4); - check_added_monitors!(nodes[5], 0); + check_added_monitors(&nodes[5], 0); let failed_destinations_2 = vec![ HTLCHandlingFailureType::Receive { payment_hash: hash_2 }, HTLCHandlingFailureType::Receive { payment_hash: hash_4 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2); - check_added_monitors!(nodes[5], 1); + check_added_monitors(&nodes[5], 1); let two_removes = get_htlc_update_msgs!(nodes[5], node_d_id); nodes[3].node.handle_update_fail_htlc(node_f_id, 
&two_removes.update_fail_htlcs[0]); @@ -4227,7 +4227,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno HTLCHandlingFailureType::Forward { node_id: Some(node_f_id), channel_id: chan_3_5.2 }, ]; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3); - check_added_monitors!(nodes[3], 1); + check_added_monitors(&nodes[3], 1); let six_removes = get_htlc_update_msgs!(nodes[3], node_c_id); nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[0]); nodes[2].node.handle_update_fail_htlc(node_d_id, &six_removes.update_fail_htlcs[1]); @@ -4289,7 +4289,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations); } - check_added_monitors!(nodes[2], 3); + check_added_monitors(&nodes[2], 3); let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(cs_msgs.len(), 2); @@ -4428,7 +4428,7 @@ pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx mine_transaction(&nodes[0], &local_txn[0]); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires @@ -4518,7 +4518,7 @@ pub fn test_key_derivation_params() { mine_transaction(&nodes[0], &local_txn_1[0]); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); let htlc_timeout = { @@ -4598,7 +4598,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being // present in B's local commitment transaction, but none of A's commitment transactions. 
nodes[1].node.claim_funds(payment_preimage); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash, if use_dust { 50000 } else { 3_000_000 }); let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -4606,10 +4606,10 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_revoke_commit_msgs!(nodes[0], node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_updates.0); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let starting_block = nodes[1].best_block_info(); let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); @@ -4620,7 +4620,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let htlc_type = if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS }; test_txn_broadcast(&nodes[1], &chan, None, htlc_type); check_closed_broadcast!(nodes[1], true); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [node_a_id], 100000); } @@ -4638,7 +4638,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let _as_update = get_htlc_update_msgs!(nodes[0], node_b_id); @@ -4655,7 +4655,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { } test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [node_b_id], 100000); } @@ -4678,22 +4678,22 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no let (_, our_payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], htlc_value); nodes[1].node.fail_htlc_backwards(&our_payment_hash); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let as_updates = get_revoke_commit_msgs!(nodes[0], node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_updates.0); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_updates.1); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); if check_revoke_no_close { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); } let starting_block = nodes[1].best_block_info(); @@ -4705,7 +4705,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no if !check_revoke_no_close { test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE); check_closed_broadcast!(nodes[0], true); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [node_b_id], 100000); } else { expect_payment_failed!(nodes[0], our_payment_hash, true); @@ -4848,7 +4848,7 @@ pub fn test_fail_holding_cell_htlc_upon_free() { *feerate_lock += 20; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4880,9 +4880,9 @@ pub fn test_fail_holding_cell_htlc_upon_free() { // Flush the pending fee update. nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed); let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], node_a_id); - check_added_monitors!(nodes[1], 1); + check_added_monitors(&nodes[1], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &as_revoke_and_ack); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); // Upon receipt of the RAA, there will be an attempt to resend the holding cell // HTLC, but now that the fee has been raised the payment will now fail, causing @@ -4933,7 +4933,7 @@ pub fn test_free_and_fail_holding_cell_htlcs() { *feerate_lock += 200; } nodes[0].node.timer_tick_occurred(); - check_added_monitors!(nodes[0], 1); + check_added_monitors(&nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -4973,10 +4973,10 @@ pub fn test_free_and_fail_holding_cell_htlcs() { // Flush the pending fee update. 
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, commitment_signed);
 	let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &revoke_and_ack);
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed);
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 
 	// Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
 	// but now that the fee has been raised the second payment will now fail, causing us
@@ -5015,7 +5015,7 @@ pub fn test_free_and_fail_holding_cell_htlcs() {
 		_ => panic!("Unexpected event"),
 	};
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 	let events = nodes[1].node.get_and_clear_pending_events();
@@ -5032,7 +5032,7 @@ pub fn test_free_and_fail_holding_cell_htlcs() {
 		_ => panic!("Unexpected event"),
 	}
 	nodes[1].node.claim_funds(payment_preimage_1);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash_1, amt_1);
 
 	let update_msgs = get_htlc_update_msgs!(nodes[1], node_a_id);
@@ -5070,7 +5070,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() {
 		*feerate_lock += 20;
 	}
 	nodes[1].node.timer_tick_occurred();
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -5095,7 +5095,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() {
 	let onion = RecipientOnionFields::secret_only(our_payment_secret);
 	let id = PaymentId(our_payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -5103,7 +5103,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() {
 		SendEvent::from_event(events.remove(0))
 	};
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 
@@ -5113,10 +5113,10 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() {
 	// Flush the pending fee update.
 	nodes[2].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed);
 	let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], node_b_id);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	nodes[1].node.handle_revoke_and_ack(node_c_id, &raa);
 	nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &commitment_signed);
-	check_added_monitors!(nodes[1], 2);
+	check_added_monitors(&nodes[1], 2);
 
 	// A final RAA message is generated to finalize the fee update.
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -5130,7 +5130,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() {
 	};
 
 	nodes[2].node.handle_revoke_and_ack(node_b_id, &raa_msg);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
 
 	// nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
@@ -5143,7 +5143,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() {
 	// In response, we call ChannelManager's process_pending_htlc_forwards
 	nodes[1].node.process_pending_htlc_forwards();
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	// This causes the HTLC to be failed backwards.
 	let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
@@ -5165,10 +5165,10 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() {
 	// Complete the HTLC failure+removal process.
 	let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed);
-	check_added_monitors!(nodes[1], 2);
+	check_added_monitors(&nodes[1], 2);
 	let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(final_raa_event.len(), 1);
 	let raa = match &final_raa_event[0] {
@@ -5177,7 +5177,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() {
 	};
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &raa);
 	expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -5204,25 +5204,25 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_
 		let onion = RecipientOnionFields::secret_only(our_payment_secret);
 		let id = PaymentId(our_payment_hash.0);
 		nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
 		SendEvent::from_event(events.remove(0))
 	};
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_2.len(), 1);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	payment_event = SendEvent::from_event(events_2.remove(0));
 	assert_eq!(payment_event.msgs.len(), 1);
 
 	//Second Hop
 	payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
 	nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]);
-	check_added_monitors!(nodes[2], 0);
+	check_added_monitors(&nodes[2], 0);
 	commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
 	expect_pending_htlcs_forwardable!(nodes[2]);
 	expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]);
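The mechanical change running through these hunks swaps the `check_added_monitors!` macro for a direct call to the `check_added_monitors` helper function. A rough sketch of what such a helper does, with the signature inferred from the call sites in this patch rather than copied from the real `functional_test_utils.rs` definition (which is generic over the node type):

	// Sketch only: assert that exactly `count` ChannelMonitor persistences
	// happened since the last check, then clear the list so later checks
	// start fresh. `Node` stands in for the actual test-harness node type.
	pub fn check_added_monitors(node: &Node, count: usize) {
		let mut added_monitors = node.chain_monitor.added_monitors.lock().unwrap();
		assert_eq!(added_monitors.len(), count);
		added_monitors.clear();
	}

Calling the function directly rather than through a macro keeps the assertion identical while restoring ordinary type checking and go-to-definition at every call site.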
@@ -5246,7 +5246,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_
 	nodes[1].node.handle_update_fail_malformed_htlc(node_c_id, &update_msg.0);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]);
 	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
@@ -5264,7 +5264,7 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_
 		_ => panic!("Unexpected event"),
 	};
 
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -5288,21 +5288,21 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
 		let onion = RecipientOnionFields::secret_only(our_payment_secret);
 		let id = PaymentId(our_payment_hash.0);
 		nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		SendEvent::from_node(&nodes[0])
 	};
 	nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
 	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 	expect_pending_htlcs_forwardable!(nodes[1]);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	payment_event = SendEvent::from_node(&nodes[1]);
 	assert_eq!(payment_event.msgs.len(), 1);
 
 	// Second Hop
 	payment_event.msgs[0].onion_routing_packet.version = 1; // Trigger an invalid_onion_version error
 	nodes[2].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]);
-	check_added_monitors!(nodes[2], 0);
+	check_added_monitors(&nodes[2], 0);
 	commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
 	expect_pending_htlcs_forwardable!(nodes[2]);
 	expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]);
@@ -5327,7 +5327,7 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() {
 		node_id: Some(node_c_id), channel_id: chan_2.2 }]);
 	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_4.len(), 1);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	match events_4[0] {
 		MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
@@ -5395,14 +5395,14 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 	// Fail one HTLC to prune it in the will-be-latest-local commitment tx
 	nodes[1].node.fail_htlc_backwards(&payment_hash_2);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }]);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let remove = get_htlc_update_msgs!(nodes[1], node_a_id);
 	nodes[0].node.handle_update_fail_htlc(node_b_id, &remove.update_fail_htlcs[0]);
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &remove.commitment_signed);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	// Cache one local commitment tx as latest
 	let as_last_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
@@ -5430,7 +5430,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 	}
 
 	check_closed_broadcast!(nodes[0], true);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 
 	assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
@@ -5508,7 +5508,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
 		connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - ANTI_REORG_DELAY);
 		check_closed_broadcast!(nodes[0], true);
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
 		timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
 		assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
@@ -5521,7 +5521,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
 		// We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
 		mine_transaction(&nodes[0], &bs_commitment_tx[0]);
 		check_closed_broadcast!(nodes[0], true);
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 		assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
@@ -5669,7 +5669,7 @@ pub fn test_check_htlc_underpaying() {
 	let onion = RecipientOnionFields::secret_only(our_payment_secret);
 	let id = PaymentId(our_payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -5699,7 +5699,7 @@ pub fn test_check_htlc_underpaying() {
 		},
 		_ => panic!("Unexpected event"),
 	};
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlc);
 	commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
@@ -5839,7 +5839,7 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() {
 	// Actually revoke tx by claiming a HTLC
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
 	connect_block(&nodes[1], &create_dummy_block(header_114, 42, vec![revoked_txn[0].clone()]));
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	macro_rules! check_broadcasted_txn {
 		($penalty_txids:ident, $fee_rates:ident) => {
@@ -5944,7 +5944,7 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() {
 	// B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
 	connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]));
 	check_closed_broadcast!(nodes[1], true);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000);
 	connect_blocks(&nodes[1], 50); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
@@ -6060,7 +6060,7 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() {
 		node_txn.clear();
 	}
 	check_closed_broadcast!(nodes[0], true);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -6096,7 +6096,7 @@ pub fn test_bump_penalty_txn_on_remote_commitment() {
 	nodes[1].node.claim_funds(payment_preimage);
 	expect_payment_claimed!(nodes[1], payment_hash, htlc_value_a_msats);
 	mine_transaction(&nodes[1], &remote_txn[0]);
-	check_added_monitors!(nodes[1], 2);
+	check_added_monitors(&nodes[1], 2);
 	connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires
 
 	// depending on the block connection style, node 1 may have broadcast either 3 or 10 txs
@@ -6229,7 +6229,7 @@ pub fn test_counterparty_raa_skip_no_crash() {
 	};
 	nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
 	assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let reason = ClosureReason::ProcessingError {
 		err: "Received an unexpected revoke_and_ack".to_string(),
 	};
@@ -6267,7 +6267,7 @@ pub fn test_bump_txn_sanitize_tracking_maps() {
 	mine_transaction(&nodes[0], &revoked_local_txn[0]);
 	check_closed_broadcast!(nodes[0], true);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 1000000);
 	let penalty_txn = {
 		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -6313,11 +6313,11 @@ pub fn test_channel_conf_timeout() {
 	// The inbound node should fail the channel after exactly 2016 blocks
 	connect_blocks(&nodes[1], 2015);
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
 	connect_blocks(&nodes[1], 1);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::FundingTimedOut, [node_a_id], 1000000);
 	let close_ev = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(close_ev.len(), 1);
@@ -6507,16 +6507,16 @@ pub fn test_manually_accept_inbound_channel_request() {
 		.node
 		.unsafe_manual_funding_transaction_generated(temp_channel_id, node_b_id, funding_outpoint)
 		.unwrap();
-	check_added_monitors!(nodes[0], 0);
+	check_added_monitors(&nodes[0], 0);
 	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
 	nodes[1].node.handle_funding_created(node_a_id, &funding_created);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_channel_pending_event(&nodes[1], &node_a_id);
 
 	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id);
 	nodes[0].node.handle_funding_signed(node_b_id, &funding_signed);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let events = &nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 2);
 	match &events[0] {
@@ -6722,7 +6722,7 @@ pub fn test_onion_value_mpp_set_calculation() {
 			.node
 			.test_send_payment_internal(&route, hash, onion, None, id, amt, onion_session_privs)
 			.unwrap();
-		check_added_monitors!(nodes[0], expected_paths.len());
+		check_added_monitors(&nodes[0], expected_paths.len());
 
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), expected_paths.len());
@@ -6757,14 +6757,14 @@ pub fn test_onion_value_mpp_set_calculation() {
 		}
 
 		node.node.handle_update_add_htlc(prev_node.node.get_our_node_id(), &payment_event.msgs[0]);
-		check_added_monitors!(node, 0);
+		check_added_monitors(&node, 0);
 		commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
 		expect_pending_htlcs_forwardable!(node);
 
 		if idx == 0 {
 			let mut events_2 = node.node.get_and_clear_pending_msg_events();
 			assert_eq!(events_2.len(), 1);
-			check_added_monitors!(node, 1);
+			check_added_monitors(&node, 1);
 			payment_event = SendEvent::from_event(events_2.remove(0));
 			assert_eq!(payment_event.msgs.len(), 1);
 		} else {
@@ -6843,7 +6843,7 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){
 			.node
 			.test_send_payment_internal(&route, hash, onion, None, id, amt, onion_session_privs)
 			.unwrap();
-		check_added_monitors!(nodes[src_idx], expected_paths.len());
+		check_added_monitors(&nodes[src_idx], expected_paths.len());
 
 		let mut events = nodes[src_idx].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), expected_paths.len());
@@ -6919,7 +6919,7 @@ pub fn test_preimage_storage() {
 		let id = PaymentId(payment_hash.0);
 		nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 		let mut payment_event = SendEvent::from_event(events.pop().unwrap());
 		nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
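The MPP hunks above call the helper with `expected_paths.len()` rather than a literal count: each path of a multi-part send commits its own ChannelMonitor update, one per first-hop channel. The per-path send pattern, assembled from calls that appear verbatim in this file:

	// One monitor update per MPP path: a two-path route yields two updates.
	let id = PaymentId(payment_hash.0);
	let onion = RecipientOnionFields::secret_only(payment_secret);
	nodes[0].node.send_payment_with_route(route.clone(), payment_hash, onion, id).unwrap();
	check_added_monitors(&nodes[0], route.paths.len());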
@@ -6965,7 +6965,7 @@ pub fn test_bad_secret_hash() {
 	// resulting events.
 	macro_rules! handle_unknown_invalid_payment_data {
 		($payment_hash: expr) => {
-			check_added_monitors!(nodes[0], 1);
+			check_added_monitors(&nodes[0], 1);
 			let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 			let payment_event = SendEvent::from_event(events.pop().unwrap());
 			nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]);
@@ -6975,7 +6975,7 @@ pub fn test_bad_secret_hash() {
 			// again to process the pending backwards-failure of the HTLC
 			expect_pending_htlcs_forwardable!(nodes[1]);
 			expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive{ payment_hash: $payment_hash }]);
-			check_added_monitors!(nodes[1], 1);
+			check_added_monitors(&nodes[1], 1);
 
 			// We should fail the payment back
 			let mut events = nodes[1].node.get_and_clear_pending_msg_events();
@@ -7065,7 +7065,7 @@ pub fn test_update_err_monitor_lockdown() {
 	// Try to update ChannelMonitor
 	nodes[1].node.claim_funds(preimage);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash, 9_000_000);
 
 	let updates = get_htlc_update_msgs!(nodes[1], node_a_id);
@@ -7087,7 +7087,7 @@ pub fn test_update_err_monitor_lockdown() {
 		}
 	}
 	// Our local monitor is in-sync and hasn't yet processed the timeout
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 }
@@ -7176,7 +7176,7 @@ pub fn test_concurrent_monitor_claim() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let updates = get_htlc_update_msgs!(nodes[1], node_a_id);
 	assert_eq!(updates.update_add_htlcs.len(), 1);
@@ -7199,7 +7199,7 @@ pub fn test_concurrent_monitor_claim() {
 		}
 	}
 	// Our local monitor is in-sync and hasn't yet processed the timeout
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	//// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout
 	watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST);
@@ -7259,7 +7259,7 @@ pub fn test_pre_lockin_no_chan_closed_update() {
 	let (temp_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42);
 	nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).unwrap();
-	check_added_monitors!(nodes[0], 0);
+	check_added_monitors(&nodes[0], 0);
 
 	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
 	let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
@@ -7307,7 +7307,7 @@ pub fn test_htlc_no_detection() {
 	// this test before #653 fix.
 	chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1);
 	check_closed_broadcast!(nodes[0], true);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000);
 	connect_blocks(&nodes[0], TEST_FINAL_CLTV);
@@ -7382,7 +7382,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 		.force_close_broadcasting_latest_txn(&chan_ab.2, &counterparty_node_id, err)
 		.unwrap();
 	check_closed_broadcast!(nodes[force_closing_node], true);
-	check_added_monitors!(nodes[force_closing_node], 1);
+	check_added_monitors(&nodes[force_closing_node], 1);
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
 	check_closed_event!(nodes[force_closing_node], 1, reason, [counterparty_node_id], 100000);
@@ -7394,7 +7394,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]));
 	if broadcast_alice {
 		check_closed_broadcast!(nodes[1], true);
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 		let reason = ClosureReason::CommitmentTxConfirmed;
 		check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000);
 	}
@@ -7404,7 +7404,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	// Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
 	// process of removing the HTLC from their commitment transactions.
 	nodes[2].node.claim_funds(payment_preimage);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
 
 	let carol_updates = get_htlc_update_msgs!(nodes[2], node_b_id);
@@ -7432,7 +7432,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &carol_updates.commitment_signed);
 	// One monitor update for the preimage to update the Bob<->Alice channel, one monitor update
 	// Carol<->Bob's updated commitment transaction info.
-	check_added_monitors!(nodes[1], 2);
+	check_added_monitors(&nodes[1], 2);
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 2);
@@ -7452,9 +7452,9 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	};
 
 	nodes[2].node.handle_revoke_and_ack(node_b_id, &bob_revocation);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bob_updates.commitment_signed);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	let events = nodes[2].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -7466,7 +7466,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 		_ => panic!("Unexpected event"),
 	};
 	nodes[1].node.handle_revoke_and_ack(node_c_id, &carol_revocation);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	// If this test requires the force-closed channel to not be on-chain until after the fulfill,
 	// here's where we put said channel's commitment tx on-chain.
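For the settlement-after-close hunks above, the claim side of the flow is worth seeing in one place. The calls below are taken from this test; `handle_update_fulfill_htlc` is the message handler the fulfill eventually reaches on the previous hop:

	// Carol claims: one monitor update to store the preimage, then an
	// update_fulfill_htlc is queued for Bob, the previous hop.
	nodes[2].node.claim_funds(payment_preimage);
	check_added_monitors(&nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash, 3_000_000);
	let carol_updates = get_htlc_update_msgs!(nodes[2], node_b_id);
	nodes[1].node.handle_update_fulfill_htlc(node_c_id, &carol_updates.update_fulfill_htlcs[0]);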
@@ -7477,7 +7477,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	// If Bob was the one to force-close, he will have already passed these checks earlier.
 	if broadcast_alice {
 		check_closed_broadcast!(nodes[1], true);
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 		let reason = ClosureReason::CommitmentTxConfirmed;
 		check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000);
 	}
@@ -7618,7 +7618,7 @@ pub fn test_peer_funding_sidechannel() {
 	nodes[0].node.handle_funding_created(node_b_id, &funding_created_msg);
 	get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, node_b_id);
 	expect_channel_pending_event(&nodes[0], &node_b_id);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let res = nodes[0].node.funding_transaction_generated(temp_chan_id_ab, node_b_id, tx);
 	let err_msg = format!("{:?}", res.unwrap_err());
@@ -7666,13 +7666,13 @@ pub fn test_duplicate_conflicting_funding_from_second_peer() {
 	let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
 	nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg);
 	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_channel_pending_event(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg);
 	// At this point, the channel should be closed, after having generated one monitor write (the
 	// watch_channel call which failed), but zero monitor updates.
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	get_err_msg(&nodes[0], &node_b_id);
 
 	let reason = ClosureReason::ProcessingError { err: "Channel ID was a duplicate".to_owned() };
@@ -7772,7 +7772,7 @@ pub fn test_duplicate_chan_id() {
 	let (temp_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42);
 	nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).unwrap();
-	check_added_monitors!(nodes[0], 0);
+	check_added_monitors(&nodes[0], 0);
 
 	let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
 	let channel_id = ChannelId::v1_from_funding_txid(
@@ -7839,11 +7839,11 @@ pub fn test_duplicate_chan_id() {
 			panic!("Unexpected Channel phase")
 		}.unwrap()
 	};
-	check_added_monitors!(nodes[0], 0);
+	check_added_monitors(&nodes[0], 0);
 	nodes[1].node.handle_funding_created(node_a_id, &funding_created);
 	// At this point we'll look up if the channel_id is present and immediately fail the channel
 	// without trying to persist the `ChannelMonitor`.
-	check_added_monitors!(nodes[1], 0);
+	check_added_monitors(&nodes[1], 0);
 
 	let reason = ClosureReason::ProcessingError {
 		err: "Already had channel with the new channel_id".to_owned()
 	};
@@ -7919,7 +7919,7 @@ pub fn test_error_chans_closed() {
 	// Closing one channel doesn't impact others
 	nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	check_closed_broadcast!(nodes[0], false);
 
 	let reason =
@@ -7934,7 +7934,7 @@ pub fn test_error_chans_closed() {
 	// A null channel ID should close all channels
 	let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
 	nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() });
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 	let reason =
 		ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("ERR".to_string()) };
@@ -8003,11 +8003,11 @@ pub fn test_invalid_funding_tx() {
 	nodes[0].node.funding_transaction_generated_unchecked(temporary_channel_id, node_b_id, tx.clone(), 0).unwrap();
 	nodes[1].node.handle_funding_created(node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id));
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_channel_pending_event(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_funding_signed(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id));
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	expect_channel_pending_event(&nodes[0], &node_b_id);
 
 	let events_1 = nodes[0].node.get_and_clear_pending_events();
@@ -8023,7 +8023,7 @@ pub fn test_invalid_funding_tx() {
 	let reason = ClosureReason::ProcessingError { err: expected_err.to_string() };
 	check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_2.len(), 1);
 	if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
@@ -8086,17 +8086,17 @@ pub fn test_coinbase_funding_tx() {
 	let (channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &node_b_id, 100000, 42);
 	nodes[0].node.funding_transaction_generated(channel_id, node_b_id, tx.clone()).unwrap();
-	check_added_monitors!(nodes[0], 0);
+	check_added_monitors(&nodes[0], 0);
 
 	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
 	nodes[1].node.handle_funding_created(node_a_id, &funding_created);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_channel_pending_event(&nodes[1], &node_a_id);
 
 	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id);
 	nodes[0].node.handle_funding_signed(node_b_id, &funding_signed);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	expect_channel_pending_event(&nodes[0], &node_b_id);
 
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
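A count of zero is used as an assertion in its own right: it pins down steps that must not persist anything. The funding-handshake hunks above rely on this on both sides; schematically, using calls that appear in this patch:

	// The funder builds funding_created without persisting any monitor...
	nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).unwrap();
	check_added_monitors(&nodes[0], 0);
	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
	// ...while the fundee persists its first monitor upon receipt.
	nodes[1].node.handle_funding_created(node_a_id, &funding_created);
	check_added_monitors(&nodes[1], 1);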
@@ -8160,7 +8160,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 	check_closed_broadcast!(nodes[1], true);
 	let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
 	check_closed_event!(nodes[1], 1, reason, [node_c_id], 100000);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(node_txn.len(), 1);
@@ -8192,7 +8192,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t
 	nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
 		&nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1);
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: channel_id }]);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	let updates = get_htlc_update_msgs!(nodes[1], node_a_id);
 	assert!(updates.update_add_htlcs.is_empty());
@@ -8247,7 +8247,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 		let onion = RecipientOnionFields::secret_only(our_payment_secret);
 		let id = PaymentId(our_payment_hash.0);
 		nodes[0].node.send_payment_with_route(route.clone(), our_payment_hash, onion, id).unwrap();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
 		let mut payment_event = SendEvent::from_event(events.pop().unwrap());
@@ -8262,7 +8262,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 		let onion = RecipientOnionFields::secret_only(our_payment_secret);
 		let id = PaymentId(our_payment_secret.0);
 		nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
 		let mut payment_event = SendEvent::from_event(events.pop().unwrap());
@@ -8288,7 +8288,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 		expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
 		nodes[1].node.process_pending_htlc_forwards();
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 
 		let fail_updates_1 = get_htlc_update_msgs!(nodes[1], node_a_id);
 		assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
@@ -8307,7 +8307,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 		expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]);
 		nodes[1].node.process_pending_htlc_forwards();
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 
 		let fail_updates_1 = get_htlc_update_msgs!(nodes[1], node_a_id);
 		nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates_1.update_fail_htlcs[0]);
 		commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
@@ -8385,7 +8385,7 @@ pub fn test_inconsistent_mpp_params() {
 		.node
 		.test_send_payment_along_path(path_a, &hash, onion, real_amt, cur_height, id, &None, priv_a)
 		.unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -8402,7 +8402,7 @@ pub fn test_inconsistent_mpp_params() {
 		.node
 		.test_send_payment_along_path(path_b, &hash, onion, amt_b, cur_height, id, &None, priv_b)
 		.unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	{
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
@@ -8413,14 +8413,14 @@ pub fn test_inconsistent_mpp_params() {
 	commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
 	expect_pending_htlcs_forwardable!(nodes[2]);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	let mut events = nodes[2].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.pop().unwrap());
 	nodes[3].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]);
-	check_added_monitors!(nodes[3], 0);
+	check_added_monitors(&nodes[3], 0);
 	commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
 
 	// At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
@@ -8433,14 +8433,14 @@ pub fn test_inconsistent_mpp_params() {
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], [fail_type]);
 	nodes[3].node.process_pending_htlc_forwards();
-	check_added_monitors!(nodes[3], 1);
+	check_added_monitors(&nodes[3], 1);
 
 	let fail_updates_1 = get_htlc_update_msgs!(nodes[3], node_c_id);
 	nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]);
 	commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
 
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }]);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 
 	let fail_updates_2 = get_htlc_update_msgs!(nodes[2], node_a_id);
 	nodes[0].node.handle_update_fail_htlc(node_c_id, &fail_updates_2.update_fail_htlcs[0]);
@@ -8456,7 +8456,7 @@ pub fn test_inconsistent_mpp_params() {
 		.node
 		.test_send_payment_along_path(path_b, &hash, onion, real_amt, cur_height, id, &None, priv_c)
 		.unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
@@ -8518,7 +8518,7 @@ pub fn test_double_partial_claim() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(hash.0);
 	nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 2);
+	check_added_monitors(&nodes[0], 2);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 2);
@@ -8529,7 +8529,7 @@ pub fn test_double_partial_claim() {
 	// At this point nodes[3] has received one half of the payment, and the user goes to handle
 	// that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
 	nodes[3].node.claim_funds(payment_preimage);
-	check_added_monitors!(nodes[3], 0);
+	check_added_monitors(&nodes[3], 0);
 	assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
 }
 
@@ -8621,11 +8621,11 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 	nodes[0].node.funding_transaction_generated(chan_id, node_b_id, tx.clone()).unwrap();
 	nodes[1].node.handle_funding_created(node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id));
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_channel_pending_event(&nodes[1], &node_a_id);
 
 	nodes[0].node.handle_funding_signed(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id));
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	expect_channel_pending_event(&nodes[0], &node_b_id);
 
 	let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
@@ -8725,7 +8725,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 		let onion = RecipientOnionFields::secret_only(payment_secret);
 		let id = PaymentId(payment_hash.0);
 		nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 		let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
@@ -8763,7 +8763,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 			*feerate_lock = *feerate_lock * 10;
 		}
 		nodes[0].node.timer_tick_occurred();
-		check_added_monitors!(nodes[0], 1);
+		check_added_monitors(&nodes[0], 1);
 		nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1);
 	}
 
@@ -8893,7 +8893,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() {
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.remove(0));
@@ -8904,7 +8904,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() {
 	nodes[0].logger.assert_log("lightning::ln::channel",
 		format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", 2535000, 2530000), 1);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	// Clear the failed htlc
 	let updates = get_htlc_update_msgs!(nodes[0], node_b_id);
@@ -9058,7 +9058,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures)
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	let id = PaymentId(payment_hash.0);
 	nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let payment_event = SendEvent::from_event(events.remove(0));
@@ -9069,7 +9069,7 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures)
 	nodes[1].logger.assert_log("lightning::ln::channel",
 		format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", expected_dust_exposure_msat, expected_dust_exposure_msat - 1), 1);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	// Clear the failed htlc
 	let updates = get_htlc_update_msgs!(nodes[1], node_a_id);
@@ -9287,7 +9287,7 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash
 	let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap();
 	let onion = RecipientOnionFields::secret_only(payment_secret);
 	nodes[0].node.send_payment_with_route(route, hash, onion, PaymentId(hash.0)).unwrap();
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let mut payment_event = SendEvent::from_event(events.pop().unwrap());
@@ -9304,7 +9304,7 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash
 		let fail_type = HTLCHandlingFailureType::Receive { payment_hash: hash };
 		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [fail_type]);
-		check_added_monitors!(nodes[1], 1);
+		check_added_monitors(&nodes[1], 1);
 
 		let fail_updates = get_htlc_update_msgs!(nodes[1], node_a_id);
 		nodes[0].node.handle_update_fail_htlc(node_b_id, &fail_updates.update_fail_htlcs[0]);
@@ -9376,16 +9376,16 @@ pub fn test_disconnects_peer_awaiting_response_ticks() {
 	// We'll start by performing a fee update with Alice (nodes[0]) on the channel.
 	*nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
 	nodes[0].node.timer_tick_occurred();
-	check_added_monitors!(&nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let alice_fee_update = get_htlc_update_msgs(&nodes[0], &node_b_id);
 	nodes[1].node.handle_update_fee(node_a_id, alice_fee_update.update_fee.as_ref().unwrap());
 	nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &alice_fee_update.commitment_signed);
-	check_added_monitors!(&nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 
 	// This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
 	let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], node_a_id);
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &bob_revoke_and_ack);
-	check_added_monitors!(&nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bob_commitment_signed);
 	check_added_monitors(&nodes[0], 1);
@@ -9684,11 +9684,11 @@ fn do_test_multi_post_event_actions(do_reload: bool) {
 	let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
 
 	nodes[1].node.claim_funds(our_payment_preimage);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000);
 
 	nodes[2].node.claim_funds(payment_preimage_2);
-	check_added_monitors!(nodes[2], 1);
+	check_added_monitors(&nodes[2], 1);
 	expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000);
 
 	for dest in &[1, 2] {
@@ -10125,11 +10125,11 @@ pub fn test_manual_funding_abandon() {
 		.node
 		.unsafe_manual_funding_transaction_generated(temp_channel_id, node_b_id, funding_outpoint)
 		.unwrap();
-	check_added_monitors!(nodes[0], 0);
+	check_added_monitors(&nodes[0], 0);
 
 	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
 	nodes[1].node.handle_funding_created(node_a_id, &funding_created);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_channel_pending_event(&nodes[1], &node_a_id);
 
 	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id);
@@ -10173,16 +10173,16 @@ pub fn test_funding_signed_event() {
 		.node
 		.unsafe_manual_funding_transaction_generated(temp_channel_id, node_b_id, funding_outpoint)
 		.unwrap();
-	check_added_monitors!(nodes[0], 0);
+	check_added_monitors(&nodes[0], 0);
 
 	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
 	nodes[1].node.handle_funding_created(node_a_id, &funding_created);
-	check_added_monitors!(nodes[1], 1);
+	check_added_monitors(&nodes[1], 1);
 	expect_channel_pending_event(&nodes[1], &node_a_id);
 
 	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id);
 	nodes[0].node.handle_funding_signed(node_b_id, &funding_signed);
-	check_added_monitors!(nodes[0], 1);
+	check_added_monitors(&nodes[0], 1);
 	let events = &nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 2);
 	match &events[0] {

From 726ec928b6098e7dc4933bf279b87956bc3b6250 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Mon, 28 Apr 2025 02:22:25 +0000
Subject: [PATCH 25/25] Run `rustfmt` on `functional_tests.rs`

---
 lightning/src/ln/functional_tests.rs | 3336 +++++++++++++++++++-------
 1 file changed, 2413 insertions(+), 923 deletions(-)

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 4e305ab5a51..0c0bb0713cb 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
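The rustfmt pass below mostly reorders and rewraps imports: `use` blocks are alphabetized and, when a list exceeds the width limit, broken into a block with one group per line and a trailing comma. For example, taken directly from the hunk that follows:

	// Before: a single over-long line.
	use crate::ln::msgs::{AcceptChannel, BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, ErrorAction, MessageSendEvent};

	// After: sorted and block-wrapped.
	use crate::ln::msgs::{
		AcceptChannel, BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent,
		RoutingMessageHandler,
	};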
@@ -12,53 +12,76 @@
 //! claim outputs on-chain.
 
 use crate::chain;
-use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::chain::chaininterface::LowerBoundedFeeEstimator;
 use crate::chain::channelmonitor;
-use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE};
+use crate::chain::channelmonitor::{
+	Balance, ChannelMonitorUpdateStep, ANTI_REORG_DELAY, CLTV_CLAIM_BUFFER,
+	COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE, LATENCY_GRACE_PERIOD_BLOCKS,
+};
 use crate::chain::transaction::OutPoint;
-use crate::ln::onion_utils::LocalHTLCFailureReason;
-use crate::sign::{EntropySource, OutputSpender, SignerProvider};
+use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
 use crate::events::bump_transaction::WalletSource;
-use crate::events::{Event, FundingInfo, PathFailure, PaymentPurpose, ClosureReason, HTLCHandlingFailureType, PaymentFailureReason};
+use crate::events::{
+	ClosureReason, Event, FundingInfo, HTLCHandlingFailureType, PathFailure, PaymentFailureReason,
+	PaymentPurpose,
+};
+use crate::ln::chan_utils::{
+	commitment_tx_base_weight, htlc_success_tx_weight, htlc_timeout_tx_weight,
+	COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT,
+};
+use crate::ln::channel::{
+	get_holder_selected_channel_reserve_satoshis, Channel, ChannelError, InboundV1Channel,
+	OutboundV1Channel, COINBASE_MATURITY, DISCONNECT_PEER_AWAITING_RESPONSE_TICKS,
+	MIN_CHAN_DUST_LIMIT_SATOSHIS,
+};
+use crate::ln::channelmanager::{
+	self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT,
+	DISABLE_GOSSIP_TICKS, ENABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA,
+};
+use crate::ln::msgs;
+use crate::ln::msgs::{
+	AcceptChannel, BaseMessageHandler, ChannelMessageHandler, ErrorAction, MessageSendEvent,
+	RoutingMessageHandler,
+};
+use crate::ln::onion_utils::LocalHTLCFailureReason;
 use crate::ln::types::ChannelId;
-use crate::types::payment::{PaymentSecret, PaymentHash};
-use crate::ln::channel::{get_holder_selected_channel_reserve_satoshis, Channel, InboundV1Channel, OutboundV1Channel, COINBASE_MATURITY};
-use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
-use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError, MIN_CHAN_DUST_LIMIT_SATOSHIS};
 use crate::ln::{chan_utils, onion_utils};
-use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight};
 use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
-use crate::routing::router::{Path, PaymentParameters, Route, RouteHop, get_route, RouteParameters};
+use crate::routing::router::{
+	get_route, Path, PaymentParameters, Route, RouteHop, RouteParameters,
+};
+use crate::sign::{EntropySource, OutputSpender, SignerProvider};
 use crate::types::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
-use crate::ln::msgs;
-use crate::ln::msgs::{AcceptChannel, BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, ErrorAction, MessageSendEvent};
-use crate::util::test_channel_signer::TestChannelSigner;
-use crate::util::test_utils::{self, TestLogger, WatchtowerPersister};
+use crate::types::payment::{PaymentHash, PaymentSecret};
+use crate::util::config::{
+	ChannelConfigOverrides, ChannelConfigUpdate, ChannelHandshakeConfigUpdate, MaxDustHTLCExposure,
+	UserConfig,
+};
 use crate::util::errors::APIError;
-use crate::util::ser::{Writeable, ReadableArgs};
+use crate::util::ser::{ReadableArgs, Writeable};
 use crate::util::string::UntrustedString;
-use crate::util::config::{ChannelConfigOverrides, ChannelHandshakeConfigUpdate, ChannelConfigUpdate, MaxDustHTLCExposure, UserConfig};
+use crate::util::test_channel_signer::TestChannelSigner;
+use crate::util::test_utils::{self, TestLogger, WatchtowerPersister};
 
+use bitcoin::constants::ChainHash;
 use bitcoin::hash_types::BlockHash;
 use bitcoin::locktime::absolute::LockTime;
-use bitcoin::script::{Builder, ScriptBuf};
-use bitcoin::opcodes;
-use bitcoin::constants::ChainHash;
 use bitcoin::network::Network;
-use bitcoin::{Amount, Sequence, Transaction, TxIn, TxOut, Witness};
-use bitcoin::OutPoint as BitcoinOutPoint;
+use bitcoin::opcodes;
+use bitcoin::script::{Builder, ScriptBuf};
 use bitcoin::transaction::Version;
+use bitcoin::OutPoint as BitcoinOutPoint;
+use bitcoin::{Amount, Sequence, Transaction, TxIn, TxOut, Witness};
 
 use bitcoin::secp256k1::Secp256k1;
-use bitcoin::secp256k1::{PublicKey,SecretKey};
+use bitcoin::secp256k1::{PublicKey, SecretKey};
 
 use crate::io;
 use crate::prelude::*;
+use crate::sync::{Arc, Mutex, RwLock};
 use alloc::collections::BTreeSet;
-use core::iter::repeat;
 use bitcoin::hashes::Hash;
-use crate::sync::{Arc, Mutex, RwLock};
+use core::iter::repeat;
 use lightning_macros::xtest;
 
 use crate::ln::functional_test_utils::*;
@@ -90,7 +113,10 @@ fn test_channel_resumption_fail_post_funding() {
 	nodes[0].node.funding_transaction_generated(temp_chan_id, node_b_id, tx).unwrap();
 	nodes[0].node.peer_disconnected(node_b_id);
-	check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(new_chan_id, true, ClosureReason::DisconnectedPeer)]);
+	check_closed_events(
+		&nodes[0],
+		&[ExpectedCloseEvent::from_id_reason(new_chan_id, true, ClosureReason::DisconnectedPeer)],
+	);
 
 	// After ddf75afd16 we'd panic on reconnection if we exchanged funding info, so test that
 	// explicitly here.
@@ -120,52 +146,114 @@ pub fn test_insane_channel_opens() {
 	// Instantiate channel parameters where we push the maximum msats given our
 	// funding satoshis
 	let channel_value_sat = 31337; // same as funding satoshis
-	let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
+	let channel_reserve_satoshis =
+		get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
 	let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
 
 	// Have node0 initiate a channel to node1 with aforementioned parameters
 	nodes[0].node.create_channel(node_b_id, channel_value_sat, push_msat, 42, None, None).unwrap();
 
 	// Extract the channel open message from node0 to node1
-	let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
+	let open_channel_message =
+		get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 
 	// Test helper that asserts we get the correct error string given a mutator
 	// that supposedly makes the channel open message insane
-	let insane_open_helper = |expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
-		let open_channel_mutated = message_mutator(open_channel_message.clone());
-		nodes[1].node.handle_open_channel(node_a_id, &open_channel_mutated);
-		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
-		assert_eq!(msg_events.len(), 1);
-		let expected_regex = regex::Regex::new(expected_error_str).unwrap();
-		if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
-			match action {
-				&ErrorAction::SendErrorMessage { .. } => {
-					nodes[1].logger.assert_log_regex("lightning::ln::channelmanager", expected_regex, 1);
-				},
-				_ => panic!("unexpected event!"),
+	let insane_open_helper =
+		|expected_error_str: &str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
+			let open_channel_mutated = message_mutator(open_channel_message.clone());
+			nodes[1].node.handle_open_channel(node_a_id, &open_channel_mutated);
+			let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+			assert_eq!(msg_events.len(), 1);
+			let expected_regex = regex::Regex::new(expected_error_str).unwrap();
+			if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
+				match action {
+					&ErrorAction::SendErrorMessage { .. } => {
+						nodes[1].logger.assert_log_regex(
+							"lightning::ln::channelmanager",
+							expected_regex,
+							1,
+						);
+					},
+					_ => panic!("unexpected event!"),
+				}
+			} else {
+				assert!(false);
 			}
-		} else { assert!(false); }
-	};
+		};
 
 	use crate::ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
 
 	// Test all mutations that would make the channel open message insane
-	insane_open_helper(format!("Per our config, funding must be at most {}. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1, TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2; msg });
-	insane_open_helper(format!("Funding must be smaller than the total bitcoin supply. It was {}", TOTAL_BITCOIN_SUPPLY_SATOSHIS).as_str(), |mut msg| { msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS; msg });
+	insane_open_helper(
+		format!(
+			"Per our config, funding must be at most {}. It was {}",
+			TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1,
+			TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2
+		)
+		.as_str(),
+		|mut msg| {
+			msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS + 2;
+			msg
+		},
+	);
+	insane_open_helper(
+		format!(
+			"Funding must be smaller than the total bitcoin supply. It was {}",
+			TOTAL_BITCOIN_SUPPLY_SATOSHIS
+		)
+		.as_str(),
+		|mut msg| {
+			msg.common_fields.funding_satoshis = TOTAL_BITCOIN_SUPPLY_SATOSHIS;
+			msg
+		},
+	);
 
-	insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1; msg });
+	insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| {
+		msg.channel_reserve_satoshis = msg.common_fields.funding_satoshis + 1;
+		msg
+	});
 
-	insane_open_helper(r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)", |mut msg| { msg.push_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
+	insane_open_helper(
+		r"push_msat \d+ was larger than channel amount minus reserve \(\d+\)",
+		|mut msg| {
+			msg.push_msat =
+				(msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1;
+			msg
+		},
+	);
 
-	insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1 ; msg });
+	insane_open_helper("Peer never wants payout outputs?", |mut msg| {
+		msg.common_fields.dust_limit_satoshis = msg.common_fields.funding_satoshis + 1;
+		msg
+	});
 
-	insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.common_fields.htlc_minimum_msat = (msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
+	insane_open_helper(
+		r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)",
+		|mut msg| {
+			msg.common_fields.htlc_minimum_msat =
+				(msg.common_fields.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
+			msg
+		},
+	);
 
-	insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
+	insane_open_helper(
+		"They wanted our payments to be delayed by a needlessly long period",
+		|mut msg| {
+			msg.common_fields.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1;
+			msg
+		},
+	);
 
-	insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.common_fields.max_accepted_htlcs = 0; msg });
+	insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| {
+		msg.common_fields.max_accepted_htlcs = 0;
+		msg
+	});
 
-	insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.common_fields.max_accepted_htlcs = 484; msg });
+	insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| {
+		msg.common_fields.max_accepted_htlcs = 484;
+		msg
+	});
 }
 
 #[xtest(feature = "_externalize_tests")]
@@ -183,12 +271,23 @@ pub fn test_funding_exceeds_no_wumbo_limit() {
 	let node_b_id = nodes[1].node.get_our_node_id();
 
-	match nodes[0].node.create_channel(node_b_id, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1, 0, 42, None, None) {
+	match nodes[0].node.create_channel(
+		node_b_id,
+		MAX_FUNDING_SATOSHIS_NO_WUMBO + 1,
+		0,
+		42,
+		None,
+		None,
+	) {
 		Err(APIError::APIMisuseError { err }) => {
-			let exp_err = format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, MAX_FUNDING_SATOSHIS_NO_WUMBO + 1);
+			let exp_err = format!(
+				"funding_value must not exceed {}, it was {}",
+				MAX_FUNDING_SATOSHIS_NO_WUMBO,
+				MAX_FUNDING_SATOSHIS_NO_WUMBO + 1
+			);
 			assert_eq!(err, exp_err);
 		},
-		_ => panic!()
+		_ => panic!(),
 	}
 }
 
@@ -206,35 +305,50 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
 	let node_a_id = nodes[0].node.get_our_node_id();
 	let node_b_id = nodes[1].node.get_our_node_id();
 
-	if steps & 0b1000_0000 != 0{
+	if steps & 0b1000_0000 != 0 {
 		let block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new());
 		connect_block(&nodes[0], &block);
 		connect_block(&nodes[1], &block);
 	}
 
-	if steps & 0x0f == 0 { return; }
+	if steps & 0x0f == 0 {
+		return;
+	}
 	nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap();
 	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 
-	if steps & 0x0f == 1 { return; }
+	if steps & 0x0f == 1 {
+		return;
+	}
 	nodes[1].node.handle_open_channel(node_a_id, &open_channel);
 	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
 
-	if steps & 0x0f == 2 { return; }
+	if steps & 0x0f == 2 {
+		return;
+	}
 	nodes[0].node.handle_accept_channel(node_b_id, &accept_channel);
 
-	let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100000, 42);
+	let (temporary_channel_id, tx, _) =
+		create_funding_transaction(&nodes[0], &node_b_id, 100000, 42);
 
-	if steps & 0x0f == 3 { return; }
-	nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).unwrap();
+	if steps & 0x0f == 3 {
+		return;
+	}
+	nodes[0]
+		.node
+		.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone())
+		.unwrap();
 	check_added_monitors(&nodes[0], 0);
 	let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id);
 	let channel_id = ChannelId::v1_from_funding_txid(
-		funding_created.funding_txid.as_byte_array(), funding_created.funding_output_index
+		funding_created.funding_txid.as_byte_array(),
+		funding_created.funding_output_index,
 	);
 
-	if steps & 0x0f == 4 { return; }
+	if steps & 0x0f == 4 {
+		return;
+	}
 	nodes[1].node.handle_funding_created(node_a_id, &funding_created);
 	{
 		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
@@ -246,7 +360,9 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
 	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id);
 
-	if steps & 0x0f == 5 { return; }
+	if steps & 0x0f == 5 {
+		return;
+	}
 	nodes[0].node.handle_funding_signed(node_b_id, &funding_signed);
 	{
 		let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
@@ -259,10 +375,14 @@ fn do_test_sanity_on_in_flight_opens(steps: u8) {
 	let events_4 = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events_4.len(), 0);
 
-	if steps & 0x0f == 6 { return; }
+	if steps & 0x0f ==
6 { + return; + } create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2); - if steps & 0x0f == 7 { return; } + if steps & 0x0f == 7 { + return; + } confirm_transaction_at(&nodes[0], &tx, 2); connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH); create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]); @@ -365,13 +485,18 @@ pub fn fake_network_test() { cltv_expiry_delta: TEST_FINAL_CLTV, maybe_announced_channel: true, }); - hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; - hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; - let payment_params = PaymentParameters::from_node_id( - node_b_id, TEST_FINAL_CLTV - ).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; + hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; + let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1000000); - let route = Route { paths: vec![Path { hops, blinded_tail: None }], route_params: Some(route_params.clone()) }; + let route = Route { + paths: vec![Path { hops, blinded_tail: None }], + route_params: Some(route_params.clone()), + }; let path: &[_] = &[&nodes[2], &nodes[3], &nodes[1]]; let payment_preimage_1 = send_along_route(&nodes[1], route, path, 1000000).0; @@ -403,9 +528,12 @@ pub fn fake_network_test() { cltv_expiry_delta: TEST_FINAL_CLTV, maybe_announced_channel: true, }); - hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; - hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; - let route = Route { paths: vec![Path { hops, blinded_tail: None }], route_params: Some(route_params) }; + hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000; + hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000; + let route = + Route { paths: vec![Path { hops, blinded_tail: None }], route_params: Some(route_params) }; let path: &[_] = &[&nodes[3], &nodes[2], &nodes[1]]; let payment_hash_2 = send_along_route(&nodes[1], route, path, 1000000).1; @@ -455,7 +583,8 @@ pub fn duplicate_htlc_test() { create_announced_chan_between_nodes(&nodes, 3, 4); create_announced_chan_between_nodes(&nodes, 3, 5); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[3], &nodes[4]], 1000000); + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[3], &nodes[4]], 1000000); *nodes[0].network_payment_count.borrow_mut() -= 1; assert_eq!(route_payment(&nodes[1], &[&nodes[3]], 1000000).0, payment_preimage); @@ -492,8 +621,16 @@ pub fn test_duplicate_htlc_different_direction_onchain() { let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 900_000); let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_value_msats); - let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap(); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], payment_value_msats, payment_hash, node_a_payment_secret); + let node_a_payment_secret = + nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap(); + send_along_route_with_secret( + &nodes[1], + route, + &[&[&nodes[0]]], + payment_value_msats, + payment_hash, + node_a_payment_secret, + ); // Provide preimage to node 0 by claiming payment nodes[0].node.claim_funds(payment_preimage); @@ -526,31 +663,59 @@ pub fn test_duplicate_htlc_different_direction_onchain() { check_spends!(claim_txn[1], remote_txn[0]); check_spends!(claim_txn[2], remote_txn[0]); let preimage_tx = &claim_txn[0]; - let timeout_tx = claim_txn.iter().skip(1).find(|t| t.input[0].previous_output != preimage_tx.input[0].previous_output).unwrap(); - let preimage_bump_tx = claim_txn.iter().skip(1).find(|t| t.input[0].previous_output == preimage_tx.input[0].previous_output).unwrap(); + let timeout_tx = claim_txn + .iter() + .skip(1) + .find(|t| t.input[0].previous_output != preimage_tx.input[0].previous_output) + .unwrap(); + let preimage_bump_tx = claim_txn + .iter() + .skip(1) + .find(|t| t.input[0].previous_output == preimage_tx.input[0].previous_output) + .unwrap(); assert_eq!(preimage_tx.input.len(), 1); assert_eq!(preimage_bump_tx.input.len(), 1); assert_eq!(preimage_tx.input.len(), 1); assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx - assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), payment_value_sats); + assert_eq!( + remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), + payment_value_sats + ); assert_eq!(timeout_tx.input.len(), 1); assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx check_spends!(timeout_tx, remote_txn[0]); - assert_eq!(remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value.to_sat(), 900); + assert_eq!( + remote_txn[0].output[timeout_tx.input[0].previous_output.vout as usize].value.to_sat(), + 900 + ); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 3); for e in events { match e { MessageSendEvent::BroadcastChannelUpdate { .. } => {}, - MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::DisconnectPeer { ref msg } } => { + MessageSendEvent::HandleError { + node_id, + action: msgs::ErrorAction::DisconnectPeer { ref msg }, + } => { assert_eq!(node_id, node_b_id); assert_eq!(msg.as_ref().unwrap().data, "Channel closed because commitment or closing transaction was confirmed on chain."); }, - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. 
+ } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -577,11 +742,11 @@ pub fn test_inbound_outbound_capacity_is_not_zero() { assert_eq!(channels1.len(), 1); let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config); - assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000); - assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000); + assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve * 1000); + assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve * 1000); - assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000); - assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve*1000); + assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000 - reserve * 1000); + assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000 - reserve * 1000); } enum PostFailBackAction { @@ -619,11 +784,12 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); // Start every node on the same block height to make reasoning about timeouts easier - connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); - connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); - connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + connect_blocks(&nodes[0], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); // Force close the B<->C channel by timing out the HTLC let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1; @@ -644,11 +810,10 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks); // Check that nodes[1] fails the HTLC upstream - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCHandlingFailureType::Forward { - node_id: Some(node_c_id), - channel_id: chan_2.2 - }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); check_added_monitors(&nodes[1], 1); let htlc_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. 
} = htlc_updates; @@ -666,11 +831,13 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout connect_blocks(&nodes[1], ANTI_REORG_DELAY); // Expect handling another fail back event, but the HTLC is already gone - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 - }]); + }] + ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); }, PostFailBackAction::ClaimOnChain => { @@ -693,8 +860,10 @@ fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBac }, PostFailBackAction::FailOffChain => { nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], - vec![HTLCHandlingFailureType::Receive { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash }] + ); check_added_monitors(&nodes[2], 1); let commitment_update = get_htlc_update_msgs(&nodes[2], &node_b_id); let update_fail = commitment_update.update_fail_htlcs[0].clone(); @@ -741,11 +910,11 @@ pub fn channel_monitor_network_test() { let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4); // Make sure all nodes are at the same starting height - connect_blocks(&nodes[0], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); - connect_blocks(&nodes[1], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); - connect_blocks(&nodes[2], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); - connect_blocks(&nodes[3], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1); - connect_blocks(&nodes[4], 4*CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1); + connect_blocks(&nodes[0], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + connect_blocks(&nodes[3], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[3].best_block_info().1); + connect_blocks(&nodes[4], 4 * CHAN_CONFIRM_DEPTH + 1 - nodes[4].best_block_info().1); // Rebalance the network a bit by relaying one payment through all the channels... send_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], 8000000); @@ -778,17 +947,24 @@ pub fn channel_monitor_network_test() { check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); // One pending HTLC is discarded by the force-close: - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000); + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000); // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not // broadcasted until we reach the timelock time). 
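// A minimal sketch of the timeout arithmetic used a few lines below, where the
// test connects TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS +
// MIN_CLTV_EXPIRY_DELTA + 1 blocks: roughly the receiver's CLTV budget, plus
// the grace period a forwarding node allows before going on chain, plus one
// hop's expiry delta. The constant values here are illustrative assumptions
// for the sketch, not LDK's actual definitions.
const TEST_FINAL_CLTV_ASSUMED: u32 = 70; // assumed value, for illustration only
const LATENCY_GRACE_PERIOD_BLOCKS_ASSUMED: u32 = 3; // assumed
const MIN_CLTV_EXPIRY_DELTA_ASSUMED: u32 = 34; // assumed

// Blocks to connect past the starting height before the holder's broadcast
// HTLC-Timeout claim becomes valid under the commitment's timelocks.
fn blocks_until_htlc_timeout_claim() -> u32 {
    TEST_FINAL_CLTV_ASSUMED + LATENCY_GRACE_PERIOD_BLOCKS_ASSUMED + MIN_CLTV_EXPIRY_DELTA_ASSUMED + 1
}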
let error_message = "Channel force-closed"; - nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &node_c_id, error_message.to_string()).unwrap(); + nodes[1] + .node + .force_close_broadcasting_latest_txn(&chan_2.2, &node_c_id, error_message.to_string()) + .unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); { let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::NONE); - connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1); + connect_blocks( + &nodes[1], + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1, + ); test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); mine_transaction(&nodes[2], &node_txn[0]); check_added_monitors(&nodes[2], 1); @@ -802,24 +978,27 @@ pub fn channel_monitor_network_test() { check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); macro_rules! claim_funds { - ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => { - { - $node.node.claim_funds($preimage); - expect_payment_claimed!($node, $payment_hash, 3_000_000); - check_added_monitors(&$node, 1); - - let events = $node.node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 1); - match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => { - assert!(update_add_htlcs.is_empty()); - assert!(update_fail_htlcs.is_empty()); - assert_eq!(*node_id, $prev_node.node.get_our_node_id()); - }, - _ => panic!("Unexpected event"), - }; - } - } + ($node: expr, $prev_node: expr, $preimage: expr, $payment_hash: expr) => {{ + $node.node.claim_funds($preimage); + expect_payment_claimed!($node, $payment_hash, 3_000_000); + check_added_monitors(&$node, 1); + + let events = $node.node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::UpdateHTLCs { + ref node_id, + channel_id: _, + updates: + msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. }, + } => { + assert!(update_add_htlcs.is_empty()); + assert!(update_fail_htlcs.is_empty()); + assert_eq!(*node_id, $prev_node.node.get_our_node_id()); + }, + _ => panic!("Unexpected event"), + }; + }}; } // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2] @@ -853,7 +1032,8 @@ pub fn channel_monitor_network_test() { let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&chan_3.2); // One pending HTLC to time out: - let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000); + let (payment_preimage_2, payment_hash_2, ..) = + route_payment(&nodes[3], &[&nodes[4]], 3_000_000); // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for // buffer space). @@ -862,13 +1042,14 @@ pub fn channel_monitor_network_test() { let events = nodes[3].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); let close_chan_update_1 = match events[1] { - MessageSendEvent::BroadcastChannelUpdate { ref msg } => { - msg.clone() - }, + MessageSendEvent::BroadcastChannelUpdate { ref msg } => msg.clone(), _ => panic!("Unexpected event"), }; match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => { + MessageSendEvent::HandleError { + action: ErrorAction::DisconnectPeer { .. 
}, + node_id, + } => { assert_eq!(node_id, node_e_id); }, _ => panic!("Unexpected event"), @@ -881,7 +1062,9 @@ pub fn channel_monitor_network_test() { node_txn.retain(|tx| { if tx.input[0].previous_output.txid == node2_commitment_txid { false - } else { true } + } else { + true + } }); } @@ -894,13 +1077,14 @@ pub fn channel_monitor_network_test() { let events = nodes[4].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); let close_chan_update_2 = match events[1] { - MessageSendEvent::BroadcastChannelUpdate { ref msg } => { - msg.clone() - }, + MessageSendEvent::BroadcastChannelUpdate { ref msg } => msg.clone(), _ => panic!("Unexpected event"), }; match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id } => { + MessageSendEvent::HandleError { + action: ErrorAction::DisconnectPeer { .. }, + node_id, + } => { assert_eq!(node_id, node_d_id); }, _ => panic!("Unexpected event"), @@ -920,8 +1104,10 @@ pub fn channel_monitor_network_test() { assert_eq!(nodes[3].node.list_channels().len(), 0); assert_eq!(nodes[4].node.list_channels().len(), 0); - assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(chan_3.2, chan_3_mon), - Ok(ChannelMonitorUpdateStatus::Completed)); + assert_eq!( + nodes[3].chain_monitor.chain_monitor.watch_channel(chan_3.2, chan_3_mon), + Ok(ChannelMonitorUpdateStatus::Completed) + ); check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [node_id_4], 100000); } @@ -960,9 +1146,15 @@ pub fn test_justice_tx_htlc_timeout() { assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.compute_txid()); assert_eq!(revoked_local_txn[0].output.len(), 2); // Only HTLC and output back to 0 are present assert_eq!(revoked_local_txn[1].input.len(), 1); - assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].compute_txid()); - assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout - // Revoke the old state + assert_eq!( + revoked_local_txn[1].input[0].previous_output.txid, + revoked_local_txn[0].compute_txid() + ); + assert_eq!( + revoked_local_txn[1].input[0].witness.last().unwrap().len(), + OFFERED_HTLC_SCRIPT_WEIGHT + ); // HTLC-Timeout + // Revoke the old state claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); { @@ -985,12 +1177,21 @@ pub fn test_justice_tx_htlc_timeout() { connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires // Verify broadcast of revoked HTLC-timeout - let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT); + let node_txn = test_txn_broadcast( + &nodes[0], + &chan_5, + Some(revoked_local_txn[0].clone()), + HTLCType::TIMEOUT, + ); check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); // Broadcast revoked HTLC-timeout on node 1 mine_transaction(&nodes[1], &node_txn[1]); - test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone(), revoked_local_txn[0].clone()); + test_revoked_htlc_claim_txn_broadcast( + &nodes[1], + node_txn[1].clone(), + revoked_local_txn[0].clone(), + ); } get_announce_close_broadcast_events(&nodes, 0, 1); assert_eq!(nodes[0].node.list_channels().len(), 0); @@ -1049,11 +1250,20 @@ pub fn test_justice_tx_htlc_success() { mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); - let node_txn = 
test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS); + let node_txn = test_txn_broadcast( + &nodes[1], + &chan_6, + Some(revoked_local_txn[0].clone()), + HTLCType::SUCCESS, + ); check_added_monitors(&nodes[1], 1); mine_transaction(&nodes[0], &node_txn[1]); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); - test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone(), revoked_local_txn[0].clone()); + test_revoked_htlc_claim_txn_broadcast( + &nodes[0], + node_txn[1].clone(), + revoked_local_txn[0].clone(), + ); } get_announce_close_broadcast_events(&nodes, 0, 1); assert_eq!(nodes[0].node.list_channels().len(), 0); @@ -1111,8 +1321,10 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: let chanmon_cfgs = create_chanmon_cfgs(2); let destination_script0 = chanmon_cfgs[0].keys_manager.get_destination_script([0; 32]).unwrap(); let destination_script1 = chanmon_cfgs[1].keys_manager.get_destination_script([0; 32]).unwrap(); - let persisters = vec![WatchtowerPersister::new(destination_script0), - WatchtowerPersister::new(destination_script1)]; + let persisters = vec![ + WatchtowerPersister::new(destination_script0), + WatchtowerPersister::new(destination_script1), + ]; let node_cfgs = create_node_cfgs_with_persisters(2, &chanmon_cfgs, persisters.iter().collect()); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -1136,7 +1348,8 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: // Send another payment, now revoking the previous commitment tx send_payment(&nodes[0], &[&nodes[1]], 5_000_000); - let justice_tx = persisters[1].justice_tx(channel_id, &revoked_commitment_tx.compute_txid()).unwrap(); + let justice_tx = + persisters[1].justice_tx(channel_id, &revoked_commitment_tx.compute_txid()).unwrap(); check_spends!(justice_tx, revoked_commitment_tx); mine_transactions(&nodes[1], &[revoked_commitment_tx, &justice_tx]); @@ -1153,19 +1366,23 @@ fn do_test_forming_justice_tx_from_monitor_updates(broadcast_initial_commitment: // Check that the justice tx has sent the revoked output value to nodes[1] let monitor = get_monitor!(nodes[1], channel_id); - let total_claimable_balance = monitor.get_claimable_balances().iter().fold(0, |sum, balance| { - match balance { - channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. } => sum + amount_satoshis, + let total_claimable_balance = + monitor.get_claimable_balances().iter().fold(0, |sum, balance| match balance { + channelmonitor::Balance::ClaimableAwaitingConfirmations { amount_satoshis, .. 
} => { + sum + amount_satoshis + }, _ => panic!("Unexpected balance type"), - } - }); + }); // On the first commitment, node[1]'s balance was below dust so it didn't have an output - let node1_channel_balance = if broadcast_initial_commitment { 0 } else { revoked_commitment_tx.output[0].value.to_sat() }; + let node1_channel_balance = if broadcast_initial_commitment { + 0 + } else { + revoked_commitment_tx.output[0].value.to_sat() + }; let expected_claimable_balance = node1_channel_balance + justice_tx.output[0].value.to_sat(); assert_eq!(total_claimable_balance, expected_claimable_balance); } - #[xtest(feature = "_externalize_tests")] pub fn claim_htlc_outputs() { // Node revoked old state, htlcs haven't timed out yet, claim them in shared justice tx @@ -1185,7 +1402,8 @@ pub fn claim_htlc_outputs() { send_payment(&nodes[0], &[&nodes[1]], 8_000_000); // node[0] is going to revoke an old state, thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0; - let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000); + let (_payment_preimage_2, payment_hash_2, ..) = + route_payment(&nodes[1], &[&nodes[0]], 3_000_000); // Get the will-be-revoked local txn from node[0] let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2); @@ -1193,8 +1411,14 @@ pub fn claim_htlc_outputs() { assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.compute_txid()); assert_eq!(revoked_local_txn[1].input.len(), 1); - assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].compute_txid()); - assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout + assert_eq!( + revoked_local_txn[1].input[0].previous_output.txid, + revoked_local_txn[0].compute_txid() + ); + assert_eq!( + revoked_local_txn[1].input[0].witness.last().unwrap().len(), + OFFERED_HTLC_SCRIPT_WEIGHT + ); // HTLC-Timeout check_spends!(revoked_local_txn[1], revoked_local_txn[0]); // Revoke the old state. @@ -1216,10 +1440,14 @@ pub fn claim_htlc_outputs() { // The ChannelMonitor should claim the accepted HTLC output separately from the offered // HTLC and to_self outputs. let accepted_claim = node_txn.iter().filter(|tx| tx.input.len() == 1).next().unwrap(); - let offered_to_self_claim = node_txn.iter().filter(|tx| tx.input.len() == 2).next().unwrap(); + let offered_to_self_claim = + node_txn.iter().filter(|tx| tx.input.len() == 2).next().unwrap(); check_spends!(accepted_claim, revoked_local_txn[0]); check_spends!(offered_to_self_claim, revoked_local_txn[0]); - assert_eq!(accepted_claim.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + accepted_claim.input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); let mut witness_lens = BTreeSet::new(); witness_lens.insert(offered_to_self_claim.input[0].witness.last().unwrap().len()); @@ -1439,17 +1667,11 @@ pub fn test_multiple_package_conflicts() { // macro doesn't work properly and we must process the first update_fulfill_htlc manually. 
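// The lines that follow replay that dance by hand. A rough sketch of the
// message ordering being driven (an illustrative enum only; these are not
// LDK's message types, and the counter-leg names are my own labels):
#[derive(Debug, Clone, Copy)]
enum DanceStep {
    UpdateFulfillHtlc,       // B -> A: releases the payment preimage
    CommitmentSigned,        // B -> A: commits A to the post-claim state
    RevokeAndAck,            // A -> B: revokes A's prior commitment
    CounterCommitmentSigned, // A -> B: commits B to its new state
    CounterRevokeAndAck,     // B -> A: revokes B's prior commitment
}

// Order in which the manual exchange below delivers the messages.
fn manual_dance_order() -> [DanceStep; 5] {
    use DanceStep::*;
    [UpdateFulfillHtlc, CommitmentSigned, RevokeAndAck, CounterCommitmentSigned, CounterRevokeAndAck]
}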
let updates = get_htlc_update_msgs(&nodes[1], &node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &updates.update_fulfill_htlcs[0], - ); - nodes[0] - .node - .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); check_added_monitors(&nodes[0], 1); - let (revoke_ack, commit_signed) = - get_revoke_commit_msgs(&nodes[0], &node_b_id); + let (revoke_ack, commit_signed) = get_revoke_commit_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &revoke_ack); nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commit_signed); check_added_monitors(&nodes[1], 4); @@ -1468,10 +1690,7 @@ pub fn test_multiple_package_conflicts() { _ => panic!("Unexpected event"), }; assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &updates.update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); expect_payment_sent!(nodes[0], preimage_2); @@ -1531,7 +1750,8 @@ pub fn test_htlc_on_chain_success() { let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); // Ensure all nodes are at the same height - let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + let node_max_height = + nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); @@ -1540,8 +1760,10 @@ pub fn test_htlc_on_chain_success() { send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); - let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); - let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + let (our_payment_preimage, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + let (our_payment_preimage_2, payment_hash_2, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); // Broadcast legit commitment tx from C on B's chain // Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain @@ -1567,8 +1789,14 @@ pub fn test_htlc_on_chain_success() { assert_eq!(node_txn.len(), 2); check_spends!(node_txn[0], commitment_tx[0]); check_spends!(node_txn[1], commitment_tx[0]); - assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + node_txn[0].input[0].witness.clone().last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); + assert_eq!( + node_txn[1].input[0].witness.clone().last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); assert!(node_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output assert!(node_txn[1].output[0].script_pubkey.is_p2wsh()); // revokeable output assert_eq!(node_txn[0].lock_time, LockTime::ZERO); @@ -1587,13 +1815,18 @@ pub fn test_htlc_on_chain_success() { let forwarded_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(forwarded_events.len(), 3); match forwarded_events[0] { - Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}, _ => panic!("Unexpected event"), } let chan_id = Some(chan_1.2); match forwarded_events[1] { - Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. + Event::PaymentForwarded { + total_fee_earned_msat, + prev_channel_id, + claim_from_onchain_tx, + next_channel_id, + outbound_amount_forwarded_msat, + .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, chan_id); @@ -1601,11 +1834,16 @@ pub fn test_htlc_on_chain_success() { assert_eq!(next_channel_id, Some(chan_2.2)); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, - _ => panic!() + _ => panic!(), } match forwarded_events[2] { - Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. + Event::PaymentForwarded { + total_fee_earned_msat, + prev_channel_id, + claim_from_onchain_tx, + next_channel_id, + outbound_amount_forwarded_msat, + .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, chan_id); @@ -1613,7 +1851,7 @@ pub fn test_htlc_on_chain_success() { assert_eq!(next_channel_id, Some(chan_2.2)); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, - _ => panic!() + _ => panic!(), } let mut events = nodes[1].node.get_and_clear_pending_msg_events(); { @@ -1634,7 +1872,18 @@ pub fn test_htlc_on_chain_success() { } match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fail_htlcs, + ref update_fulfill_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. + } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -1651,7 +1900,7 @@ pub fn test_htlc_on_chain_success() { } macro_rules! 
check_tx_local_broadcast { - ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { { + ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => {{ let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap(); // HTLC timeout claims for non-anchor channels are only aggregated when claimed from the // remote commitment transaction. @@ -1660,20 +1909,32 @@ pub fn test_htlc_on_chain_success() { for tx in node_txn.iter() { check_spends!(tx, $commitment_tx); assert_ne!(tx.lock_time, LockTime::ZERO); - assert_eq!(tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + tx.input[0].witness.last().unwrap().len(), + OFFERED_HTLC_SCRIPT_WEIGHT + ); assert!(tx.output[0].script_pubkey.is_p2wsh()); // revokeable output } - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); + assert_ne!( + node_txn[0].input[0].previous_output, + node_txn[1].input[0].previous_output + ); } else { assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], $commitment_tx); assert_ne!(node_txn[0].lock_time, LockTime::ZERO); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + node_txn[0].input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); assert!(node_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment - assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output); + assert_ne!( + node_txn[0].input[0].previous_output, + node_txn[0].input[1].previous_output + ); } node_txn.clear(); - } } + }}; } // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success. check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]); @@ -1688,21 +1949,20 @@ pub fn test_htlc_on_chain_success() { check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 100000); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); assert!(node_txn.len() == 1 || node_txn.len() == 2); // HTLC-Success, RBF bump of above aggregated HTLC txn - let commitment_spend = - if node_txn.len() == 1 { + let commitment_spend = if node_txn.len() == 1 { + &node_txn[0] + } else { + // Certain `ConnectStyle`s will cause RBF bumps of the previous HTLC transaction to be broadcast. + // FullBlockViaListen + assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); + if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].compute_txid() { + check_spends!(node_txn[1], commitment_tx[0]); &node_txn[0] } else { - // Certain `ConnectStyle`s will cause RBF bumps of the previous HTLC transaction to be broadcast. - // FullBlockViaListen - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); - if node_txn[0].input[0].previous_output.txid == node_a_commitment_tx[0].compute_txid() { - check_spends!(node_txn[1], commitment_tx[0]); - &node_txn[0] - } else { - check_spends!(node_txn[0], commitment_tx[0]); - &node_txn[1] - } - }; + check_spends!(node_txn[0], commitment_tx[0]); + &node_txn[1] + } + }; check_spends!(commitment_spend, node_a_commitment_tx[0]); assert_eq!(commitment_spend.input.len(), 2); @@ -1772,20 +2032,35 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); - let (_payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + let (_payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); // Broadcast legit commitment tx from C on B's chain let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2); check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.fail_htlc_backwards(&payment_hash); check_added_monitors(&nodes[2], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }] + ); check_added_monitors(&nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. + } => { assert!(update_add_htlcs.is_empty()); assert!(!update_fail_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); @@ -1806,19 +2081,27 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { mine_transaction(&nodes[1], &commitment_tx[0]); let reason = ClosureReason::CommitmentTxConfirmed; check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100000); - let htlc_expiry = get_monitor!(nodes[1], chan_2.2).get_claimable_balances().iter().filter_map(|bal| + let htlc_expiry = get_monitor!(nodes[1], chan_2.2) + .get_claimable_balances() + .iter() + .filter_map(|bal| { if let Balance::MaybeTimeoutClaimableHTLC { claimable_height, .. } = bal { Some(*claimable_height) } else { None } - ).next().unwrap(); + }) + .next() + .unwrap(); connect_blocks(&nodes[1], htlc_expiry - nodes[1].best_block_info().1); let timeout_tx = { let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); assert_eq!(txn.len(), 1); txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0])); - assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + txn[0].clone().input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); txn.remove(0) }; @@ -1834,12 +2117,26 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fail_htlcs, + ref update_fulfill_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. 
+ } => { assert!(update_add_htlcs.is_empty()); assert!(!update_fail_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); @@ -1862,7 +2159,10 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // 1 timeout tx assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], commitment_tx[0]); - assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1); + assert_eq!( + node_txn[0].clone().input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + 1 + ); } #[xtest(feature = "_externalize_tests")] @@ -1890,7 +2190,8 @@ pub fn test_simple_commitment_revoked_fail_backward() { create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); + let (payment_preimage, _payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000); // Get the will-be-revoked local txn from nodes[2] let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2); // Revoke the old state @@ -1904,12 +2205,27 @@ pub fn test_simple_commitment_revoked_fail_backward() { check_added_monitors(&nodes[1], 1); check_closed_broadcast!(nodes[1], true); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); check_added_monitors(&nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fail_htlcs, + ref update_fulfill_htlcs, + ref update_fail_malformed_htlcs, + ref commitment_signed, + .. + }, + .. + } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); assert!(update_fulfill_htlcs.is_empty()); @@ -1925,7 +2241,9 @@ pub fn test_simple_commitment_revoked_fail_backward() { } } -fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) { +fn do_test_commitment_revoked_fail_backward_exhaustive( + deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool, +) { // Test that if our counterparty broadcasts a revoked commitment transaction we fail all // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest // commitment transaction anymore. @@ -1955,7 +2273,8 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); let amt = if no_to_remote { 10_000 } else { 3_000_000 }; - let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], amt); + let (payment_preimage, _payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], amt); // Get the will-be-revoked local txn from nodes[2] let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2); assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 }); @@ -1970,14 +2289,19 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let chan = get_channel_ref!(nodes[2], nodes[1], per_peer_state_lock, peer_state_lock, chan_2.2); chan.context().holder_dust_limit_satoshis * 1000 - } else { 3000000 }; + } else { + 3000000 + }; let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); nodes[2].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash: first_payment_hash }] + ); check_added_monitors(&nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); @@ -1991,7 +2315,10 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // Drop the last RAA from 3 -> 2 nodes[2].node.fail_htlc_backwards(&second_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: second_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash: second_payment_hash }] + ); check_added_monitors(&nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); @@ -2008,7 +2335,10 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use check_added_monitors(&nodes[2], 1); nodes[2].node.fail_htlc_backwards(&third_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: third_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash: third_payment_hash }] + ); check_added_monitors(&nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); @@ -2028,7 +2358,8 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting // on nodes[2]'s RAA. - let (route, fourth_payment_hash, _, fourth_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 1000000); + let (route, fourth_payment_hash, _, fourth_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[2], 1000000); let onion = RecipientOnionFields::secret_only(fourth_payment_secret); let id = PaymentId(fourth_payment_hash.0); nodes[1].node.send_payment_with_route(route, fourth_payment_hash, onion, id).unwrap(); @@ -2044,11 +2375,11 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::HTLCHandlingFailed { .. 
} => { }, + Event::HTLCHandlingFailed { .. } => {}, _ => panic!("Unexpected event"), } match events[1] { - Event::PendingHTLCsForwardable { .. } => { }, + Event::PendingHTLCsForwardable { .. } => {}, _ => panic!("Unexpected event"), }; // Deliberately don't process the pending fail-back so they all fail back at once after @@ -2086,7 +2417,18 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use if deliver_bs_raa { let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events); match nodes_2_event { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fail_htlcs, + ref update_fulfill_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. + } => { assert_eq!(node_c_id, *node_id); assert_eq!(update_add_htlcs.len(), 1); assert!(update_fulfill_htlcs.is_empty()); @@ -2099,16 +2441,35 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events); match nodes_2_event { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, .. } => { + MessageSendEvent::HandleError { + action: + ErrorAction::DisconnectPeer { msg: Some(msgs::ErrorMessage { channel_id, ref data }) }, + .. + } => { assert_eq!(channel_id, chan_2.2); - assert_eq!(data.as_str(), "Channel closed because commitment or closing transaction was confirmed on chain."); + assert_eq!( + data.as_str(), + "Channel closed because commitment or closing transaction was confirmed on chain." + ); }, _ => panic!("Unexpected event"), } let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut events); match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fail_htlcs, + ref update_fulfill_htlcs, + ref update_fail_malformed_htlcs, + ref commitment_signed, + .. + }, + .. + } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 3); assert!(update_fulfill_htlcs.is_empty()); @@ -2129,7 +2490,10 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // If we delivered B's RAA we got an unknown preimage error, not something // that we should update our routing table for. if !deliver_bs_raa { - if let PathFailure::OnPath { network_update: Some(_) } = failure { } else { panic!("Unexpected path failure") } + if let PathFailure::OnPath { network_update: Some(_) } = failure { + } else { + panic!("Unexpected path failure") + } } }, _ => panic!("Unexpected event"), @@ -2141,7 +2505,11 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use _ => panic!("Unexpected event"), } match events[2] { - Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => { + Event::PaymentPathFailed { + ref payment_hash, + failure: PathFailure::OnPath { network_update: Some(_) }, + .. 
+ } => { assert!(failed_htlcs.insert(payment_hash.0)); }, _ => panic!("Unexpected event"), @@ -2153,7 +2521,11 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use _ => panic!("Unexpected event"), } match events[4] { - Event::PaymentPathFailed { ref payment_hash, failure: PathFailure::OnPath { network_update: Some(_) }, .. } => { + Event::PaymentPathFailed { + ref payment_hash, + failure: PathFailure::OnPath { network_update: Some(_) }, + .. + } => { assert!(failed_htlcs.insert(payment_hash.0)); }, _ => panic!("Unexpected event"), @@ -2208,10 +2580,11 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { // Alice -> Bob: Route a payment but without Bob sending revoke_and_ack. { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); - nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors(&nodes[0], 1); let payment_event = { @@ -2224,7 +2597,8 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { } // Alice -> Bob: Route another payment but now Alice waits for Bob's earlier revoke_and_ack. - let (route, failed_payment_hash, _, failed_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); + let (route, failed_payment_hash, _, failed_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 50_000); { let onion = RecipientOnionFields::secret_only(failed_payment_secret); let id = PaymentId(failed_payment_hash.0); @@ -2236,16 +2610,28 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { // Alice <- Bob: Send a malformed update_add_htlc so Alice fails the channel. { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 50_000); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 50_000); let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); let current_height = nodes[1].node.best_block.read().unwrap().height + 1; let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); let (onion_payloads, _amount_msat, cltv_expiry) = onion_utils::build_onion_payloads( - &route.paths[0], 50_000, &recipient_onion_fields, current_height, &None, None, None).unwrap(); - let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); - let onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap(); + &route.paths[0], + 50_000, + &recipient_onion_fields, + current_height, + &None, + None, + None, + ) + .unwrap(); + let onion_keys = + onion_utils::construct_onion_keys(&secp_ctx, &route.paths[0], &session_priv); + let onion_routing_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash) + .unwrap(); // Send a 0-msat update_add_htlc to fail the channel. 
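// Why a 0-msat HTLC is fatal: BOLT 2 requires update_add_htlc's amount_msat
// to be nonzero, so the recipient treats zero as a protocol violation and
// fails the channel, which is exactly what the hand-built HTLC below
// provokes. A minimal sketch of that receiver-side check (the struct and
// error strings are illustrative, not LDK's real types or log messages):
struct SketchHtlc {
    amount_msat: u64,
}

fn validate_inbound_htlc(htlc: &SketchHtlc, htlc_minimum_msat: u64) -> Result<(), &'static str> {
    if htlc.amount_msat == 0 {
        return Err("remote sent a 0-msat HTLC; failing the channel");
    }
    if htlc.amount_msat < htlc_minimum_msat {
        return Err("HTLC below our advertised htlc_minimum_msat");
    }
    Ok(())
}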
let update_add_htlc = msgs::UpdateAddHTLC { @@ -2306,7 +2692,14 @@ pub fn test_htlc_ignore_latest_remote_commitment() { let funding_tx = create_announced_chan_between_nodes(&nodes, 0, 1).3; let error_message = "Channel force-closed"; route_payment(&nodes[0], &[&nodes[1]], 10000000); - nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &node_b_id, error_message.to_string()).unwrap(); + nodes[0] + .node + .force_close_broadcasting_latest_txn( + &nodes[0].node.list_channels()[0].channel_id, + &node_b_id, + error_message.to_string(), + ) + .unwrap(); connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); @@ -2344,7 +2737,8 @@ pub fn test_force_close_fail_back() { create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); - let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); + let (route, our_payment_hash, our_payment_preimage, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); let mut payment_event = { let onion = RecipientOnionFields::secret_only(our_payment_secret); @@ -2378,7 +2772,10 @@ pub fn test_force_close_fail_back() { // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). let error_message = "Channel force-closed"; let channel_id = payment_event.commitment_msg[0].channel_id; - nodes[2].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, error_message.to_string()).unwrap(); + nodes[2] + .node + .force_close_broadcasting_latest_txn(&channel_id, &node_b_id, error_message.to_string()) + .unwrap(); check_closed_broadcast!(nodes[2], true); check_added_monitors(&nodes[2], 1); let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; @@ -2402,15 +2799,20 @@ pub fn test_force_close_fail_back() { // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. { - get_monitor!(nodes[2], channel_id) - .provide_payment_preimage_unsafe_legacy( - &our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, - &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger - ); + get_monitor!(nodes[2], channel_id).provide_payment_preimage_unsafe_legacy( + &our_payment_hash, + &our_payment_preimage, + &node_cfgs[2].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), + &node_cfgs[2].logger, + ); } mine_transaction(&nodes[2], &commitment_tx); let mut node_txn = nodes[2].tx_broadcaster.txn_broadcast(); - assert_eq!(node_txn.len(), if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 }); + assert_eq!( + node_txn.len(), + if nodes[2].connect_style.borrow().updates_best_block_first() { 2 } else { 1 } + ); let htlc_tx = node_txn.pop().unwrap(); assert_eq!(htlc_tx.input.len(), 1); assert_eq!(htlc_tx.input[0].previous_output.txid, commitment_tx.compute_txid()); @@ -2469,18 +2871,24 @@ pub fn test_peer_disconnected_before_funding_broadcasted() { // Open a channel between `nodes[0]` and `nodes[1]`, for which the funding transaction is never // broadcasted, even though it's created by `nodes[0]`. 
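// A sketch of the funding handshake the test walks through before the peers
// disconnect; the names mirror the BOLT 2 messages rather than LDK's types.
// The property exercised here is that the funding transaction exists once
// FundingCreated is built, but must not hit the chain until FundingSigned
// arrives, so a disconnect in between leaves it permanently unbroadcast.
#[derive(Debug, Clone, Copy, PartialEq)]
enum FundingStep {
    OpenChannel,    // A -> B
    AcceptChannel,  // B -> A
    FundingCreated, // A -> B: funding tx built, still unbroadcast
    FundingSigned,  // B -> A: only now may A broadcast the funding tx
}

fn may_broadcast_funding(last_step: FundingStep) -> bool {
    matches!(last_step, FundingStep::FundingSigned)
}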
- let expected_temporary_channel_id = nodes[0].node.create_channel(node_b_id, 1_000_000, 500_000_000, 42, None, None).unwrap(); + let expected_temporary_channel_id = + nodes[0].node.create_channel(node_b_id, 1_000_000, 500_000_000, 42, None, None).unwrap(); let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &open_channel); let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); - let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); + let (temporary_channel_id, tx, _funding_output) = + create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); assert_eq!(temporary_channel_id, expected_temporary_channel_id); - assert!(nodes[0].node.funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()).is_ok()); + assert!(nodes[0] + .node + .funding_transaction_generated(temporary_channel_id, node_b_id, tx.clone()) + .is_ok()); - let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + let funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); assert_eq!(funding_created_msg.temporary_channel_id, expected_temporary_channel_id); // Even though the funding transaction is created by `nodes[0]`, the `FundingCreated` msg is @@ -2502,10 +2910,8 @@ pub fn test_peer_disconnected_before_funding_broadcasted() { // Ensure that the channel is closed with `ClosureReason::DisconnectedPeer` and a // `DiscardFunding` event when the peers are disconnected and do not reconnect before the // funding transaction is broadcasted. - check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true - , [node_b_id], 1000000); - check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false - , [node_a_id], 1000000); + check_closed_event!(&nodes[0], 2, ClosureReason::DisconnectedPeer, true, [node_b_id], 1000000); + check_closed_event!(&nodes[1], 1, ClosureReason::DisconnectedPeer, false, [node_a_id], 1000000); } #[xtest(feature = "_externalize_tests")] @@ -2537,7 +2943,8 @@ pub fn test_simple_peer_disconnect() { nodes[1].node.peer_disconnected(node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); + let (payment_preimage_3, payment_hash_3, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); let payment_preimage_4 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).0; let payment_hash_5 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).1; let payment_hash_6 = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000).1; @@ -2547,7 +2954,7 @@ pub fn test_simple_peer_disconnect() { claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage_3) - .skip_last(true) + .skip_last(true), ); fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5); @@ -2601,7 +3008,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let mut as_channel_ready = None; let channel_id = if messages_delivered == 0 { - let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001); + let (channel_ready, chan_id, _) = + create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001); as_channel_ready = Some(channel_ready); // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect) // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver @@ -2612,7 +3020,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken }; let user_channel_id = nodes[1].node.list_channels()[0].user_channel_id; - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000); let payment_event = { let onion = RecipientOnionFields::secret_only(payment_secret_1); @@ -2631,9 +3040,12 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken } else { nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); if messages_delivered >= 3 { - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors(&nodes[1], 1); - let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_a_id); + let (bs_revoke_and_ack, bs_commitment_signed) = + get_revoke_commit_msgs!(nodes[1], node_a_id); if messages_delivered >= 4 { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); @@ -2641,8 +3053,11 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken check_added_monitors(&nodes[0], 1); if messages_delivered >= 5 { - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); + let as_revoke_and_ack = + get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors(&nodes[0], 1); @@ -2701,17 +3116,17 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken if messages_delivered == 0 { assert_eq!(events_1.len(), 2); match events_1[0] { - Event::ChannelReady { .. } => { }, + Event::ChannelReady { .. } => {}, _ => panic!("Unexpected event"), }; match events_1[1] { - Event::PendingHTLCsForwardable { .. 
} => { }, + Event::PendingHTLCsForwardable { .. } => {}, _ => panic!("Unexpected event"), }; } else { assert_eq!(events_1.len(), 1); match events_1[0] { - Event::PendingHTLCsForwardable { .. } => { }, + Event::PendingHTLCsForwardable { .. } => {}, _ => panic!("Unexpected event"), }; } @@ -2725,17 +3140,26 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken let events_2 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_2.len(), 1); match events_2[0] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(payment_hash_1, *payment_hash); assert_eq!(amount_msat, 1_000_000); assert_eq!(receiver_node_id.unwrap(), node_b_id); assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_1, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), @@ -2776,7 +3200,8 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken if messages_delivered >= 2 { nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed); check_added_monitors(&nodes[0], 1); - let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], node_b_id); + let (as_revoke_and_ack, as_commitment_signed) = + get_revoke_commit_msgs!(nodes[0], node_b_id); if messages_delivered >= 3 { nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); @@ -2784,8 +3209,11 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken check_added_monitors(&nodes[1], 1); if messages_delivered >= 4 { - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed); + let bs_revoke_and_ack = + get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors(&nodes[1], 1); @@ -2983,10 +3411,12 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { create_announced_chan_between_nodes(&nodes, 0, 1); - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) 
= + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Now try to send a second payment which will fail to send - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); let onion = RecipientOnionFields::secret_only(payment_secret_2); let id = PaymentId(payment_hash_2.0); nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); @@ -3006,7 +3436,19 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); match events_2[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed }, .. } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + ref commitment_signed, + }, + .. + } => { assert_eq!(*node_id, node_a_id); assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -3067,8 +3509,13 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); assert!(as_resp.2.as_ref().unwrap().update_fee.is_none()); - nodes[1].node.handle_update_add_htlc(node_a_id, &as_resp.2.as_ref().unwrap().update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_resp.2.as_ref().unwrap().commitment_signed); + nodes[1] + .node + .handle_update_add_htlc(node_a_id, &as_resp.2.as_ref().unwrap().update_add_htlcs[0]); + nodes[1].node.handle_commitment_signed_batch_test( + node_a_id, + &as_resp.2.as_ref().unwrap().commitment_signed, + ); let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors(&nodes[1], 1); @@ -3091,13 +3538,19 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { assert!(as_commitment_signed.update_fee.is_none()); check_added_monitors(&nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_commitment_signed.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test( + node_b_id, + &bs_second_commitment_signed.commitment_signed, + ); let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors(&nodes[0], 1); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed.commitment_signed); - let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_commitment_signed.commitment_signed); + let bs_second_revoke_and_ack = + get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors(&nodes[1], 1); @@ -3113,11 +3566,13 @@ pub fn test_drop_messages_peer_disconnect_dual_htlc() { Event::PaymentClaimable { ref payment_hash, ref 
purpose, .. } => { assert_eq!(payment_hash_2, *payment_hash); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_2, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), @@ -3145,24 +3600,46 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { create_announced_chan_between_nodes(&nodes, 0, 1); let our_payment_hash = if send_partial_mpp { - let (route, our_payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000); + let (route, our_payment_hash, _, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[1], 100000); // Use the utility function send_payment_along_path to send the payment with MPP data which // indicates there are more HTLCs coming. let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. let payment_id = PaymentId([42; 32]); let onion = RecipientOnionFields::secret_only(payment_secret); - let session_privs = nodes[0].node - .test_add_new_pending_payment(our_payment_hash, onion, payment_id, &route).unwrap(); + let session_privs = nodes[0] + .node + .test_add_new_pending_payment(our_payment_hash, onion, payment_id, &route) + .unwrap(); - nodes[0].node.test_send_payment_along_path(&route.paths[0], &our_payment_hash, - RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, - &None, session_privs[0]).unwrap(); + nodes[0] + .node + .test_send_payment_along_path( + &route.paths[0], + &our_payment_hash, + RecipientOnionFields::secret_only(payment_secret), + 200_000, + cur_height, + payment_id, + &None, + session_privs[0], + ) + .unwrap(); check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); // Now do the relevant commitment_signed/RAA dances along the path, noting that the final // hop should *not* yet generate any PaymentClaimable event(s). 
- pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None); + pass_along_path( + &nodes[0], + &[&nodes[1]], + 100000, + our_payment_hash, + Some(payment_secret), + events.drain(..).next().unwrap(), + false, + None, + ); our_payment_hash } else { route_payment(&nodes[0], &[&nodes[1]], 100000).1 @@ -3171,14 +3648,18 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new()); connect_block(&nodes[0], &block); connect_block(&nodes[1], &block); - let block_count = TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS; + let block_count = + TEST_FINAL_CLTV + CHAN_CONFIRM_DEPTH + 2 - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS; for _ in CHAN_CONFIRM_DEPTH + 2..block_count { block.header.prev_blockhash = block.block_hash(); connect_block(&nodes[0], &block); connect_block(&nodes[1], &block); } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] + ); check_added_monitors(&nodes[1], 1); let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -3217,12 +3698,13 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); // Make sure all nodes are at the same starting height - connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); - connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); - connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + connect_blocks(&nodes[0], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); // Route a first payment to get the 1 -> 2 channel in awaiting_raa... 
- let (route, first_payment_hash, _, first_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[2], 100000); + let (route, first_payment_hash, _, first_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[2], 100000); let onion = RecipientOnionFields::secret_only(first_payment_secret); let id = PaymentId(first_payment_hash.0); nodes[1].node.send_payment_with_route(route, first_payment_hash, onion, id).unwrap(); @@ -3231,14 +3713,16 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { // Now attempt to route a second payment, which should be placed in the holding cell let sending_node = if forwarded_htlc { &nodes[0] } else { &nodes[1] }; - let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(sending_node, nodes[2], 100000); + let (route, second_payment_hash, _, second_payment_secret) = + get_route_and_payment_hash!(sending_node, nodes[2], 100000); let onion = RecipientOnionFields::secret_only(second_payment_secret); let id = PaymentId(second_payment_hash.0); sending_node.node.send_payment_with_route(route, second_payment_hash, onion, id).unwrap(); if forwarded_htlc { check_added_monitors(&nodes[0], 1); - let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + let payment_event = + SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -3258,7 +3742,10 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); match fail_commit[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fail_htlcs, ref commitment_signed, .. }, + .. + } => { nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, true, true); }, @@ -3278,33 +3765,46 @@ pub fn test_holding_cell_htlc_add_timeouts() { } macro_rules! check_spendable_outputs { - ($node: expr, $keysinterface: expr) => { - { - let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events(); - let mut txn = Vec::new(); - let mut all_outputs = Vec::new(); - let secp_ctx = Secp256k1::new(); - for event in events.drain(..) { - match event { - Event::SpendableOutputs { mut outputs, channel_id: _ } => { - for outp in outputs.drain(..) { - let script = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(); - let tx = $keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), script, 253, None, &secp_ctx); - txn.push(tx.unwrap()); - all_outputs.push(outp); - } - }, - _ => panic!("Unexpected event"), - }; - } - if all_outputs.len() > 1 { - if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, None, &secp_ctx) { - txn.push(tx); - } + ($node: expr, $keysinterface: expr) => {{ + let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events(); + let mut txn = Vec::new(); + let mut all_outputs = Vec::new(); + let secp_ctx = Secp256k1::new(); + for event in events.drain(..)
{ + match event { + Event::SpendableOutputs { mut outputs, channel_id: _ } => { + for outp in outputs.drain(..) { + let script = + Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(); + let tx = $keysinterface.backing.spend_spendable_outputs( + &[&outp], + Vec::new(), + script, + 253, + None, + &secp_ctx, + ); + txn.push(tx.unwrap()); + all_outputs.push(outp); + } + }, + _ => panic!("Unexpected event"), + }; + } + if all_outputs.len() > 1 { + if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs( + &all_outputs.iter().map(|a| a).collect::<Vec<_>>(), + Vec::new(), + Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), + 253, + None, + &secp_ctx, + ) { + txn.push(tx); } - txn } - } + txn + }}; } #[xtest(feature = "_externalize_tests")] @@ -3529,7 +4029,10 @@ fn do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(split_tx: b claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); if split_tx { - connect_blocks(&nodes[1], TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE + 1); + connect_blocks( + &nodes[1], + TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE + 1, + ); } mine_transaction(&nodes[1], &revoked_local_txn[0]); @@ -3596,7 +4099,10 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(revoked_htlc_txn.len(), 1); assert_eq!(revoked_htlc_txn[0].input.len(), 1); - assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), + OFFERED_HTLC_SCRIPT_WEIGHT + ); check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]); assert_ne!(revoked_htlc_txn[0].lock_time, LockTime::ZERO); // HTLC-Timeout @@ -3672,7 +4178,10 @@ pub fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { assert_eq!(revoked_htlc_txn.len(), 1); assert_eq!(revoked_htlc_txn[0].input.len(), 1); - assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]); // Check that the unspent (of two) outputs on revoked_local_txn[0] is a P2WPKH: @@ -3738,7 +4247,8 @@ pub fn test_onchain_to_onchain_claim() { let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); // Ensure all nodes are at the same height - let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + let node_max_height = + nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); @@ -3747,7 +4257,8 @@ pub fn test_onchain_to_onchain_claim() { send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 8000000); - let (payment_preimage, payment_hash, ..)
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2); check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.claim_funds(payment_preimage); @@ -3767,7 +4278,10 @@ pub fn test_onchain_to_onchain_claim() { let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelMonitor: 1 (HTLC-Success tx) assert_eq!(c_txn.len(), 1); check_spends!(c_txn[0], commitment_tx[0]); - assert_eq!(c_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + c_txn[0].input[0].witness.clone().last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + ); assert!(c_txn[0].output[0].script_pubkey.is_p2wsh()); // revokeable output assert_eq!(c_txn[0].lock_time, LockTime::ZERO); // Success tx @@ -3778,12 +4292,17 @@ pub fn test_onchain_to_onchain_claim() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. + Event::PaymentForwarded { + total_fee_earned_msat, + prev_channel_id, + claim_from_onchain_tx, + next_channel_id, + outbound_amount_forwarded_msat, + .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, Some(chan_1.2)); @@ -3800,12 +4319,26 @@ pub fn test_onchain_to_onchain_claim() { let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut msg_events); match nodes_2_event { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { .. }, node_id: _ } => {}, + MessageSendEvent::HandleError { + action: ErrorAction::DisconnectPeer { .. }, + node_id: _, + } => {}, _ => panic!("Unexpected event"), } match nodes_0_event { - MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. }, .. } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + .. + }, + .. + } => { assert!(update_add_htlcs.is_empty()); assert!(update_fail_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -3871,18 +4404,22 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { create_announced_chan_between_nodes(&nodes, 2, 3); create_announced_chan_between_nodes(&nodes, 2, 4); - let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + let node_max_height = + nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; connect_blocks(&nodes[0], node_max_height * 2 - nodes[0].best_block_info().1); connect_blocks(&nodes[1], node_max_height * 2 - nodes[1].best_block_info().1); connect_blocks(&nodes[2], node_max_height * 2 - nodes[2].best_block_info().1); connect_blocks(&nodes[3], node_max_height * 2 - nodes[3].best_block_info().1); connect_blocks(&nodes[4], node_max_height * 2 - nodes[4].best_block_info().1); - let (our_payment_preimage, dup_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 900_000); + let (our_payment_preimage, dup_payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], 900_000); - let payment_secret = nodes[4].node.create_inbound_payment_for_hash(dup_payment_hash, None, 7200, None).unwrap(); + let payment_secret = + nodes[4].node.create_inbound_payment_for_hash(dup_payment_hash, None, 7200, None).unwrap(); let payment_params = PaymentParameters::from_node_id(node_e_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[4].node.bolt11_invoice_features()).unwrap(); + .with_bolt11_features(nodes[4].node.bolt11_invoice_features()) + .unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[4], payment_params, 800_000); let path: &[&[_]] = &[&[&nodes[1], &nodes[2], &nodes[4]]]; send_along_route_with_secret(&nodes[0], route, path, 800_000, dup_payment_hash, payment_secret); @@ -3916,10 +4453,12 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { assert_eq!(tx.input[1].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1); // Split the HTLC claim transaction into two, one for each HTLC. - if commitment_txn[0].output[tx.input[1].previous_output.vout as usize].value.to_sat() < 850 { + if commitment_txn[0].output[tx.input[1].previous_output.vout as usize].value.to_sat() < 850 + { tx.input.remove(1); } - if commitment_txn[0].output[tx.input[0].previous_output.vout as usize].value.to_sat() < 850 { + if commitment_txn[0].output[tx.input[0].previous_output.vout as usize].value.to_sat() < 850 + { tx.input.remove(0); } assert_eq!(tx.input.len(), 1); @@ -3952,23 +4491,39 @@ pub fn test_duplicate_payment_hash_one_failure_one_success() { assert_eq!(htlc_success_txn[0].input.len(), 1); // Note that the witness script lengths are one longer than our constant as the CLTV value went // to two bytes rather than one. - assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1); + assert_eq!( + htlc_success_txn[0].input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + 1 + ); assert_eq!(htlc_success_txn[1].input.len(), 1); - assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1); - assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_success_txn[1].input[0].previous_output); + assert_eq!( + htlc_success_txn[1].input[0].witness.last().unwrap().len(), + ACCEPTED_HTLC_SCRIPT_WEIGHT + 1 + ); + assert_ne!( + htlc_success_txn[0].input[0].previous_output, + htlc_success_txn[1].input[0].previous_output + ); - let htlc_success_tx_to_confirm = - if htlc_success_txn[0].input[0].previous_output == htlc_timeout_tx.input[0].previous_output { - &htlc_success_txn[1] - } else { - &htlc_success_txn[0] - }; - assert_ne!(htlc_success_tx_to_confirm.input[0].previous_output, htlc_timeout_tx.input[0].previous_output); + let htlc_success_tx_to_confirm = if htlc_success_txn[0].input[0].previous_output + == htlc_timeout_tx.input[0].previous_output + { + &htlc_success_txn[1] + } else { + &htlc_success_txn[0] + }; + assert_ne!( + htlc_success_tx_to_confirm.input[0].previous_output, + htlc_timeout_tx.input[0].previous_output + ); // Mine the HTLC timeout transaction on node B. 
mine_transaction(&nodes[1], &htlc_timeout_tx); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); let htlc_updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -4097,7 +4652,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2); let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3); let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4); - let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5); + let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5); // Rebalance and check output sanity... send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000); @@ -4269,7 +4824,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno events.last().clone().unwrap() }; match close_event { - Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}, _ => panic!("Unexpected event"), } @@ -4278,13 +4833,32 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno if deliver_last_raa { expect_pending_htlcs_forwardable_from_events!(nodes[2], events[1..2], true); - let expected_destinations: Vec<HTLCHandlingFailureType> = repeat(HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }).take(3).collect(); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations); + let expected_destinations: Vec<HTLCHandlingFailureType> = + repeat(HTLCHandlingFailureType::Forward { + node_id: Some(node_d_id), + channel_id: chan_2_3.2, + }) + .take(3) + .collect(); + expect_htlc_handling_failed_destinations!( + nodes[2].node.get_and_clear_pending_events(), + expected_destinations + ); } else { let expected_destinations: Vec<HTLCHandlingFailureType> = if announce_latest { - repeat(HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }).take(9).collect() + repeat(HTLCHandlingFailureType::Forward { + node_id: Some(node_d_id), + channel_id: chan_2_3.2, + }) + .take(9) + .collect() } else { - repeat(HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }).take(6).collect() + repeat(HTLCHandlingFailureType::Forward { + node_id: Some(node_d_id), + channel_id: chan_2_3.2, + }) + .take(6) + .collect() }; expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations); @@ -4302,19 +4876,37 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let target = if *node_id == node_a_id { // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs for htlc in &updates.update_fail_htlcs { - assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false }); + assert!( + htlc.htlc_id == 1 + || htlc.htlc_id == 2 || htlc.htlc_id == 6 + || if announce_latest { + htlc.htlc_id == 3 || htlc.htlc_id == 5 + } else { + false + } + ); } -
assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 }); + assert_eq!( + updates.update_fail_htlcs.len(), + if announce_latest { 5 } else { 3 } + ); assert!(!a_done); a_done = true; &nodes[0] } else { // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs for htlc in &updates.update_fail_htlcs { - assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false }); + assert!( + htlc.htlc_id == 1 + || htlc.htlc_id == 2 || htlc.htlc_id == 5 + || if announce_latest { htlc.htlc_id == 4 } else { false } + ); } assert_eq!(*node_id, node_b_id); - assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 }); + assert_eq!( + updates.update_fail_htlcs.len(), + if announce_latest { 4 } else { 3 } + ); &nodes[1] }; target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); @@ -4323,7 +4915,9 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno if announce_latest { target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[3]); if *node_id == node_a_id { - target.node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[4]); + target + .node + .handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[4]); } } commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true); @@ -4337,7 +4931,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let mut as_faileds = new_hash_set(); let mut as_updates = 0; for event in as_events.iter() { - if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event { + if let &Event::PaymentPathFailed { + ref payment_hash, + ref payment_failed_permanently, + ref failure, + .. + } = event + { assert!(as_faileds.insert(*payment_hash)); if *payment_hash != hash_2 { assert_eq!(*payment_failed_permanently, deliver_last_raa); @@ -4348,7 +4948,9 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno as_updates += 1; } } else if let &Event::PaymentFailed { .. } = event { - } else { panic!("Unexpected event"); } + } else { + panic!("Unexpected event"); + } } assert!(as_faileds.contains(&hash_1)); assert!(as_faileds.contains(&hash_2)); @@ -4363,7 +4965,13 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let mut bs_faileds = new_hash_set(); let mut bs_updates = 0; for event in bs_events.iter() { - if let &Event::PaymentPathFailed { ref payment_hash, ref payment_failed_permanently, ref failure, .. } = event { + if let &Event::PaymentPathFailed { + ref payment_hash, + ref payment_failed_permanently, + ref failure, + .. + } = event + { assert!(bs_faileds.insert(*payment_hash)); if *payment_hash != hash_1 && *payment_hash != hash_5 { assert_eq!(*payment_failed_permanently, deliver_last_raa); @@ -4374,7 +4982,9 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno bs_updates += 1; } } else if let &Event::PaymentFailed { .. } = event { - } else { panic!("Unexpected event"); } + } else { + panic!("Unexpected event"); + } } assert!(bs_faileds.contains(&hash_1)); assert!(bs_faileds.contains(&hash_2)); @@ -4387,8 +4997,26 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno // get a NetworkUpdate. A should have gotten 4 HTLCs which were failed-back due to // unknown-preimage-etc, B should have gotten 2. 
Thus, in the // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2 NetworkUpdates. - assert_eq!(as_updates, if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 }); - assert_eq!(bs_updates, if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 }); + assert_eq!( + as_updates, + if deliver_last_raa { + 1 + } else if !announce_latest { + 3 + } else { + 5 + } + ); + assert_eq!( + bs_updates, + if deliver_last_raa { + 2 + } else if !announce_latest { + 3 + } else { + 4 + } + ); } #[xtest(feature = "_externalize_tests")] @@ -4454,8 +5082,10 @@ pub fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32); assert_eq!(spend_txn[2].input.len(), 2); check_spends!(spend_txn[2], local_txn[0], htlc_timeout); - assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 || - spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32); + assert!( + spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 + || spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32 + ); } #[xtest(feature = "_externalize_tests")] @@ -4470,12 +5100,32 @@ pub fn test_key_derivation_params() { // We manually create the node configuration to backup the seed. let seed = [42; 32]; let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet); - let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager); + let chain_monitor = test_utils::TestChainMonitor::new( + Some(&chanmon_cfgs[0].chain_source), + &chanmon_cfgs[0].tx_broadcaster, + &chanmon_cfgs[0].logger, + &chanmon_cfgs[0].fee_estimator, + &chanmon_cfgs[0].persister, + &keys_manager, + ); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); - let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer); + let router = + test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer); let message_router = test_utils::TestMessageRouter::new(network_graph.clone(), &keys_manager); - let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) }; + let node = NodeCfg { + chain_source: &chanmon_cfgs[0].chain_source, + logger: &chanmon_cfgs[0].logger, + tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, + fee_estimator: &chanmon_cfgs[0].fee_estimator, + router, + message_router, + chain_monitor, + keys_manager: &keys_manager, + network_graph, + node_seed: seed, + override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)), + }; let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs); node_cfgs.remove(0); node_cfgs.insert(0, node); @@ -4493,7 +5143,8 @@ pub fn test_key_derivation_params() { assert_ne!(chan_0.3.output[0].script_pubkey, chan_1.3.output[0].script_pubkey); // Ensure all nodes are at the same height - let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + let node_max_height = + nodes.iter().map(|node| 
node.blocks.lock().unwrap().len()).max().unwrap() as u32; connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); @@ -4505,12 +5156,19 @@ pub fn test_key_derivation_params() { check_spends!(local_txn_1[0], chan_1.3); // We check funding pubkey are unique - let (from_0_funding_key_0, from_0_funding_key_1) = (PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69])); - let (from_1_funding_key_0, from_1_funding_key_1) = (PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69])); + let (from_0_funding_key_0, from_0_funding_key_1) = ( + PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][2..35]), + PublicKey::from_slice(&local_txn_0[0].input[0].witness.to_vec()[3][36..69]), + ); + let (from_1_funding_key_0, from_1_funding_key_1) = ( + PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][2..35]), + PublicKey::from_slice(&local_txn_1[0].input[0].witness.to_vec()[3][36..69]), + ); if from_0_funding_key_0 == from_1_funding_key_0 - || from_0_funding_key_0 == from_1_funding_key_1 - || from_0_funding_key_1 == from_1_funding_key_0 - || from_0_funding_key_1 == from_1_funding_key_1 { + || from_0_funding_key_0 == from_1_funding_key_1 + || from_0_funding_key_1 == from_1_funding_key_0 + || from_0_funding_key_1 == from_1_funding_key_1 + { panic!("Funding pubkeys aren't unique"); } @@ -4544,8 +5202,10 @@ pub fn test_key_derivation_params() { assert_eq!(spend_txn[1].input[0].sequence.0, BREAKDOWN_TIMEOUT as u32); assert_eq!(spend_txn[2].input.len(), 2); check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout); - assert!(spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 || - spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32); + assert!( + spend_txn[2].input[0].sequence.0 == BREAKDOWN_TIMEOUT as u32 + || spend_txn[2].input[1].sequence.0 == BREAKDOWN_TIMEOUT as u32 + ); } #[xtest(feature = "_externalize_tests")] @@ -4593,7 +5253,8 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 }); + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 }); // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being // present in B's local commitment transaction, but none of A's commitment transactions. 
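A note for readers on the witness slicing in `test_key_derivation_params` above: the funding pubkeys are pulled out of `witness.to_vec()[3][2..35]` and `[3][36..69]` because the last witness element of a commitment-transaction input spend is the 2-of-2 funding redeem script, laid out as `OP_2 <33-byte pubkey> <33-byte pubkey> OP_2 OP_CHECKMULTISIG`, which places the two pushed pubkeys at byte offsets 2..35 and 36..69. A minimal sketch of that layout assumption follows; the helper name is hypothetical and is not part of this patch:

use bitcoin::secp256k1::PublicKey;

// Illustrative helper mirroring the byte offsets the test relies on.
// Redeem script layout: OP_2 (1 byte) | 0x21 push (1 byte) | pubkey1 (33 bytes)
//                       | 0x21 push (1 byte) | pubkey2 (33 bytes) | OP_2 | OP_CHECKMULTISIG
fn funding_pubkeys_from_witness_script(script: &[u8]) -> Option<(PublicKey, PublicKey)> {
	let pk1 = PublicKey::from_slice(script.get(2..35)?).ok()?;
	let pk2 = PublicKey::from_slice(script.get(36..69)?).ok()?;
	Some((pk1, pk2))
}

The test then only needs to compare the four extracted keys pairwise to assert the two channels derived distinct funding keys from the same seed.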
@@ -4634,7 +5295,8 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 }); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], if use_dust { 50000 } else { 3000000 }); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); @@ -4649,7 +5311,9 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) { let starting_block = nodes[1].best_block_info(); let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); - for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 { + for _ in + starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + starting_block.1 + 2 + { connect_block(&nodes[0], &block); block.header.prev_blockhash = block.block_hash(); } @@ -4677,7 +5341,10 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no let htlc_value = if use_dust { 50000 } else { 3000000 }; let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value); nodes[1].node.fail_htlc_backwards(&our_payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] + ); check_added_monitors(&nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -4698,7 +5365,9 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no let starting_block = nodes[1].best_block_info(); let mut block = create_dummy_block(starting_block.0, 42, Vec::new()); - for _ in starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 { + for _ in + starting_block.1 + 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 2 + { connect_block(&nodes[0], &block); block.header.prev_blockhash = block.block_hash(); } @@ -4742,7 +5411,8 @@ pub fn htlc_claim_single_commitment_only_b() { #[xtest(feature = "_externalize_tests")] #[should_panic] -pub fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic +pub fn bolt2_open_channel_sending_node_checks_part1() { + //This test needs to be on its own as we are catching a panic let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -4757,16 +5427,23 @@ pub fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be } // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer. 
- let channel_value_satoshis=10000; - let push_msat=10001; - nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).unwrap(); - let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + let channel_value_satoshis = 10000; + let push_msat = 10001; + nodes[0] + .node + .create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None) + .unwrap(); + let node0_to_1_send_open_channel = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &node0_to_1_send_open_channel); get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); // Create a second channel with the same random values. This used to panic due to a colliding // channel_id, but now panics due to a colliding outbound SCID alias. - assert!(nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).is_err()); + assert!(nodes[0] + .node + .create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None) + .is_err()); } #[xtest(feature = "_externalize_tests")] @@ -4779,18 +5456,22 @@ pub fn bolt2_open_channel_sending_node_checks_part2() { let node_b_id = nodes[1].node.get_our_node_id(); // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis - let channel_value_satoshis=10000; + let channel_value_satoshis = 10000; // Test when push_msat is equal to 1000 * funding_satoshis. - let push_msat=1000*channel_value_satoshis+1; - assert!(nodes[0].node.create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None).is_err()); + let push_msat = 1000 * channel_value_satoshis + 1; + assert!(nodes[0] + .node + .create_channel(node_b_id, channel_value_satoshis, push_msat, 42, None, None) + .is_err()); nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); - let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + let node0_to_1_send_open_channel = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver. - assert!(BREAKDOWN_TIMEOUT>0); - assert!(node0_to_1_send_open_channel.common_fields.to_self_delay==BREAKDOWN_TIMEOUT); + assert!(BREAKDOWN_TIMEOUT > 0); + assert!(node0_to_1_send_open_channel.common_fields.to_self_delay == BREAKDOWN_TIMEOUT); // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within. 
let chain_hash = ChainHash::using_genesis_block(Network::Testnet); @@ -4808,21 +5489,25 @@ pub fn bolt2_open_channel_sane_dust_limit() { let node_b_id = nodes[1].node.get_our_node_id(); let value_sats = 1000000; - let push_msat=10001; + let push_msat = 10001; nodes[0].node.create_channel(node_b_id, value_sats, push_msat, 42, None, None).unwrap(); - let mut node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + let mut node0_to_1_send_open_channel = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); node0_to_1_send_open_channel.common_fields.dust_limit_satoshis = 547; node0_to_1_send_open_channel.channel_reserve_satoshis = 100001; nodes[1].node.handle_open_channel(node_a_id, &node0_to_1_send_open_channel); let events = nodes[1].node.get_and_clear_pending_msg_events(); let err_msg = match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, .. } => { - msg.clone() - }, + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, .. + } => msg.clone(), _ => panic!("Unexpected event"), }; - assert_eq!(err_msg.data, "dust_limit_satoshis (547) is greater than the implementation limit (546)"); + assert_eq!( + err_msg.data, + "dust_limit_satoshis (547) is greater than the implementation limit (546)" + ); } // Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC @@ -4853,9 +5538,10 @@ pub fn test_fail_holding_cell_htlc_upon_free() { let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let (update_msg, commitment_signed) = match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - (update_fee.as_ref(), commitment_signed) - }, + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), _ => panic!("Unexpected event"), }; @@ -4867,8 +5553,10 @@ pub fn test_fail_holding_cell_htlc_upon_free() { let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan.2); // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. - let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); + let max_can_send = + 5000000 - channel_reserve - 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); // Send a payment which passes reserve checks but gets stuck in the holding cell. let onion = RecipientOnionFields::secret_only(our_payment_secret); @@ -4889,13 +5577,24 @@ pub fn test_fail_holding_cell_htlc_upon_free() { // us to surface its failure to the user. chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); - nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1); + nodes[0].logger.assert_log( + "lightning::ln::channel", + format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), + 1, + ); // Check that the payment failed to be sent out. 
let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match &events[0] { - &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => { + &Event::PaymentPathFailed { + ref payment_id, + ref payment_hash, + ref payment_failed_permanently, + failure: PathFailure::OnPath { network_update: None }, + ref short_channel_id, + .. + } => { assert_eq!(PaymentId(our_payment_hash.0), *payment_id.as_ref().unwrap()); assert_eq!(our_payment_hash.clone(), *payment_hash); assert_eq!(*payment_failed_permanently, false); @@ -4938,9 +5637,10 @@ pub fn test_free_and_fail_holding_cell_htlcs() { let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let (update_msg, commitment_signed) = match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - (update_fee.as_ref(), commitment_signed) - }, + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), _ => panic!("Unexpected event"), }; @@ -4953,9 +5653,14 @@ pub fn test_free_and_fail_holding_cell_htlcs() { // 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve. let amt_1 = 20000; - let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) - amt_1; - let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_1); - let (route_2, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], amt_2); + let amt_2 = 5000000 + - channel_reserve + - 2 * commit_tx_fee_msat(feerate, 2 + 1, &channel_type_features) + - amt_1; + let (route_1, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], amt_1); + let (route_2, payment_hash_2, _, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], amt_2); // Send 2 payments which pass reserve checks but get stuck in the holding cell. let onion = RecipientOnionFields::secret_only(payment_secret_1); @@ -4983,13 +5688,24 @@ pub fn test_free_and_fail_holding_cell_htlcs() { // to surface its failure to the user. The first payment should succeed. chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); - nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1); + nodes[0].logger.assert_log( + "lightning::ln::channel", + format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), + 1, + ); // Check that the second payment failed to be sent out. let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match &events[0] { - &Event::PaymentPathFailed { ref payment_id, ref payment_hash, ref payment_failed_permanently, failure: PathFailure::OnPath { network_update: None }, ref short_channel_id, .. } => { + &Event::PaymentPathFailed { + ref payment_id, + ref payment_hash, + ref payment_failed_permanently, + failure: PathFailure::OnPath { network_update: None }, + ref short_channel_id, + .. 
+ } => { assert_eq!(id_2, *payment_id.as_ref().unwrap()); assert_eq!(payment_hash_2.clone(), *payment_hash); assert_eq!(*payment_failed_permanently, false); @@ -5053,7 +5769,11 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { let mut config = test_default_channel_config(); config.channel_config.forwarding_fee_base_msat = 0; config.channel_config.forwarding_fee_proportional_millionths = 0; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config.clone())]); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(config.clone()), Some(config.clone()), Some(config.clone())], + ); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -5075,9 +5795,10 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let (update_msg, commitment_signed) = match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => { - (update_fee.as_ref(), commitment_signed) - }, + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, + .. + } => (update_fee.as_ref(), commitment_signed), _ => panic!("Unexpected event"), }; @@ -5089,8 +5810,10 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_0_1.2); // Send a payment which passes reserve checks but gets stuck in the holding cell. - let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send); + let max_can_send = + 5000000 - channel_reserve - 2 * commit_tx_fee_msat(feerate, 1 + 1, &channel_type_features); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], max_can_send); let payment_event = { let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); @@ -5123,9 +5846,7 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() { assert_eq!(events.len(), 1); let raa_msg = match &events[0] { - &MessageSendEvent::SendRevokeAndACK { ref msg, .. } => { - msg.clone() - }, + &MessageSendEvent::SendRevokeAndACK { ref msg, .. 
} => msg.clone(),
 		_ => panic!("Unexpected event"),
 	};
@@ -5176,7 +5897,13 @@ pub fn test_fail_holding_cell_htlc_upon_free_multihop() {
 		_ => panic!("Unexpected event"),
 	};
 	nodes[0].node.handle_revoke_and_ack(node_b_id, &raa);
-	expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_1_2.0.contents.short_channel_id, false);
+	expect_payment_failed_with_update!(
+		nodes[0],
+		our_payment_hash,
+		false,
+		chan_1_2.0.contents.short_channel_id,
+		false
+	);
 	check_added_monitors(&nodes[0], 1);
 }
@@ -5197,7 +5924,8 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_
 	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
 	let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000);
 
-	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
+	let (route, our_payment_hash, _, our_payment_secret) =
+		get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
 
 	//First hop
 	let mut payment_event = {
@@ -5225,14 +5953,28 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_
 	check_added_monitors(&nodes[2], 0);
 	commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
 	expect_pending_htlcs_forwardable!(nodes[2]);
-	expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]);
+	expect_htlc_handling_failed_destinations!(
+		nodes[2].node.get_and_clear_pending_events(),
+		&[HTLCHandlingFailureType::InvalidOnion]
+	);
 	check_added_monitors(&nodes[2], 1);
 
 	let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_3.len(), 1);
-	let update_msg : (msgs::UpdateFailMalformedHTLC, Vec<msgs::CommitmentSigned>) = {
+	let update_msg: (msgs::UpdateFailMalformedHTLC, Vec<msgs::CommitmentSigned>) = {
 		match events_3[0] {
-			MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed }, .. } => {
+			MessageSendEvent::UpdateHTLCs {
+				updates:
+					msgs::CommitmentUpdate {
+						ref update_add_htlcs,
+						ref update_fulfill_htlcs,
+						ref update_fail_htlcs,
+						ref update_fail_malformed_htlcs,
+						ref update_fee,
+						ref commitment_signed,
+					},
+				..
+			} => {
 				assert!(update_add_htlcs.is_empty());
 				assert!(update_fulfill_htlcs.is_empty());
 				assert!(update_fail_htlcs.is_empty());
@@ -5248,13 +5990,27 @@ pub fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_
 	check_added_monitors(&nodes[1], 0);
 	commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
 
-	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]);
+	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(
+		nodes[1],
+		vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }]
+	);
 	let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events_4.len(), 1);
 	//Confirm that handling the update_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
 	match events_4[0] {
-		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. }, ..
} => { + MessageSendEvent::UpdateHTLCs { + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + .. + }, + .. + } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); @@ -5281,7 +6037,8 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100_000); + let (route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[2], 100_000); // First hop let mut payment_event = { @@ -5305,7 +6062,10 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { check_added_monitors(&nodes[2], 0); commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable!(nodes[2]); - expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::InvalidOnion]); + expect_htlc_handling_failed_destinations!( + nodes[2].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::InvalidOnion] + ); check_added_monitors(&nodes[2], 1); let events_3 = nodes[2].node.get_and_clear_pending_msg_events(); @@ -5322,9 +6082,10 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { _ => panic!("Unexpected event"), } - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCHandlingFailureType::Forward { - node_id: Some(node_c_id), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); check_added_monitors(&nodes[1], 1); @@ -5343,11 +6104,18 @@ pub fn test_channel_failed_after_message_with_badonion_node_perm_bits_set() { // Expect a PaymentPathFailed event with a ChannelFailure network update for the channel between // the node originating the error to its next hop. match events_5[0] { - Event::PaymentPathFailed { error_code, failure: PathFailure::OnPath { network_update: Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }) }, .. + Event::PaymentPathFailed { + error_code, + failure: + PathFailure::OnPath { + network_update: + Some(NetworkUpdate::ChannelFailure { short_channel_id, is_permanent }), + }, + .. } => { assert_eq!(short_channel_id, chan_2.0.contents.short_channel_id); assert!(is_permanent); - assert_eq!(error_code, Some(0x8000|0x4000|0x2000|4)); + assert_eq!(error_code, Some(0x8000 | 0x4000 | 0x2000 | 4)); }, _ => panic!("Unexpected event"), } @@ -5386,8 +6154,8 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { }; // We route 2 dust-HTLCs between A and B - let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); - let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); + let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit * 1000); + let (_, payment_hash_2, ..) 
= route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit * 1000); route_payment(&nodes[0], &[&nodes[1]], 1000000); // Cache one local commitment tx as previous @@ -5396,7 +6164,10 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { // Fail one HTLC to prune it in the will-be-latest-local commitment tx nodes[1].node.fail_htlc_backwards(&payment_hash_2); check_added_monitors(&nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }] + ); check_added_monitors(&nodes[1], 1); let remove = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -5449,7 +6220,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { assert_eq!(payment_hash, payment_hash_2); } }, - Event::PaymentFailed { .. } => {} + Event::PaymentFailed { .. } => {}, _ => panic!("Unexpected event"), } } @@ -5486,7 +6257,8 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { chan.context().holder_dust_limit_satoshis }; - let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000); + let (_payment_preimage_1, dust_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit * 1000); let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000); let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2); @@ -5511,7 +6283,10 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { check_added_monitors(&nodes[0], 1); assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone()); - assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert_eq!( + timeout_tx[0].input[0].witness.last().unwrap().len(), + OFFERED_HTLC_SCRIPT_WEIGHT + ); // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); mine_transaction(&nodes[0], &timeout_tx[0]); @@ -5526,15 +6301,24 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) { assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires - timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..) - .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].compute_txid()).collect(); + timeout_tx = nodes[0] + .tx_broadcaster + .txn_broadcasted + .lock() + .unwrap() + .drain(..) + .filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].compute_txid()) + .collect(); check_spends!(timeout_tx[0], bs_commitment_tx[0]); // For both a revoked or non-revoked commitment transaction, after ANTI_REORG_DELAY the // dust HTLC should have been failed. 
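To make the dust rule above concrete: an HTLC is committed on-chain only if its value, minus the fee of the second-stage HTLC-timeout/HTLC-success transaction that would claim it, still clears the holder's dust limit. Below-limit HTLCs get no commitment output, so after a close there is nothing to sweep and they can only be failed back once the reorg-safety window passes. A minimal sketch of that check, with illustrative parameter names rather than LDK's internal helpers:

```rust
/// Sketch only: whether an HTLC would be trimmed to dust on the commitment tx.
/// `htlc_tx_fee_sat` stands in for the fee of the second-stage HTLC claim
/// transaction; dust HTLCs produce no output and cannot be claimed on-chain.
fn is_dust_htlc(htlc_amount_msat: u64, htlc_tx_fee_sat: u64, dust_limit_sat: u64) -> bool {
	(htlc_amount_msat / 1000).saturating_sub(htlc_tx_fee_sat) < dust_limit_sat
}
```

This is also why these tests scale payment amounts by `bs_dust_limit * 1000`: payment amounts are in millisatoshis while dust limits are in satoshis.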
expect_payment_failed!(nodes[0], dust_hash, false);
 
 	if !revoked {
-		assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+		assert_eq!(
+			timeout_tx[0].input[0].witness.last().unwrap().len(),
+			ACCEPTED_HTLC_SCRIPT_WEIGHT
+		);
 	} else {
 		assert_eq!(timeout_tx[0].lock_time.to_consensus_u32(), 11);
 	}
@@ -5573,51 +6357,92 @@ pub fn test_user_configurable_csv_delay() {
 	let logger = TestLogger::new();
 
 	// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
-	if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)),
-		&nodes[0].keys_manager, &nodes[0].keys_manager, node_b_id, &nodes[1].node.init_features(), 1000000, 1000000, 0,
-		&low_our_to_self_config, 0, 42, None, &logger)
-	{
+	if let Err(error) = OutboundV1Channel::new(
+		&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)),
+		&nodes[0].keys_manager,
+		&nodes[0].keys_manager,
+		node_b_id,
+		&nodes[1].node.init_features(),
+		1000000,
+		1000000,
+		0,
+		&low_our_to_self_config,
+		0,
+		42,
+		None,
+		&logger,
+	) {
 		match error {
-			APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
+			APIError::APIMisuseError { err } => {
+				assert!(regex::Regex::new(
+					r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks"
+				)
+				.unwrap()
+				.is_match(err.as_str()));
+			},
 			_ => panic!("Unexpected event"),
 		}
-	} else { assert!(false) }
+	} else {
+		assert!(false)
+	}
 
 	// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
 	nodes[1].node.create_channel(node_a_id, 1000000, 1000000, 42, None, None).unwrap();
 	let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_a_id);
 	open_channel.common_fields.to_self_delay = 200;
-	if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)),
-		&nodes[0].keys_manager, &nodes[0].keys_manager, node_b_id, &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
-		&low_our_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false)
-	{
+	if let Err(error) = InboundV1Channel::new(
+		&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)),
+		&nodes[0].keys_manager,
+		&nodes[0].keys_manager,
+		node_b_id,
+		&nodes[0].node.channel_type_features(),
+		&nodes[1].node.init_features(),
+		&open_channel,
+		0,
+		&low_our_to_self_config,
+		0,
+		&nodes[0].logger,
+		/*is_0conf=*/ false,
+	) {
 		match error {
 			ChannelError::Close((err, _)) => {
-				let regex = regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap();
+				let regex = regex::Regex::new(
+					r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks",
+				)
+				.unwrap();
 				assert!(regex.is_match(err.as_str()));
 			},
 			_ => panic!("Unexpected event"),
 		}
-	} else { assert!(false); }
+	} else {
+		assert!(false);
+	}
 
 	// We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
 	nodes[0].node.create_channel(node_b_id, 1000000, 1000000, 42, None, None).unwrap();
 	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
 	nodes[1].node.handle_open_channel(node_a_id, &open_channel);
-	let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
+
let mut accept_channel = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); accept_channel.common_fields.to_self_delay = 200; nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); let reason_msg; - if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] { + if let MessageSendEvent::HandleError { ref action, .. } = + nodes[0].node.get_and_clear_pending_msg_events()[0] + { match action { &ErrorAction::SendErrorMessage { ref msg } => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str())); reason_msg = msg.data.clone(); }, - _ => { panic!(); } + _ => { + panic!(); + }, } - } else { panic!(); } + } else { + panic!(); + } let reason = ClosureReason::ProcessingError { err: reason_msg }; check_closed_event!(nodes[0], 1, reason, [node_b_id], 1000000); @@ -5625,10 +6450,20 @@ pub fn test_user_configurable_csv_delay() { nodes[1].node.create_channel(node_a_id, 1000000, 1000000, 42, None, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_a_id); open_channel.common_fields.to_self_delay = 200; - if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)), - &nodes[0].keys_manager, &nodes[0].keys_manager, node_b_id, &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0, - &high_their_to_self_config, 0, &nodes[0].logger, /*is_0conf=*/false) - { + if let Err(error) = InboundV1Channel::new( + &LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator::new(253)), + &nodes[0].keys_manager, + &nodes[0].keys_manager, + node_b_id, + &nodes[0].node.channel_type_features(), + &nodes[1].node.init_features(), + &open_channel, + 0, + &high_their_to_self_config, + 0, + &nodes[0].logger, + /*is_0conf=*/ false, + ) { match error { ChannelError::Close((err, _)) => { let regex = regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. 
Actual: \d+").unwrap(); @@ -5636,7 +6471,9 @@ pub fn test_user_configurable_csv_delay() { }, _ => panic!("Unexpected event"), } - } else { assert!(false); } + } else { + assert!(false); + } } #[xtest(feature = "_externalize_tests")] @@ -5659,13 +6496,26 @@ pub fn test_check_htlc_underpaying() { let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000); - let route = get_route(&node_a_id, &route_params, &nodes[0].network_graph.read_only(), - None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); + let route = get_route( + &node_a_id, + &route_params, + &nodes[0].network_graph.read_only(), + None, + nodes[0].logger, + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]); - let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None).unwrap(); + let our_payment_secret = nodes[1] + .node + .create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, None) + .unwrap(); let onion = RecipientOnionFields::secret_only(our_payment_secret); let id = PaymentId(our_payment_hash.0); nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); @@ -5680,7 +6530,10 @@ pub fn test_check_htlc_underpaying() { // Note that we first have to wait a random delay before processing the receipt of the HTLC, // and then will wait a second random delay before failing the HTLC back: expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] + ); // Node 3 is expecting payment of 100_000 but received 10_000, // it should fail htlc like we didn't know the preimage. @@ -5689,7 +6542,18 @@ pub fn test_check_htlc_underpaying() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let (update_fail_htlc, commitment_signed) = match events[0] { - MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed }, .. } => { + MessageSendEvent::UpdateHTLCs { + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + ref commitment_signed, + }, + .. 
+ } => { assert!(update_add_htlcs.is_empty()); assert!(update_fulfill_htlcs.is_empty()); assert_eq!(update_fail_htlcs.len(), 1); @@ -5744,9 +6608,12 @@ pub fn test_announce_disable_channels() { for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { - assert_eq!(msg.contents.channel_flags & (1<<1), 1<<1); // The "channel disabled" bit should be set - // Check that each channel gets updated exactly once - if chans_disabled.insert(msg.contents.short_channel_id, msg.contents.timestamp).is_some() { + assert_eq!(msg.contents.channel_flags & (1 << 1), 1 << 1); // The "channel disabled" bit should be set + // Check that each channel gets updated exactly once + if chans_disabled + .insert(msg.contents.short_channel_id, msg.contents.timestamp) + .is_some() + { panic!("Generated ChannelUpdate for wrong chan!"); } }, @@ -5792,7 +6659,7 @@ pub fn test_announce_disable_channels() { for e in msg_events { match e { MessageSendEvent::BroadcastChannelUpdate { ref msg } => { - assert_eq!(msg.contents.channel_flags & (1<<1), 0); // The "channel disabled" bit should be off + assert_eq!(msg.contents.channel_flags & (1 << 1), 0); // The "channel disabled" bit should be off match chans_disabled.remove(&msg.contents.short_channel_id) { // Each update should have a higher timestamp than the previous one, replacing // the old one. @@ -5823,8 +6690,9 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() { let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3000000).0; let payment_params = PaymentParameters::from_node_id(node_a_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap(); - let (route,_, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000); + .with_bolt11_features(nodes[0].node.bolt11_invoice_features()) + .unwrap(); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_params, 3000000); send_along_route(&nodes[1], route, &[&nodes[0]], 3000000); let revoked_txn = get_local_commitment_txn!(nodes[0], chan.2); @@ -5857,8 +6725,15 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() { assert!(tx.input.len() == 1 || tx.input.len() == 2); assert_eq!(tx.output.len(), 1); check_spends!(tx, revoked_txn[0]); - let total_input: u64 = tx.input.iter().map(|i| revoked_txn[0].output[i.previous_output.vout as usize].value.to_sat()).sum(); - let fee_rate: u64 = (total_input - tx.output[0].value.to_sat()) * 1000 / tx.weight().to_wu(); + let total_input: u64 = tx + .input + .iter() + .map(|i| { + revoked_txn[0].output[i.previous_output.vout as usize].value.to_sat() + }) + .sum(); + let fee_rate: u64 = + (total_input - tx.output[0].value.to_sat()) * 1000 / tx.weight().to_wu(); assert_ne!(fee_rate, 0); for input in &tx.input { $fee_rates.insert(input.previous_output, fee_rate); @@ -5869,7 +6744,7 @@ pub fn test_bump_penalty_txn_on_revoked_commitment() { assert_eq!($penalty_txids.len(), 3); node_txn.clear(); } - } + }; } // One or more justice tx should have been broadcast, check it. 
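The macro above recovers each justice transaction's feerate from its inputs and outputs, and the hunks that follow assert an RBF bump heuristic against it. The same arithmetic, pulled out as a standalone sketch (names and values are illustrative; LDK expresses feerates here in satoshis per 1000 weight units):

```rust
// Feerate as computed in the check_spends block above: total input value
// minus total output value, scaled to sat per 1000 weight units.
fn feerate_per_kwu(total_input_sat: u64, output_sat: u64, weight_wu: u64) -> u64 {
	(total_input_sat - output_sat) * 1000 / weight_wu
}

// The 25% bump rule asserted later in this patch as
// `assert!(feerate_2 * 100 > feerate_1 * 125)`: a replacement must pay
// strictly more than 1.25x the prior feerate.
fn is_valid_bump(new_rate_per_kwu: u64, old_rate_per_kwu: u64) -> bool {
	new_rate_per_kwu * 100 > old_rate_per_kwu * 125
}
```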
@@ -5920,18 +6795,39 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); // Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps) - let payment_params = PaymentParameters::from_node_id(node_b_id, 50).with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let payment_params = PaymentParameters::from_node_id(node_b_id, 50) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let scorer = test_utils::TestScorer::new(); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000); - let route = get_route(&node_a_id, &route_params, &nodes[0].network_graph.read_only(), None, - nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); + let route = get_route( + &node_a_id, + &route_params, + &nodes[0].network_graph.read_only(), + None, + nodes[0].logger, + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0; let payment_params = PaymentParameters::from_node_id(node_a_id, 50) - .with_bolt11_features(nodes[0].node.bolt11_invoice_features()).unwrap(); + .with_bolt11_features(nodes[0].node.bolt11_invoice_features()) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value(payment_params, 3_000_000); - let route = get_route(&node_b_id, &route_params, &nodes[1].network_graph.read_only(), None, - nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); + let route = get_route( + &node_b_id, + &route_params, + &nodes[1].network_graph.read_only(), + None, + nodes[0].logger, + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); let failed_payment_hash = send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000).1; let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2); @@ -5942,7 +6838,10 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()])); + connect_block( + &nodes[1], + &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone()]), + ); check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000); @@ -5968,12 +6867,19 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { let hash_128 = connect_blocks(&nodes[0], 40); let block_11 = create_dummy_block(hash_128, 42, vec![revoked_local_txn[0].clone()]); connect_block(&nodes[0], &block_11); - let block_129 = create_dummy_block(block_11.block_hash(), 42, vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()]); + let block_129 = create_dummy_block( + block_11.block_hash(), + 42, + vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[1].clone()], + ); connect_block(&nodes[0], &block_129); let events = nodes[0].node.get_and_clear_pending_events(); - expect_pending_htlcs_forwardable_conditions(events[0..2].to_vec(), &[HTLCHandlingFailureType::Receive { payment_hash: failed_payment_hash }]); + expect_pending_htlcs_forwardable_conditions( + events[0..2].to_vec(), + 
&[HTLCHandlingFailureType::Receive { payment_hash: failed_payment_hash }], + ); match events.last().unwrap() { - Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}, _ => panic!("Unexpected event"), } let first; @@ -6003,8 +6909,14 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[1].previous_output); assert_ne!(node_txn[1].input[0].previous_output, node_txn[1].input[1].previous_output); - assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[1].input[0].previous_output); - assert_eq!(node_txn[1].input[1].previous_output, revoked_htlc_txn[0].input[0].previous_output); + assert_eq!( + node_txn[1].input[0].previous_output, + revoked_htlc_txn[1].input[0].previous_output + ); + assert_eq!( + node_txn[1].input[1].previous_output, + revoked_htlc_txn[0].input[0].previous_output + ); // node_txn[3] spends the revoked outputs from the revoked_htlc_txn (which only have one // output, checked above). @@ -6014,7 +6926,8 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { first = node_txn[2].compute_txid(); // Store both feerates for later comparison - let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[2].output[0].value; + let fee_1 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value + - node_txn[2].output[0].value; feerate_1 = fee_1 * 1000 / node_txn[2].weight().to_wu(); penalty_txn = vec![node_txn[0].clone()]; node_txn.clear(); @@ -6038,7 +6951,8 @@ pub fn test_bump_penalty_txn_on_revoked_htlcs() { check_spends!(node_txn[0], revoked_htlc_txn[0], revoked_htlc_txn[1]); // Verify bumped tx is different and 25% bump heuristic assert_ne!(first, node_txn[0].compute_txid()); - let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value - node_txn[0].output[0].value; + let fee_2 = revoked_htlc_txn[0].output[0].value + revoked_htlc_txn[1].output[0].value + - node_txn[0].output[0].value; let feerate_2 = fee_2 * 1000 / node_txn[0].weight().to_wu(); assert!(feerate_2 * 100 > feerate_1 * 125); let txn = vec![node_txn[0].clone()]; @@ -6083,7 +6997,8 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { let htlc_value_b_msats = 583_000; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value_a_msats); + let (payment_preimage, payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1]], htlc_value_a_msats); route_payment(&nodes[1], &[&nodes[0]], htlc_value_b_msats); // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC @@ -6124,14 +7039,16 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { preimage = node_txn[0].compute_txid(); let index = node_txn[0].input[0].previous_output.vout; - let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat(); + let fee = remote_txn[0].output[index as usize].value.to_sat() + - node_txn[0].output[0].value.to_sat(); feerate_preimage = fee * 1000 / node_txn[0].weight().to_wu(); - let (preimage_bump_tx, timeout_tx) = if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output { - (node_txn[2].clone(), node_txn[1].clone()) - } else { - (node_txn[1].clone(), node_txn[2].clone()) - }; + let (preimage_bump_tx, timeout_tx) = + if node_txn[2].input[0].previous_output == node_txn[0].input[0].previous_output { + (node_txn[2].clone(), node_txn[1].clone()) + } else { + (node_txn[1].clone(), node_txn[2].clone()) + }; preimage_bump = preimage_bump_tx; check_spends!(preimage_bump, remote_txn[0]); @@ -6139,7 +7056,8 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { timeout = timeout_tx.compute_txid(); let index = timeout_tx.input[0].previous_output.vout; - let fee = remote_txn[0].output[index as usize].value.to_sat() - timeout_tx.output[0].value.to_sat(); + let fee = remote_txn[0].output[index as usize].value.to_sat() + - timeout_tx.output[0].value.to_sat(); feerate_timeout = fee * 1000 / timeout_tx.weight().to_wu(); node_txn.clear(); @@ -6158,13 +7076,15 @@ pub fn test_bump_penalty_txn_on_remote_commitment() { check_spends!(preimage_bump, remote_txn[0]); let index = preimage_bump.input[0].previous_output.vout; - let fee = remote_txn[0].output[index as usize].value.to_sat() - preimage_bump.output[0].value.to_sat(); + let fee = remote_txn[0].output[index as usize].value.to_sat() + - preimage_bump.output[0].value.to_sat(); let new_feerate = fee * 1000 / preimage_bump.weight().to_wu(); assert!(new_feerate * 100 > feerate_timeout * 125); assert_ne!(timeout, preimage_bump.compute_txid()); let index = node_txn[0].input[0].previous_output.vout; - let fee = remote_txn[0].output[index as usize].value.to_sat() - node_txn[0].output[0].value.to_sat(); + let fee = remote_txn[0].output[index as usize].value.to_sat() + - node_txn[0].output[0].value.to_sat(); let new_feerate = fee * 1000 / node_txn[0].weight().to_wu(); assert!(new_feerate * 100 > feerate_preimage * 125); assert_ne!(preimage, node_txn[0].compute_txid()); @@ -6201,14 +7121,15 @@ pub fn test_counterparty_raa_skip_no_crash() { { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let mut guard = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); - let keys = guard.channel_by_id.get(&channel_id).and_then(Channel::as_funded).unwrap() - .get_signer(); + let keys = + guard.channel_by_id.get(&channel_id).and_then(Channel::as_funded).unwrap().get_signer(); const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; // Make signer believe we got a counterparty signature, so that it allows the revocation keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; - per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(); + per_commitment_secret = + keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(); // Must revoke without gaps 
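The gap rule exists because per-commitment secrets are derived from a single seed and revealed in strictly decreasing commitment-number order, counting down from `(1 << 48) - 1`; a peer that skips a number has necessarily lost state, so the counterparty force-closes (the `Received an unexpected revoke_and_ack` error below is exactly that reaction). A toy illustration of the countdown only, not LDK's signer API:

```rust
// Commitment numbers count down from 2^48 - 1; the nth update after open
// uses INITIAL_COMMITMENT_NUMBER - n, and revocation secrets must be
// released for each number in order, without gaps.
const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

fn commitment_number_for_update(n: u64) -> u64 {
	INITIAL_COMMITMENT_NUMBER - n
}
```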
keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; @@ -6228,11 +7149,13 @@ pub fn test_counterparty_raa_skip_no_crash() { next_local_nonce: None, }; nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); - assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack"); + assert_eq!( + check_closed_broadcast!(nodes[1], true).unwrap().data, + "Received an unexpected revoke_and_ack" + ); check_added_monitors(&nodes[1], 1); - let reason = ClosureReason::ProcessingError { - err: "Received an unexpected revoke_and_ack".to_string(), - }; + let reason = + ClosureReason::ProcessingError { err: "Received an unexpected revoke_and_ack".to_string() }; check_closed_event!(nodes[1], 1, reason, [node_a_id], 100000); } @@ -6262,7 +7185,10 @@ pub fn test_bump_txn_sanitize_tracking_maps() { // Broadcast set of revoked txn on A connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( + nodes[0], + vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_2 }] + ); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -6303,7 +7229,8 @@ pub fn test_channel_conf_timeout() { let node_a_id = nodes[0].node.get_our_node_id(); - let _funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000); + let _funding_tx = + create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 100_000); // The outbound node should wait forever for confirmation: // This matches `channel::FUNDING_CONF_DEADLINE_BLOCKS` and BOLT 2's suggested timeout, thus is @@ -6322,9 +7249,15 @@ pub fn test_channel_conf_timeout() { let close_ev = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(close_ev.len(), 1); match close_ev[0] { - MessageSendEvent::HandleError { action: ErrorAction::DisconnectPeer { ref msg }, ref node_id } => { + MessageSendEvent::HandleError { + action: ErrorAction::DisconnectPeer { ref msg }, + ref node_id, + } => { assert_eq!(*node_id, node_a_id); - assert_eq!(msg.as_ref().unwrap().data, "Channel closed because funding transaction failed to confirm within 2016 blocks"); + assert_eq!( + msg.as_ref().unwrap().data, + "Channel closed because funding transaction failed to confirm within 2016 blocks" + ); }, _ => panic!("Unexpected event"), } @@ -6343,7 +7276,10 @@ pub fn test_override_channel_config() { let mut override_config = UserConfig::default(); override_config.channel_handshake_config.our_to_self_delay = 200; - nodes[0].node.create_channel(node_b_id, 16_000_000, 12_000_000, 42, None, Some(override_config)).unwrap(); + nodes[0] + .node + .create_channel(node_b_id, 16_000_000, 12_000_000, 42, None, Some(override_config)) + .unwrap(); // Assert the channel created by node0 is using the override config. 
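`our_to_self_delay` is the CSV delay imposed on our own sweep after a unilateral close, which is why `test_user_configurable_csv_delay` above rejects values below `BREAKDOWN_TIMEOUT` on one side and peers demanding more than `their_to_self_delay` on the other. A compact sketch of those two bounds (the constant's value is assumed from LDK's default of roughly one day of blocks):

```rust
// Sketch of the two checks exercised by test_user_configurable_csv_delay:
// our own config must not be dangerously low, and a peer must not demand
// a needlessly long delay on our funds.
const BREAKDOWN_TIMEOUT: u16 = 6 * 24; // ~one day of blocks (assumed value)

fn our_delay_is_sane(our_to_self_delay: u16) -> bool {
	our_to_self_delay >= BREAKDOWN_TIMEOUT
}

fn peer_delay_is_acceptable(msg_to_self_delay: u16, our_max: u16) -> bool {
	msg_to_self_delay <= our_max
}
```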
let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); @@ -6363,7 +7299,10 @@ pub fn test_override_0msat_htlc_minimum() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - nodes[0].node.create_channel(node_b_id, 16_000_000, 12_000_000, 42, None, Some(zero_config)).unwrap(); + nodes[0] + .node + .create_channel(node_b_id, 16_000_000, 12_000_000, 42, None, Some(zero_config)) + .unwrap(); let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(res.common_fields.htlc_minimum_msat, 1); @@ -6382,16 +7321,24 @@ pub fn test_channel_update_has_correct_htlc_maximum_msat() { let mut config_30_percent = UserConfig::default(); config_30_percent.channel_handshake_config.announce_for_forwarding = true; - config_30_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 30; + config_30_percent + .channel_handshake_config + .max_inbound_htlc_value_in_flight_percent_of_channel = 30; let mut config_50_percent = UserConfig::default(); config_50_percent.channel_handshake_config.announce_for_forwarding = true; - config_50_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 50; + config_50_percent + .channel_handshake_config + .max_inbound_htlc_value_in_flight_percent_of_channel = 50; let mut config_95_percent = UserConfig::default(); config_95_percent.channel_handshake_config.announce_for_forwarding = true; - config_95_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 95; + config_95_percent + .channel_handshake_config + .max_inbound_htlc_value_in_flight_percent_of_channel = 95; let mut config_100_percent = UserConfig::default(); config_100_percent.channel_handshake_config.announce_for_forwarding = true; - config_100_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + config_100_percent + .channel_handshake_config + .max_inbound_htlc_value_in_flight_percent_of_channel = 100; let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); @@ -6410,8 +7357,10 @@ pub fn test_channel_update_has_correct_htlc_maximum_msat() { let channel_value_50_percent_msat = (channel_value_msat as f64 * 0.5) as u64; let channel_value_90_percent_msat = (channel_value_msat as f64 * 0.9) as u64; - let (node_0_chan_update, node_1_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001); - let (node_2_chan_update, node_3_chan_update, _, _) = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001); + let (node_0_chan_update, node_1_chan_update, _, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value_satoshis, 10001); + let (node_2_chan_update, node_3_chan_update, _, _) = + create_announced_chan_between_nodes_with_value(&nodes, 2, 3, channel_value_satoshis, 10001); // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`. 
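The in-flight caps asserted here follow directly from the configured percentages: each node advertises an `htlc_maximum_msat` equal to its counterparty's `max_inbound_htlc_value_in_flight_percent_of_channel` share of the channel value. The arithmetic as a self-contained sketch (helper name is illustrative):

```rust
// Illustrative version of the cap checked above: a channel of
// `channel_value_sat` with an N% in-flight limit caps htlc_maximum_msat
// at N% of the channel value, converted to millisatoshis.
fn max_in_flight_msat(channel_value_sat: u64, percent: u64) -> u64 {
	channel_value_sat * 1000 * percent / 100
}

fn main() {
	// 100_000 sat channel with a 50% cap -> 50_000_000 msat
	assert_eq!(max_in_flight_msat(100_000, 50), 50_000_000);
}
```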
@@ -6438,13 +7387,17 @@ pub fn test_manually_accept_inbound_channel_request() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap(); + nodes[0] + .node + .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) + .unwrap(); let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &res); @@ -6479,7 +7432,7 @@ pub fn test_manually_accept_inbound_channel_request() { .node .accept_inbound_channel(&temporary_channel_id, &node_a_id, 23, config) .unwrap(); - } + }, _ => panic!("Unexpected event"), } @@ -6495,7 +7448,7 @@ pub fn test_manually_accept_inbound_channel_request() { assert_eq!(msg.common_fields.max_accepted_htlcs, 3); accept_channel = msg; - } + }, _ => panic!("Unexpected event"), } @@ -6557,13 +7510,17 @@ pub fn test_manually_reject_inbound_channel_request() { manually_accept_conf.manually_accept_inbound_channels = true; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap(); + nodes[0] + .node + .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) + .unwrap(); let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &res); @@ -6579,7 +7536,7 @@ pub fn test_manually_reject_inbound_channel_request() { .node .force_close_broadcasting_latest_txn(&temporary_channel_id, &node_a_id, err) .unwrap(); - } + }, _ => panic!("Unexpected event"), } @@ -6589,7 +7546,7 @@ pub fn test_manually_reject_inbound_channel_request() { match close_msg_ev[0] { MessageSendEvent::HandleError { ref node_id, .. 
} => { assert_eq!(*node_id, node_a_id); - } + }, _ => panic!("Unexpected event"), } @@ -6603,13 +7560,17 @@ pub fn test_can_not_accept_inbound_channel_twice() { manually_accept_conf.manually_accept_inbound_channels = true; let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[None, Some(manually_accept_conf.clone())]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)).unwrap(); + nodes[0] + .node + .create_channel(node_b_id, 100000, 10001, 42, None, Some(manually_accept_conf)) + .unwrap(); let res = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &res); @@ -6621,8 +7582,12 @@ pub fn test_can_not_accept_inbound_channel_twice() { let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { - nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None).unwrap(); - let api_res = nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None); + nodes[1] + .node + .accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None) + .unwrap(); + let api_res = + nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None); match api_res { Err(APIError::APIMisuseError { err }) => { assert_eq!(err, "No such channel awaiting to be accepted."); @@ -6630,7 +7595,7 @@ pub fn test_can_not_accept_inbound_channel_twice() { Ok(_) => panic!("Channel shouldn't be possible to be accepted twice"), Err(e) => panic!("Unexpected Error {:?}", e), } - } + }, _ => panic!("Unexpected event"), } @@ -6641,7 +7606,7 @@ pub fn test_can_not_accept_inbound_channel_twice() { match accept_msg_ev[0] { MessageSendEvent::SendAcceptChannel { ref node_id, .. 
} => { assert_eq!(*node_id, node_a_id); - } + }, _ => panic!("Unexpected event"), } } @@ -6713,10 +7678,8 @@ pub fn test_onion_value_mpp_set_calculation() { // Send payment let id = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes()); let onion = RecipientOnionFields::secret_only(payment_secret); - let onion_session_privs = nodes[0] - .node - .test_add_new_pending_payment(hash, onion.clone(), id, &route) - .unwrap(); + let onion_session_privs = + nodes[0].node.test_add_new_pending_payment(hash, onion.clone(), id, &route).unwrap(); let amt = Some(total_msat); nodes[0] .node @@ -6728,31 +7691,49 @@ pub fn test_onion_value_mpp_set_calculation() { assert_eq!(events.len(), expected_paths.len()); // First path - let ev = remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events); + let ev = + remove_first_msg_event_to_node(&expected_paths[0][0].node.get_our_node_id(), &mut events); let mut payment_event = SendEvent::from_event(ev); let mut prev_node = &nodes[0]; for (idx, &node) in expected_paths[0].iter().enumerate() { assert_eq!(node.node.get_our_node_id(), payment_event.node_id); - if idx == 0 { // routing node + if idx == 0 { + // routing node let session_priv = [3; 32]; let height = nodes[0].best_block_info().1; let session_priv = SecretKey::from_slice(&session_priv).unwrap(); - let mut onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv); + let mut onion_keys = onion_utils::construct_onion_keys( + &Secp256k1::new(), + &route.paths[0], + &session_priv, + ); let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret); - let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads(&route.paths[0], 100_000, - &recipient_onion_fields, height + 1, &None, None, None).unwrap(); + let (mut onion_payloads, _, _) = onion_utils::build_onion_payloads( + &route.paths[0], + 100_000, + &recipient_onion_fields, + height + 1, + &None, + None, + None, + ) + .unwrap(); // Edit amt_to_forward to simulate the sender having set // the final amount and the routing node taking less fee if let msgs::OutboundOnionPayload::Receive { - ref mut sender_intended_htlc_amt_msat, .. - } = onion_payloads[1] { + ref mut sender_intended_htlc_amt_msat, + .. 
+ } = onion_payloads[1] + { *sender_intended_htlc_amt_msat = 99_000; - } else { panic!() } - let new_onion_packet = + } else { + panic!() + } + let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &hash) - .unwrap(); + .unwrap(); payment_event.msgs[0].onion_routing_packet = new_onion_packet; } @@ -6776,16 +7757,15 @@ pub fn test_onion_value_mpp_set_calculation() { } // Second path - let ev = remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events); + let ev = + remove_first_msg_event_to_node(&expected_paths[1][0].node.get_our_node_id(), &mut events); let payment_secret = Some(payment_secret); pass_along_path(&nodes[0], expected_paths[1], 101_000, hash, payment_secret, ev, true, None); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], expected_paths, preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], expected_paths, preimage)); } -fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ +fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64) { let routing_node_count = msat_amounts.len(); let node_count = routing_node_count + 2; @@ -6813,11 +7793,13 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ let path = vec![&nodes[routing_node], &nodes[dst_idx]]; expected_paths.push(path); } - let expected_paths: Vec<&[&Node]> = expected_paths.iter().map(|route| route.as_slice()).collect(); + let expected_paths: Vec<&[&Node]> = + expected_paths.iter().map(|route| route.as_slice()).collect(); // Create a route for each amount let example_amount = 100000; - let (mut route, hash, preimage, payment_secret) = get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount); + let (mut route, hash, preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[src_idx], nodes[dst_idx], example_amount); let sample_path = route.paths.pop().unwrap(); for i in 0..routing_node_count { let routing_node = 2 + i; @@ -6833,10 +7815,8 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ // Send payment with manually set total_msat let id = PaymentId(nodes[src_idx].keys_manager.backing.get_secure_random_bytes()); let onion = RecipientOnionFields::secret_only(payment_secret); - let onion_session_privs = nodes[src_idx] - .node - .test_add_new_pending_payment(hash, onion, id, &route) - .unwrap(); + let onion_session_privs = + nodes[src_idx].node.test_add_new_pending_payment(hash, onion, id, &route).unwrap(); let onion = RecipientOnionFields::secret_only(payment_secret); let amt = Some(total_msat); nodes[src_idx] @@ -6849,17 +7829,26 @@ fn do_test_overshoot_mpp(msat_amounts: &[u64], total_msat: u64){ assert_eq!(events.len(), expected_paths.len()); let mut amount_received = 0; for (path_idx, expected_path) in expected_paths.iter().enumerate() { - let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events); + let ev = + remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events); let current_path_amount = msat_amounts[path_idx]; amount_received += current_path_amount; - let became_claimable_now = amount_received >= total_msat && amount_received - current_path_amount < total_msat; - pass_along_path(&nodes[src_idx], expected_path, amount_received, hash.clone(), Some(payment_secret), ev, became_claimable_now, None); + let became_claimable_now = + amount_received >= total_msat && amount_received - current_path_amount < total_msat; + pass_along_path( + &nodes[src_idx], + expected_path, + 
amount_received, + hash.clone(), + Some(payment_secret), + ev, + became_claimable_now, + None, + ); } - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[src_idx], &expected_paths, preimage)); } #[xtest(feature = "_externalize_tests")] @@ -6884,7 +7873,8 @@ pub fn test_simple_mpp() { let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id; - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); + let (mut route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); let path = route.paths[0].clone(); route.paths.push(path); route.paths[0].hops[0].pubkey = node_b_id; @@ -6895,9 +7885,7 @@ pub fn test_simple_mpp() { route.paths[1].hops[1].short_channel_id = chan_4_id; let paths: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; send_along_route_with_secret(&nodes[0], route, paths, 200_000, payment_hash, payment_secret); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], paths, payment_preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], paths, payment_preimage)); } #[xtest(feature = "_externalize_tests")] @@ -6913,7 +7901,8 @@ pub fn test_preimage_storage() { create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; { - let (payment_hash, payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap(); + let (payment_hash, payment_secret) = + nodes[1].node.create_inbound_payment(Some(100_000), 7200, None).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); @@ -6931,13 +7920,11 @@ pub fn test_preimage_storage() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentClaimable { ref purpose, .. } => { - match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. } => { - claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap()); - }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") - } + Event::PaymentClaimable { ref purpose, .. } => match &purpose { + PaymentPurpose::Bolt11InvoicePayment { payment_preimage, .. 
} => { + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap()); + }, + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), }, _ => panic!("Unexpected event"), } @@ -6958,7 +7945,8 @@ pub fn test_bad_secret_hash() { let random_hash = PaymentHash([42; 32]); let random_secret = PaymentSecret([43; 32]); - let (our_payment_hash, our_payment_secret) = nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap(); + let (our_payment_hash, our_payment_secret) = + nodes[1].node.create_inbound_payment(Some(100_000), 2, None).unwrap(); let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); // All the below cases should end up being handled exactly identically, so we macro the @@ -6974,19 +7962,26 @@ pub fn test_bad_secret_hash() { // We have to forward pending HTLCs once to process the receipt of the HTLC and then // again to process the pending backwards-failure of the HTLC expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Receive{ payment_hash: $payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: $payment_hash }] + ); check_added_monitors(&nodes[1], 1); // We should fail the payment back let mut events = nodes[1].node.get_and_clear_pending_msg_events(); match events.pop().unwrap() { - MessageSendEvent::UpdateHTLCs { node_id: _, channel_id: _, updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. } } => { + MessageSendEvent::UpdateHTLCs { + node_id: _, + channel_id: _, + updates: msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. }, + } => { nodes[0].node.handle_update_fail_htlc(node_b_id, &update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); }, _ => panic!("Unexpected event"), } - } + }; } let expected_err_code = LocalHTLCFailureReason::IncorrectPaymentDetails; @@ -7048,13 +8043,28 @@ pub fn test_update_err_monitor_lockdown() { let watchtower = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); - let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( - &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; + let new_monitor = + <(BlockHash, channelmonitor::ChannelMonitor)>::read( + &mut io::Cursor::new(&monitor.encode()), + (nodes[0].keys_manager, nodes[0].keys_manager), + ) + .unwrap() + .1; assert!(new_monitor == *monitor); new_monitor }; - let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + let watchtower = test_utils::TestChainMonitor::new( + Some(&chain_source), + &chanmon_cfgs[0].tx_broadcaster, + &logger, + &chanmon_cfgs[0].fee_estimator, + &persister, + &node_cfgs[0].keys_manager, + ); + assert_eq!( + watchtower.watch_channel(chan_1.2, new_monitor), + Ok(ChannelMonitorUpdateStatus::Completed) + ); watchtower }; let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); @@ -7078,10 +8088,20 @@ pub fn test_update_err_monitor_lockdown() { get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1.2); if let Some(channel) = chan_ref.as_funded_mut() { assert_eq!(updates.commitment_signed.len(), 
1); - if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) { - assert_eq!(watchtower.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::InProgress); - assert_eq!(nodes[0].chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); - } else { assert!(false); } + if let Ok(Some(update)) = + channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) + { + assert_eq!( + watchtower.chain_monitor.update_channel(chan_1.2, &update), + ChannelMonitorUpdateStatus::InProgress + ); + assert_eq!( + nodes[0].chain_monitor.update_channel(chan_1.2, &update), + ChannelMonitorUpdateStatus::Completed + ); + } else { + assert!(false); + } } else { assert!(false); } @@ -7120,25 +8140,41 @@ pub fn test_concurrent_monitor_claim() { let chain_source = test_utils::TestChainSource::new(Network::Testnet); let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice")); let persister = test_utils::TestPersister::new(); - let alice_broadcaster = test_utils::TestBroadcaster::with_blocks( - Arc::new(Mutex::new(nodes[0].blocks.lock().unwrap().clone())), - ); + let alice_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::new(Mutex::new( + nodes[0].blocks.lock().unwrap().clone(), + ))); let watchtower_alice = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); - let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( - &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; + let new_monitor = + <(BlockHash, channelmonitor::ChannelMonitor)>::read( + &mut io::Cursor::new(&monitor.encode()), + (nodes[0].keys_manager, nodes[0].keys_manager), + ) + .unwrap() + .1; assert!(new_monitor == *monitor); new_monitor }; - let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + let watchtower = test_utils::TestChainMonitor::new( + Some(&chain_source), + &alice_broadcaster, + &logger, + &chanmon_cfgs[0].fee_estimator, + &persister, + &node_cfgs[0].keys_manager, + ); + assert_eq!( + watchtower.watch_channel(chan_1.2, new_monitor), + Ok(ChannelMonitorUpdateStatus::Completed) + ); watchtower }; let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); // Make Alice aware of enough blocks that it doesn't think we're violating transaction lock time // requirements here. 
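The broadcast height defined just below is the funding confirmation depth, plus one block, plus the payment's CLTV delta, plus the grace period LDK waits before going on-chain. With the test framework's usual constants (assumed here: `CHAN_CONFIRM_DEPTH` = 10, `TEST_FINAL_CLTV` = 70, `LATENCY_GRACE_PERIOD_BLOCKS` = 3), the sum works out as:

```rust
// Assumed test-framework values; only the shape of the sum matters: the
// watchtower must see enough blocks that broadcasting the HTLC-timeout
// claim does not violate the HTLC's CLTV lock time.
const CHAN_CONFIRM_DEPTH: u32 = 10;
const TEST_FINAL_CLTV: u32 = 70;
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;

const HTLC_TIMEOUT_BROADCAST: u32 =
	CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS; // = 84
```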
- const HTLC_TIMEOUT_BROADCAST: u32 = CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS; + const HTLC_TIMEOUT_BROADCAST: u32 = + CHAN_CONFIRM_DEPTH + 1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS; let next_block = (block.clone(), HTLC_TIMEOUT_BROADCAST); alice_broadcaster.blocks.lock().unwrap().resize((HTLC_TIMEOUT_BROADCAST) as usize, next_block); watchtower_alice.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST); @@ -7155,24 +8191,41 @@ pub fn test_concurrent_monitor_claim() { let chain_source = test_utils::TestChainSource::new(Network::Testnet); let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob")); let persister = test_utils::TestPersister::new(); - let bob_broadcaster = test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks)); + let bob_broadcaster = + test_utils::TestBroadcaster::with_blocks(Arc::clone(&alice_broadcaster.blocks)); let watchtower_bob = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); - let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read( - &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; + let new_monitor = + <(BlockHash, channelmonitor::ChannelMonitor)>::read( + &mut io::Cursor::new(&monitor.encode()), + (nodes[0].keys_manager, nodes[0].keys_manager), + ) + .unwrap() + .1; assert!(new_monitor == *monitor); new_monitor }; - let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(watchtower.watch_channel(chan_1.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + let watchtower = test_utils::TestChainMonitor::new( + Some(&chain_source), + &bob_broadcaster, + &logger, + &chanmon_cfgs[0].fee_estimator, + &persister, + &node_cfgs[0].keys_manager, + ); + assert_eq!( + watchtower.watch_channel(chan_1.2, new_monitor), + Ok(ChannelMonitorUpdateStatus::Completed) + ); watchtower }; let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()); watchtower_bob.chain_monitor.block_connected(&block, HTLC_TIMEOUT_BROADCAST - 1); // Route another payment to generate another update with still previous HTLC pending - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 3000000); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 3000000); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); @@ -7188,12 +8241,25 @@ pub fn test_concurrent_monitor_claim() { get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_1.2); if let Some(channel) = chan_ref.as_funded_mut() { assert_eq!(updates.commitment_signed.len(), 1); - if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) { + if let Ok(Some(update)) = + channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) + { // Watchtower Alice should already have seen the block and reject the update - assert_eq!(watchtower_alice.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::InProgress); - assert_eq!(watchtower_bob.chain_monitor.update_channel(chan_1.2, &update), ChannelMonitorUpdateStatus::Completed); - assert_eq!(nodes[0].chain_monitor.update_channel(chan_1.2, &update), 
ChannelMonitorUpdateStatus::Completed); - } else { assert!(false); } + assert_eq!( + watchtower_alice.chain_monitor.update_channel(chan_1.2, &update), + ChannelMonitorUpdateStatus::InProgress + ); + assert_eq!( + watchtower_bob.chain_monitor.update_channel(chan_1.2, &update), + ChannelMonitorUpdateStatus::Completed + ); + assert_eq!( + nodes[0].chain_monitor.update_channel(chan_1.2, &update), + ChannelMonitorUpdateStatus::Completed + ); + } else { + assert!(false); + } } else { assert!(false); } @@ -7202,7 +8268,10 @@ pub fn test_concurrent_monitor_claim() { check_added_monitors(&nodes[0], 1); //// Provide one more block to watchtower Bob, expect broadcast of commitment and HTLC-Timeout - watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST); + watchtower_bob.chain_monitor.block_connected( + &create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), + HTLC_TIMEOUT_BROADCAST, + ); // Watchtower Bob should have broadcast a commitment/HTLC-timeout let bob_state_y; @@ -7216,9 +8285,11 @@ pub fn test_concurrent_monitor_claim() { let height = HTLC_TIMEOUT_BROADCAST + 1; connect_blocks(&nodes[0], height - nodes[0].best_block_info().1); check_closed_broadcast(&nodes[0], 1, true); - check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false, - [node_b_id], 100000); - watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height); + check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false, [node_b_id], 100000); + watchtower_alice.chain_monitor.block_connected( + &create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), + height, + ); check_added_monitors(&nodes[0], 1); { let htlc_txn = alice_broadcaster.txn_broadcast(); @@ -7261,16 +8332,19 @@ pub fn test_pre_lockin_no_chan_closed_update() { nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).unwrap(); check_added_monitors(&nodes[0], 0); - let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); - let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }); + let funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { + txid: funding_created_msg.funding_txid, + index: funding_created_msg.funding_output_index, + }); let err_msg = msgs::ErrorMessage { channel_id, data: "Hi".to_owned() }; nodes[0].node.handle_error(node_b_id, &err_msg); assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty()); - let reason = ClosureReason::CounterpartyForceClosed { - peer_msg: UntrustedString("Hi".to_string()), - }; + let reason = + ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }; check_closed_event!(nodes[0], 2, reason, true, [node_b_id], 100000); } @@ -7305,7 +8379,11 @@ pub fn test_htlc_no_detection() { connect_block(&nodes[0], &block); // We deliberately connect the local tx twice as this should provoke a failure calling // this test before #653 fix. 
- chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &block, nodes[0].best_block_info().1 + 1); + chain::Listen::block_connected( + &nodes[0].chain_monitor.chain_monitor, + &block, + nodes[0].best_block_info().1 + 1, + ); check_closed_broadcast!(nodes[0], true); check_added_monitors(&nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [node_b_id], 100000); @@ -7320,12 +8398,17 @@ pub fn test_htlc_no_detection() { node_txn[0].clone() }; - connect_block(&nodes[0], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()])); + connect_block( + &nodes[0], + &create_dummy_block(nodes[0].best_block_hash(), 42, vec![htlc_timeout.clone()]), + ); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[0], our_payment_hash, false); } -fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) { +fn do_test_onchain_htlc_settlement_after_close( + broadcast_alice: bool, go_onchain_before_fulfill: bool, +) { // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob --> // Carol, Alice would be the upstream node, and Carol the downstream.) @@ -7354,7 +8437,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain // Steps (1) and (2): // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back. - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); // Check that Alice's commitment transaction now contains an output for this HTLC. let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2); @@ -7389,9 +8473,12 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain if go_onchain_before_fulfill { let txn_to_broadcast = match broadcast_alice { true => alice_txn.clone(), - false => get_local_commitment_txn!(nodes[1], chan_ab.2) + false => get_local_commitment_txn!(nodes[1], chan_ab.2), }; - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()])); + connect_block( + &nodes[1], + &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]), + ); if broadcast_alice { check_closed_broadcast!(nodes[1], true); check_added_monitors(&nodes[1], 1); @@ -7471,9 +8558,14 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain // If this test requires the force-closed channel to not be on-chain until after the fulfill, // here's where we put said channel's commitment tx on-chain. let mut txn_to_broadcast = alice_txn.clone(); - if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); } + if !broadcast_alice { + txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); + } if !go_onchain_before_fulfill { - connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()])); + connect_block( + &nodes[1], + &create_dummy_block(nodes[1].best_block_hash(), 42, vec![txn_to_broadcast[0].clone()]), + ); // If Bob was the one to force-close, he will have already passed these checks earlier. 
if broadcast_alice { check_closed_broadcast!(nodes[1], true); @@ -7508,7 +8600,10 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain check_spends!(bob_txn[0], txn_to_broadcast[0]); assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); } else { - assert_eq!(bob_txn.len(), if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 }); + assert_eq!( + bob_txn.len(), + if nodes[1].connect_style.borrow().updates_best_block_first() { 3 } else { 2 } + ); let htlc_tx = bob_txn.pop().unwrap(); check_spends!(htlc_tx, txn_to_broadcast[0]); assert_eq!(htlc_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT + 1); @@ -7538,15 +8633,18 @@ pub fn test_duplicate_temporary_channel_id_from_different_peers() { // Create a first channel nodes[1].node.create_channel(node_a_id, 100000, 10001, 42, None, None).unwrap(); - let mut open_chan_msg_chan_1_0 = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_a_id); + let mut open_chan_msg_chan_1_0 = + get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_a_id); // Create a second channel nodes[2].node.create_channel(node_a_id, 100000, 10001, 43, None, None).unwrap(); - let mut open_chan_msg_chan_2_0 = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, node_a_id); + let mut open_chan_msg_chan_2_0 = + get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, node_a_id); // Modify the `OpenChannel` from `nodes[2]` to `nodes[0]` to ensure that it uses the same // `temporary_channel_id` as the `OpenChannel` from nodes[1] to nodes[0]. - open_chan_msg_chan_2_0.common_fields.temporary_channel_id = open_chan_msg_chan_1_0.common_fields.temporary_channel_id; + open_chan_msg_chan_2_0.common_fields.temporary_channel_id = + open_chan_msg_chan_1_0.common_fields.temporary_channel_id; // Assert that `nodes[0]` can accept both `OpenChannel` requests, even though they use the same // `temporary_channel_id` as they are from different peers.
@@ -7557,7 +8655,10 @@ pub fn test_duplicate_temporary_channel_id_from_different_peers() { match &events[0] { MessageSendEvent::SendAcceptChannel { node_id, msg } => { assert_eq!(node_id, &node_b_id); - assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id); + assert_eq!( + msg.common_fields.temporary_channel_id, + open_chan_msg_chan_1_0.common_fields.temporary_channel_id + ); }, _ => panic!("Unexpected event"), } @@ -7570,7 +8671,10 @@ pub fn test_duplicate_temporary_channel_id_from_different_peers() { match &events[0] { MessageSendEvent::SendAcceptChannel { node_id, msg } => { assert_eq!(node_id, &node_c_id); - assert_eq!(msg.common_fields.temporary_channel_id, open_chan_msg_chan_1_0.common_fields.temporary_channel_id); + assert_eq!( + msg.common_fields.temporary_channel_id, + open_chan_msg_chan_1_0.common_fields.temporary_channel_id + ); }, _ => panic!("Unexpected event"), } @@ -7599,13 +8703,12 @@ pub fn test_peer_funding_sidechannel() { let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0); let temp_chan_id_ca = exchange_open_accept_chan(&nodes[1], &nodes[0], 1_000_000, 0); - let (_, tx, funding_output) = - create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); + let (_, tx, funding_output) = create_funding_transaction(&nodes[0], &node_b_id, 1_000_000, 42); let cs_funding_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(cs_funding_events.len(), 1); match cs_funding_events[0] { - Event::FundingGenerationReady { .. } => {} + Event::FundingGenerationReady { .. } => {}, _ => panic!("Unexpected event {:?}", cs_funding_events), } @@ -7614,7 +8717,8 @@ pub fn test_peer_funding_sidechannel() { .node .funding_transaction_generated_unchecked(temp_chan_id_ca, node_a_id, tx.clone(), output_idx) .unwrap(); - let funding_created_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_a_id); + let funding_created_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingCreated, node_a_id); nodes[0].node.handle_funding_created(node_b_id, &funding_created_msg); get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, node_b_id); expect_channel_pending_event(&nodes[0], &node_b_id); @@ -7663,9 +8767,11 @@ pub fn test_duplicate_conflicting_funding_from_second_peer() { nodes[0].node.funding_transaction_generated(temp_chan_id, node_b_id, tx.clone()).unwrap(); - let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + let mut funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); @@ -7696,7 +8802,8 @@ pub fn test_duplicate_funding_err_in_funding() { let node_c_id = nodes[2].node.get_our_node_id(); let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]); - let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.compute_txid(), index: 0 }; + let real_chan_funding_txo = + chain::transaction::OutPoint { txid: funding_tx.compute_txid(), index: 0 }; assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id); 
nodes[2].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); @@ -7704,7 +8811,8 @@ pub fn test_duplicate_funding_err_in_funding() { let node_c_temp_chan_id = open_chan_msg.common_fields.temporary_channel_id; open_chan_msg.common_fields.temporary_channel_id = real_channel_id; nodes[1].node.handle_open_channel(node_c_id, &open_chan_msg); - let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_c_id); + let mut accept_chan_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_c_id); accept_chan_msg.common_fields.temporary_channel_id = node_c_temp_chan_id; nodes[2].node.handle_accept_channel(node_b_id, &accept_chan_msg); @@ -7714,7 +8822,8 @@ pub fn test_duplicate_funding_err_in_funding() { nodes[2].node.funding_transaction_generated(node_c_temp_chan_id, node_b_id, fund_tx).unwrap(); - let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, node_b_id); + let mut funding_created_msg = + get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, node_b_id); funding_created_msg.temporary_channel_id = real_channel_id; // Make the signature invalid by changing the funding output funding_created_msg.funding_output_index += 10; @@ -7747,7 +8856,10 @@ pub fn test_duplicate_chan_id() { nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg); - nodes[0].node.handle_accept_channel(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id)); + nodes[0].node.handle_accept_channel( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), + ); // Try to create a second channel with the same temporary_channel_id as the first and check // that it is rejected. @@ -7756,7 +8868,10 @@ pub fn test_duplicate_chan_id() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, + node_id, + } => { // Technically, at this point, nodes[1] would be justified in thinking both the // first (valid) and second (invalid) channels are closed, given they both have // the same non-temporary channel_id. 
However, currently we do not, so we just @@ -7774,9 +8889,11 @@ pub fn test_duplicate_chan_id() { nodes[0].node.funding_transaction_generated(temp_channel_id, node_b_id, tx.clone()).unwrap(); check_added_monitors(&nodes[0], 0); - let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + let mut funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); let channel_id = ChannelId::v1_from_funding_txid( - funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index + funding_created_msg.funding_txid.as_byte_array(), + funding_created_msg.funding_output_index, ); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); @@ -7788,9 +8905,13 @@ pub fn test_duplicate_chan_id() { } expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); - let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }; + let funding_outpoint = crate::chain::transaction::OutPoint { + txid: funding_created_msg.funding_txid, + index: funding_created_msg.funding_output_index, + }; let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a @@ -7805,7 +8926,10 @@ pub fn test_duplicate_chan_id() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, + node_id, + } => { // Technically, at this point, nodes[1] would be justified in thinking both // channels are closed, but currently we do not, so we just move forward with it. 
assert_eq!(msg.channel_id, open_chan_msg.common_fields.temporary_channel_id); @@ -7819,7 +8943,10 @@ pub fn test_duplicate_chan_id() { nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &open_chan_2_msg); - nodes[0].node.handle_accept_channel(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id)); + nodes[0].node.handle_accept_channel( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), + ); create_funding_transaction(&nodes[0], &node_b_id, 100000, 42); // Get and check the FundingGenerationReady event let funding_created = { @@ -7834,10 +8961,13 @@ pub fn test_duplicate_chan_id() { if let Some(mut chan) = channel.as_unfunded_outbound_v1_mut() { let logger = test_utils::TestLogger::new(); - chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap() + chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger) + .map_err(|_| ()) + .unwrap() } else { panic!("Unexpected Channel phase") - }.unwrap() + } + .unwrap() }; check_added_monitors(&nodes[0], 0); nodes[1].node.handle_funding_created(node_a_id, &funding_created); @@ -7846,7 +8976,7 @@ pub fn test_duplicate_chan_id() { check_added_monitors(&nodes[1], 0); let reason = ClosureReason::ProcessingError { - err: "Already had channel with the new channel_id".to_owned() + err: "Already had channel with the new channel_id".to_owned(), }; let close_event = ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, reason); @@ -7857,7 +8987,10 @@ pub fn test_duplicate_chan_id() { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, + node_id, + } => { // Technically, at this point, nodes[1] would be justified in thinking both // channels are closed, but currently we do not, so we just move forward with it. 
assert_eq!(msg.channel_id, channel_id); @@ -7883,8 +9016,10 @@ pub fn test_duplicate_chan_id() { assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx); - let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); - let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); + let (channel_ready, _) = + create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (announcement, as_update, bs_update) = + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); send_payment(&nodes[0], &[&nodes[1]], 8000000); @@ -7914,11 +9049,17 @@ pub fn test_error_chans_closed() { assert_eq!(nodes[2].node.list_usable_channels().len(), 1); // Closing a channel from a different peer has no effect - nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() }); + nodes[0].node.handle_error( + node_b_id, + &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() }, + ); assert_eq!(nodes[0].node.list_usable_channels().len(), 3); // Closing one channel doesn't impact others - nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() }); + nodes[0].node.handle_error( + node_b_id, + &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() }, + ); check_added_monitors(&nodes[0], 1); check_closed_broadcast!(nodes[0], false); @@ -7928,12 +9069,21 @@ pub fn test_error_chans_closed() { assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1); assert_eq!(nodes[0].node.list_usable_channels().len(), 2); - assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2); - assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2); + assert!( + nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 + || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2 + ); + assert!( + nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 + || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2 + ); // A null channel ID should close all channels let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); - nodes[0].node.handle_error(node_b_id, &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() }); + nodes[0].node.handle_error( + node_b_id, + &msgs::ErrorMessage { channel_id: ChannelId::new_zero(), data: "ERR".to_owned() }, + ); check_added_monitors(&nodes[0], 2); let reason = @@ -7985,28 +9135,45 @@ pub fn test_invalid_funding_tx() { let node_b_id = nodes[1].node.get_our_node_id(); nodes[0].node.create_channel(node_b_id, 100_000, 10_000, 42, None, None).unwrap(); - nodes[1].node.handle_open_channel(node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id)); - nodes[0].node.handle_accept_channel(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id)); + nodes[1].node.handle_open_channel( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id), + ); + nodes[0].node.handle_accept_channel( + node_b_id, + &get_event_msg!(nodes[1], 
MessageSendEvent::SendAcceptChannel, node_a_id), + ); - let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); + let (temporary_channel_id, mut tx, _) = + create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is // 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing // a panic as we'd try to extract a 32 byte preimage from a witness element without checking // its length. - let mut wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program(); + let mut wit_program: Vec<u8> = + channelmonitor::deliberately_bogus_accepted_htlc_witness_program(); let wit_program_script: ScriptBuf = wit_program.into(); for output in tx.output.iter_mut() { // Make the confirmed funding transaction have a bogus script_pubkey output.script_pubkey = ScriptBuf::new_p2wsh(&wit_program_script.wscript_hash()); } - nodes[0].node.funding_transaction_generated_unchecked(temporary_channel_id, node_b_id, tx.clone(), 0).unwrap(); - nodes[1].node.handle_funding_created(node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id)); + nodes[0] + .node + .funding_transaction_generated_unchecked(temporary_channel_id, node_b_id, tx.clone(), 0) + .unwrap(); + nodes[1].node.handle_funding_created( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id), + ); check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); - nodes[0].node.handle_funding_signed(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id)); + nodes[0].node.handle_funding_signed( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id), + ); check_added_monitors(&nodes[0], 1); expect_channel_pending_event(&nodes[0], &node_b_id); @@ -8029,29 +9196,38 @@ pub fn test_invalid_funding_tx() { if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] { assert_eq!(*node_id, node_a_id); if let msgs::ErrorAction::DisconnectPeer { msg } = action { - assert_eq!(msg.as_ref().unwrap().data, "Channel closed because of an exception: ".to_owned() + expected_err); - } else { panic!(); } - } else { panic!(); } + assert_eq!( + msg.as_ref().unwrap().data, + "Channel closed because of an exception: ".to_owned() + expected_err + ); + } else { + panic!(); + } + } else { + panic!(); + } assert_eq!(nodes[1].node.list_channels().len(), 0); // Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements // long, the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking // as it's not 32 bytes long.
let mut spend_tx = Transaction { - version: Version::TWO, lock_time: LockTime::ZERO, - input: tx.output.iter().enumerate().map(|(idx, _)| TxIn { - previous_output: BitcoinOutPoint { - txid: tx.compute_txid(), - vout: idx as u32, - }, - script_sig: ScriptBuf::new(), - sequence: Sequence::ENABLE_RBF_NO_LOCKTIME, - witness: Witness::from_slice(&channelmonitor::deliberately_bogus_accepted_htlc_witness()) - }).collect(), - output: vec![TxOut { - value: Amount::from_sat(1000), - script_pubkey: ScriptBuf::new(), - }] + version: Version::TWO, + lock_time: LockTime::ZERO, + input: tx + .output + .iter() + .enumerate() + .map(|(idx, _)| TxIn { + previous_output: BitcoinOutPoint { txid: tx.compute_txid(), vout: idx as u32 }, + script_sig: ScriptBuf::new(), + sequence: Sequence::ENABLE_RBF_NO_LOCKTIME, + witness: Witness::from_slice( + &channelmonitor::deliberately_bogus_accepted_htlc_witness(), + ), + }) + .collect(), + output: vec![TxOut { value: Amount::from_sat(1000), script_pubkey: ScriptBuf::new() }], }; check_spends!(spend_tx, tx); mine_transaction(&nodes[1], &spend_tx); @@ -8083,7 +9259,8 @@ pub fn test_coinbase_funding_tx() { nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); // Create the coinbase funding transaction. - let (channel_id, tx, _) = create_coinbase_funding_transaction(&nodes[0], &node_b_id, 100000, 42); + let (channel_id, tx, _) = + create_coinbase_funding_transaction(&nodes[0], &node_b_id, 100000, 42); nodes[0].node.funding_transaction_generated(channel_id, node_b_id, tx.clone()).unwrap(); check_added_monitors(&nodes[0], 0); @@ -8110,7 +9287,10 @@ pub fn test_coinbase_funding_tx() { // Now connect one more block which results in 100 confirmations of the coinbase transaction. connect_blocks(&nodes[0], 1); // There should now be a `channel_ready` which can be handled. - let _ = &nodes[1].node.handle_channel_ready(node_a_id, &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, node_b_id)); + let _ = &nodes[1].node.handle_channel_ready( + node_a_id, + &get_event_msg!(&nodes[0], MessageSendEvent::SendChannelReady, node_b_id), + ); confirm_transaction_at(&nodes[1], &tx, 1); connect_blocks(&nodes[1], COINBASE_MATURITY - 2); @@ -8169,7 +9349,10 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS); } nodes[1].chain_monitor.chain_monitor.transactions_confirmed( - &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height); + &nodes[1].get_block_header(conf_height), + &[(0, &node_txn[0])], + conf_height, + ); if test_height_before_timelock { // If we confirmed the close transaction, but timelocks have not yet expired, we should not // generate any events or broadcast any transactions @@ -8190,8 +9373,14 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // should immediately fail-backwards the HTLC to the previous hop, without waiting for an // additional block built on top of the current chain. 
nodes[1].chain_monitor.chain_monitor.transactions_confirmed( - &nodes[1].get_block_header(conf_height + 1), &[(0, htlc_tx)], conf_height + 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: channel_id }]); + &nodes[1].get_block_header(conf_height + 1), + &[(0, htlc_tx)], + conf_height + 1, + ); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id }] + ); check_added_monitors(&nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -8208,15 +9397,20 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // We should also generate a SpendableOutputs event with the to_self output (once the // timelock is up). - connect_blocks(&nodes[1], (BREAKDOWN_TIMEOUT as u32) - TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - 1); + connect_blocks( + &nodes[1], + (BREAKDOWN_TIMEOUT as u32) - TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - 1, + ); let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); assert_eq!(descriptor_spend_txn.len(), 1); // When the HTLC times out on the A<->B edge, the B<->C channel will fail the HTLC back to // avoid the A<->B channel closing (even though it already has). This will generate a // spurious HTLCHandlingFailed event. - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], - vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id }] + ); } } @@ -8238,10 +9432,12 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001); let payment_params = PaymentParameters::from_node_id(node_b_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()) + .unwrap(); let route = get_route!(nodes[0], payment_params, 10_000).unwrap(); - let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]); + let (our_payment_preimage, our_payment_hash, our_payment_secret) = + get_payment_preimage_hash!(&nodes[1]); { let onion = RecipientOnionFields::secret_only(our_payment_secret); @@ -8285,7 +9481,10 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }, HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }, ]; - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( + nodes[1], + expected_destinations + ); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[1], 1); @@ -8298,13 +9497,28 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { let failure_events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(failure_events.len(), 4); - if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); } - if let Event::PaymentFailed { .. } = failure_events[1] {} else { panic!(); } - if let Event::PaymentPathFailed { .. } = failure_events[2] {} else { panic!(); } - if let Event::PaymentFailed { .. 
} = failure_events[3] {} else { panic!(); } + if let Event::PaymentPathFailed { .. } = failure_events[0] { + } else { + panic!(); + } + if let Event::PaymentFailed { .. } = failure_events[1] { + } else { + panic!(); + } + if let Event::PaymentPathFailed { .. } = failure_events[2] { + } else { + panic!(); + } + if let Event::PaymentFailed { .. } = failure_events[3] { + } else { + panic!(); + } } else { // Let the second HTLC fail and claim the first - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!( + nodes[1], + vec![HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash }] + ); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors(&nodes[1], 1); @@ -8352,16 +9566,20 @@ pub fn test_inconsistent_mpp_params() { create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0); - let chan_2_3 =create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); + let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); let payment_params = PaymentParameters::from_node_id(node_d_id, TEST_FINAL_CLTV) - .with_bolt11_features(nodes[3].node.bolt11_invoice_features()).unwrap(); + .with_bolt11_features(nodes[3].node.bolt11_invoice_features()) + .unwrap(); let mut route = get_route!(nodes[0], payment_params, 15_000_000).unwrap(); assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first if path_a.hops[0].pubkey == node_b_id { - core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } + core::cmp::Ordering::Less + } else { + core::cmp::Ordering::Greater + } }); let (preimage, hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]); @@ -8439,7 +9657,10 @@ pub fn test_inconsistent_mpp_params() { nodes[2].node.handle_update_fail_htlc(node_d_id, &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Forward { node_id: Some(node_d_id), channel_id: chan_2_3.2 }] + ); check_added_monitors(&nodes[2], 1); let fail_updates_2 = get_htlc_update_msgs!(nodes[2], node_a_id); @@ -8464,9 +9685,7 @@ pub fn test_inconsistent_mpp_params() { let path_b = &[&nodes[2], &nodes[3]]; pass_along_path(&nodes[0], path_b, real_amt, hash, Some(payment_secret), event, true, None); - do_claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[path_a, path_b], preimage) - ); + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], &[path_a, path_b], preimage)); expect_payment_sent(&nodes[0], preimage, Some(None), true, true); } @@ -8488,12 +9707,16 @@ pub fn test_double_partial_claim() { create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0); - let (mut route, hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000); + let (mut route, hash, payment_preimage, payment_secret) = + 
get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000); assert_eq!(route.paths.len(), 2); route.paths.sort_by(|path_a, _| { // Sort the path so that the path through nodes[1] comes first if path_a.hops[0].pubkey == node_b_id { - core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater } + core::cmp::Ordering::Less + } else { + core::cmp::Ordering::Greater + } }); let paths: &[&[_]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]]; @@ -8544,7 +9767,10 @@ enum ExposureEvent { AtUpdateFeeOutbound, } -fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, multiplier_dust_limit: bool, apply_excess_fee: bool) { +fn do_test_max_dust_htlc_exposure( + dust_outbound_balance: bool, exposure_breach_event: ExposureEvent, on_holder_tx: bool, + multiplier_dust_limit: bool, apply_excess_fee: bool, +) { // Test that we properly reject dust HTLCs violating our `max_dust_htlc_exposure_msat` // policy. // @@ -8563,19 +9789,25 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e // We hard-code the feerate values here but they're re-calculated further down and asserted. // If the values ever change, the constants below should simply be updated. const AT_FEE_OUTBOUND_HTLCS: u64 = 20; - let nondust_htlc_count_in_limit = - if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound { + let nondust_htlc_count_in_limit = if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound + { AT_FEE_OUTBOUND_HTLCS - } else { 0 }; + } else { + 0 + }; let initial_feerate = if apply_excess_fee { 253 * 2 } else { 253 }; let expected_dust_buffer_feerate = initial_feerate + 2530; - let mut commitment_tx_cost_msat = commit_tx_fee_msat(initial_feerate - 253, nondust_htlc_count_in_limit, &ChannelTypeFeatures::empty()); - commitment_tx_cost_msat += - if on_holder_tx { - htlc_success_tx_weight(&ChannelTypeFeatures::empty()) - } else { - htlc_timeout_tx_weight(&ChannelTypeFeatures::empty()) - } * (initial_feerate as u64 - 253) * nondust_htlc_count_in_limit; + let mut commitment_tx_cost_msat = commit_tx_fee_msat( + initial_feerate - 253, + nondust_htlc_count_in_limit, + &ChannelTypeFeatures::empty(), + ); + commitment_tx_cost_msat += if on_holder_tx { + htlc_success_tx_weight(&ChannelTypeFeatures::empty()) + } else { + htlc_timeout_tx_weight(&ChannelTypeFeatures::empty()) + } * (initial_feerate as u64 - 253) + * nondust_htlc_count_in_limit; { let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); *feerate_lock = initial_feerate; @@ -8585,7 +9817,9 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e // to get roughly the same initial value as the default setting when this test was // originally written.
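// A rough sketch of the accounting above, not an exact fee model: with
// excess = initial_feerate - 253 and N = nondust_htlc_count_in_limit,
// commitment_tx_cost_msat is commit_tx_fee_msat(excess, N, ...) plus the
// second-stage (HTLC-success or HTLC-timeout) tx weight times excess times N,
// so both threshold variants below target roughly 5_000_000 msat of dust
// exposure plus the fee cost those N non-dust HTLCs add at the excess feerate.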
MaxDustHTLCExposure::FeeRateMultiplier((5_000_000 + commitment_tx_cost_msat) / 253) - } else { MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost_msat) }; + } else { + MaxDustHTLCExposure::FixedLimitMsat(5_000_000 + commitment_tx_cost_msat) + }; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -8601,7 +9835,8 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e open_channel.common_fields.dust_limit_satoshis = 546; } nodes[1].node.handle_open_channel(node_a_id, &open_channel); - let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + let mut accept_channel = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); @@ -8620,16 +9855,24 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e } nodes[0].node.funding_transaction_generated(chan_id, node_b_id, tx.clone()).unwrap(); - nodes[1].node.handle_funding_created(node_a_id, &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id)); + nodes[1].node.handle_funding_created( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id), + ); check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); - nodes[0].node.handle_funding_signed(node_b_id, &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id)); + nodes[0].node.handle_funding_signed( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id), + ); check_added_monitors(&nodes[0], 1); expect_channel_pending_event(&nodes[0], &node_b_id); - let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); - let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); + let (channel_ready, channel_id) = + create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx); + let (announcement, as_update, bs_update) = + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); { @@ -8645,24 +9888,39 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&node_b_id).unwrap().lock().unwrap(); let chan = chan_lock.channel_by_id.get(&channel_id).unwrap(); - (chan.context().get_dust_buffer_feerate(None) as u64, - chan.context().get_max_dust_htlc_exposure_msat(253)) + ( + chan.context().get_dust_buffer_feerate(None) as u64, + chan.context().get_max_dust_htlc_exposure_msat(253), + ) }; assert_eq!(dust_buffer_feerate, expected_dust_buffer_feerate as u64); - let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - 1) * 1000; - let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat; + let dust_outbound_htlc_on_holder_tx_msat: u64 = + (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + + 
open_channel.common_fields.dust_limit_satoshis + - 1) * 1000; + let dust_outbound_htlc_on_holder_tx: u64 = + max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat; // Subtract 3 sats for multiplier and 2 sats for fixed limit to make sure we are 50% below the dust limit. // This is to make sure we fully use the dust limit. If we don't, we could end up with `dust_inbound_htlc_on_holder_tx` being 1 // while `max_dust_htlc_exposure_msat` is not equal to `dust_outbound_htlc_on_holder_tx_msat`. - let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000; - let dust_inbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat; + let dust_inbound_htlc_on_holder_tx_msat: u64 = + (dust_buffer_feerate * htlc_success_tx_weight(&channel_type_features) / 1000 + + open_channel.common_fields.dust_limit_satoshis + - if multiplier_dust_limit { 3 } else { 2 }) + * 1000; + let dust_inbound_htlc_on_holder_tx: u64 = + max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat; // This test was written with a fixed dust value here, which we retain, but assert that it is, // indeed, dust on both transactions. let dust_htlc_on_counterparty_tx: u64 = 4; let dust_htlc_on_counterparty_tx_msat: u64 = 1_250_000; - let calcd_dust_htlc_on_counterparty_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.common_fields.dust_limit_satoshis - if multiplier_dust_limit { 3 } else { 2 }) * 1000; + let calcd_dust_htlc_on_counterparty_tx_msat: u64 = + (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + + open_channel.common_fields.dust_limit_satoshis + - if multiplier_dust_limit { 3 } else { 2 }) + * 1000; assert!(dust_htlc_on_counterparty_tx_msat < dust_inbound_htlc_on_holder_tx_msat); assert!(dust_htlc_on_counterparty_tx_msat < calcd_dust_htlc_on_counterparty_tx_msat); @@ -8699,8 +9957,11 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e if exposure_breach_event == ExposureEvent::AtHTLCForward { - route.paths[0].hops.last_mut().unwrap().fee_msat = - if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 }; + route.paths[0].hops.last_mut().unwrap().fee_msat = if on_holder_tx { + dust_outbound_htlc_on_holder_tx_msat + } else { + dust_htlc_on_counterparty_tx_msat + 1 + }; // With default dust exposure: 5000 sats if on_holder_tx { let onion = RecipientOnionFields::secret_only(payment_secret); @@ -8733,12 +9994,18 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); - expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); + expect_htlc_handling_failed_destinations!( + nodes[0].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }] + ); // With default dust exposure: 5000 sats if on_holder_tx { // Outbound dust balance: 6399 sats - let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1); - let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat *
dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat; + let dust_inbound_overflow = + dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1); + let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat + * dust_outbound_htlc_on_holder_tx + + dust_inbound_htlc_on_holder_tx_msat; nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1); } else { // Outbound dust balance: 5200 sats @@ -8764,7 +10031,11 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e } nodes[0].node.timer_tick_occurred(); check_added_monitors(&nodes[0], 1); - nodes[0].logger.assert_log_contains("lightning::ln::channel", "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", 1); + nodes[0].logger.assert_log_contains( + "lightning::ln::channel", + "Cannot afford to send new feerate at 2530 without infringing max dust htlc exposure", + 1, + ); } let _ = nodes[0].node.get_and_clear_pending_msg_events(); @@ -8772,24 +10043,98 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e added_monitors.clear(); } -fn do_test_max_dust_htlc_exposure_by_threshold_type(multiplier_dust_limit: bool, apply_excess_fee: bool) { - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, true, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, false, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtHTLCReception, true, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtHTLCForward, false, multiplier_dust_limit, apply_excess_fee); +fn do_test_max_dust_htlc_exposure_by_threshold_type( + multiplier_dust_limit: bool, apply_excess_fee: bool, +) { + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtHTLCForward, + true, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtHTLCForward, + true, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtHTLCReception, + true, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtHTLCReception, + false, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtHTLCForward, + false, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtHTLCReception, + false, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtHTLCReception, + true, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtHTLCForward, + false, + 
multiplier_dust_limit, + apply_excess_fee, + ); if !multiplier_dust_limit && !apply_excess_fee { // Because non-dust HTLC transaction fees are included in the dust exposure, trying to // increase the fee to hit a higher dust exposure with a // `MaxDustHTLCExposure::FeeRateMultiplier` is no longer super practical, so we skip these // in the `multiplier_dust_limit` case. - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(true, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, false, multiplier_dust_limit, apply_excess_fee); - do_test_max_dust_htlc_exposure(false, ExposureEvent::AtUpdateFeeOutbound, true, multiplier_dust_limit, apply_excess_fee); + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtUpdateFeeOutbound, + true, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + true, + ExposureEvent::AtUpdateFeeOutbound, + false, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtUpdateFeeOutbound, + false, + multiplier_dust_limit, + apply_excess_fee, + ); + do_test_max_dust_htlc_exposure( + false, + ExposureEvent::AtUpdateFeeOutbound, + true, + multiplier_dust_limit, + apply_excess_fee, + ); } } @@ -8820,8 +10165,7 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { let mut config = test_default_channel_config(); // Set the dust limit to the default value - config.channel_config.max_dust_htlc_exposure = - MaxDustHTLCExposure::FeeRateMultiplier(10_000); + config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(10_000); // Make sure the HTLC limits don't get in the way let chan_ty = ChannelTypeFeatures::only_static_remote_key(); config.channel_handshake_limits.min_max_accepted_htlcs = chan_utils::max_htlcs(&chan_ty); @@ -8829,7 +10173,11 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { config.channel_handshake_config.our_htlc_minimum_msat = 1; config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; - let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config.clone()), Some(config.clone()), Some(config)]); + let node_chanmgrs = create_node_chanmgrs( + 3, + &node_cfgs, + &[Some(config.clone()), Some(config.clone()), Some(config)], + ); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -8837,7 +10185,8 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { let node_c_id = nodes[2].node.get_our_node_id(); // Leave enough on the funder side to let it pay the mining fees for a commit tx with tons of htlcs - let chan_id_1 = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1_000_000, 750_000_000).2; + let chan_id_1 = + create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1_000_000, 750_000_000).2; // First get the channel one HTLC_VALUE HTLC away from the dust limit by sending dust HTLCs // repeatedly until we run out of space. 
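// Once the dust exposure budget is exhausted, any further dust HTLC would push us over
// the limit, so `next_outbound_htlc_minimum_msat` should flip from 0 to the smallest
// non-dust value we can still send; the loop below keys off exactly that transition.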
@@ -8847,18 +10196,27 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { while nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat == 0 { route_payment(&nodes[0], &[&nodes[1]], HTLC_VALUE); } - assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, 0, - "We don't want to run out of ability to send because of some non-dust limit"); - assert!(nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10, - "We should be able to fill our dust limit without too many HTLCs"); + assert_ne!( + nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat, + 0, + "We don't want to run out of ability to send because of some non-dust limit" + ); + assert!( + nodes[0].node.list_channels()[0].pending_outbound_htlcs.len() < 10, + "We should be able to fill our dust limit without too many HTLCs" + ); let dust_limit = nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat; claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); - assert_ne!(nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, 0, - "Make sure we are able to send once we clear one HTLC"); + assert_ne!( + nodes[0].node.list_channels()[0].next_outbound_htlc_minimum_msat, + 0, + "Make sure we are able to send once we clear one HTLC" + ); // Skip the router complaint when node 0 will attempt to pay node 1 - let (route_0_1, payment_hash_0_1, _, payment_secret_0_1) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_limit * 2); + let (route_0_1, payment_hash_0_1, _, payment_secret_0_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], dust_limit * 2); assert_eq!(nodes[0].node.list_channels().len(), 1); assert_eq!(nodes[1].node.list_channels().len(), 1); @@ -8870,8 +10228,10 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { let commitment_tx_per_htlc_cost = htlc_success_tx_weight(&ChannelTypeFeatures::empty()) * EXCESS_FEERATE as u64; let max_htlcs_remaining = dust_limit * 2 / commitment_tx_per_htlc_cost; - assert!(max_htlcs_remaining < chan_utils::max_htlcs(&chan_ty).into(), - "We should be able to fill our dust limit without too many HTLCs"); + assert!( + max_htlcs_remaining < chan_utils::max_htlcs(&chan_ty).into(), + "We should be able to fill our dust limit without too many HTLCs" + ); for i in 0..max_htlcs_remaining + 1 { assert_ne!(i, max_htlcs_remaining); if nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat <= dust_limit { @@ -8889,7 +10249,8 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { assert_eq!(nodes[1].node.list_channels()[0].pending_outbound_htlcs.len(), 0); // Send an additional non-dust htlc from 1 to 0, and check the complaint - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_limit * 2); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], dust_limit * 2); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); @@ -8900,7 +10261,10 @@ pub fn test_nondust_htlc_excess_fees_are_dust() { nodes[0].node.handle_update_add_htlc(node_b_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); - expect_htlc_handling_failed_destinations!(nodes[0].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); + expect_htlc_handling_failed_destinations!( + nodes[0].node.get_and_clear_pending_events(), + 
&[HTLCHandlingFailureType::Receive { payment_hash }] + ); nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", 2535000, 2530000), 1); @@ -9002,9 +10366,12 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) // Set `expected_dust_exposure_msat` to match the calculation in `FundedChannel::can_accept_incoming_htlc` // only_static_remote_key: 500_492 + 22 * (724 + 172) / 1000 * 1000 + 22 * 663 / 1000 * 1000 = 533_492 // anchors_zero_htlc_fee: 500_492 + 22 * (1_124 + 172) / 1000 * 1000 = 528_492 - let mut expected_dust_exposure_msat = BASE_DUST_EXPOSURE_MSAT + EXCESS_FEERATE * (commitment_tx_base_weight(&features) + COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000; + let mut expected_dust_exposure_msat = BASE_DUST_EXPOSURE_MSAT + + EXCESS_FEERATE * (commitment_tx_base_weight(&features) + COMMITMENT_TX_WEIGHT_PER_HTLC) + / 1000 * 1000; if features == ChannelTypeFeatures::only_static_remote_key() { - expected_dust_exposure_msat += EXCESS_FEERATE * htlc_timeout_tx_weight(&features) / 1000 * 1000; + expected_dust_exposure_msat += + EXCESS_FEERATE * htlc_timeout_tx_weight(&features) / 1000 * 1000; assert_eq!(expected_dust_exposure_msat, 533_492); } else { assert_eq!(expected_dust_exposure_msat, 528_492); @@ -9019,10 +10386,12 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) // Set node 1's max dust htlc exposure to 1msat below `expected_dust_exposure_msat` let mut fixed_limit_config = default_config.clone(); - fixed_limit_config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat - 1); + fixed_limit_config.channel_config.max_dust_htlc_exposure = + MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat - 1); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(default_config), Some(fixed_limit_config)]); + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(default_config), Some(fixed_limit_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -9038,7 +10407,8 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) }; // Skip the router complaint when node 1 will attempt to pay node 0 - let (route_1_0, payment_hash_1_0, _, payment_secret_1_0) = get_route_and_payment_hash!(nodes[1], nodes[0], NON_DUST_HTLC_MSAT); + let (route_1_0, payment_hash_1_0, _, payment_secret_1_0) = + get_route_and_payment_hash!(nodes[1], nodes[0], NON_DUST_HTLC_MSAT); // Bring node 1's dust htlc exposure up to `BASE_DUST_EXPOSURE_MSAT` for _ in 0..DUST_HTLC_COUNT { @@ -9054,7 +10424,8 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) assert_eq!(nodes[1].node.list_channels()[0].pending_inbound_htlcs.len(), DUST_HTLC_COUNT); // Send an additional non-dust htlc from 0 to 1, and check the complaint - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], NON_DUST_HTLC_MSAT); + let (route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], NON_DUST_HTLC_MSAT); let onion = RecipientOnionFields::secret_only(payment_secret); let id = PaymentId(payment_hash.0); nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); @@ -9065,7 +10436,10 @@ fn 
do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash }]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCHandlingFailureType::Receive { payment_hash }] + ); nodes[1].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our total dust exposure at {} over the limit {} on counterparty commitment tx", expected_dust_exposure_msat, expected_dust_exposure_msat - 1), 1); @@ -9092,7 +10466,9 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) // Set node 1's max dust htlc exposure equal to the `expected_dust_exposure_msat` let config = ChannelConfigUpdate { - max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat)), + max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat( + expected_dust_exposure_msat, + )), ..ChannelConfigUpdate::default() }; nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &config).unwrap(); @@ -9111,7 +10487,11 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) // The `expected_dust_exposure_msat` for the outbound htlc changes in the non-anchor case, as the htlc success and timeout transactions have different weights // only_static_remote_key: 500_492 + 22 * (724 + 172) / 1000 * 1000 + 22 * 703 / 1000 * 1000 = 534_492 if features == ChannelTypeFeatures::only_static_remote_key() { - expected_dust_exposure_msat = BASE_DUST_EXPOSURE_MSAT + EXCESS_FEERATE * (commitment_tx_base_weight(&features) + COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000 + EXCESS_FEERATE * htlc_success_tx_weight(&features) / 1000 * 1000; + expected_dust_exposure_msat = BASE_DUST_EXPOSURE_MSAT + + EXCESS_FEERATE + * (commitment_tx_base_weight(&features) + COMMITMENT_TX_WEIGHT_PER_HTLC) + / 1000 * 1000 + + EXCESS_FEERATE * htlc_success_tx_weight(&features) / 1000 * 1000; assert_eq!(expected_dust_exposure_msat, 534_492); } else { assert_eq!(expected_dust_exposure_msat, 528_492); @@ -9119,7 +10499,9 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) // Set node 1's max dust htlc exposure to 1msat below `expected_dust_exposure_msat` let update = ChannelConfigUpdate { - max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat - 1)), + max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat( + expected_dust_exposure_msat - 1, + )), ..ChannelConfigUpdate::default() }; nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &update).unwrap(); @@ -9131,7 +10513,8 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) unwrap_send_err!(nodes[1], res, true, APIError::ChannelUnavailable { .. 
}, {}); let dust_limit = if features == ChannelTypeFeatures::only_static_remote_key() { - MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 + htlc_success_tx_weight(&features) * node_1_dust_buffer_feerate / 1000 * 1000 + MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 + + htlc_success_tx_weight(&features) * node_1_dust_buffer_feerate / 1000 * 1000 } else { MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000 }; @@ -9149,7 +10532,9 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) // Set node 1's max dust htlc exposure equal to `expected_dust_exposure_msat` let update = ChannelConfigUpdate { - max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat(expected_dust_exposure_msat)), + max_dust_htlc_exposure_msat: Some(MaxDustHTLCExposure::FixedLimitMsat( + expected_dust_exposure_msat, + )), ..ChannelConfigUpdate::default() }; nodes[1].node.update_partial_channel_config(&node_a_id, &[chan_id], &update).unwrap(); @@ -9169,7 +10554,9 @@ fn do_test_nondust_htlc_fees_dust_exposure_delta(features: ChannelTypeFeatures) #[test] fn test_nondust_htlc_fees_dust_exposure_delta() { do_test_nondust_htlc_fees_dust_exposure_delta(ChannelTypeFeatures::only_static_remote_key()); - do_test_nondust_htlc_fees_dust_exposure_delta(ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()); + do_test_nondust_htlc_fees_dust_exposure_delta( + ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), + ); } #[xtest(feature = "_externalize_tests")] @@ -9182,24 +10569,38 @@ pub fn test_non_final_funding_tx() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + let temp_channel_id = + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); - let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + let accept_channel_message = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let best_height = nodes[0].node.best_block.read().unwrap().height; let chan_id = *nodes[0].network_chan_count.borrow(); let events = nodes[0].node.get_and_clear_pending_events(); - let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[&[1]]) }; + let input = TxIn { + previous_output: BitcoinOutPoint::null(), + script_sig: bitcoin::ScriptBuf::new(), + sequence: Sequence(1), + witness: Witness::from_slice(&[&[1]]), + }; assert_eq!(events.len(), 1); let mut tx = match events[0] { Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => { // Timelock the transaction _beyond_ the best client height + 1. 
- Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_height(best_height + 2).unwrap(), input: vec![input], output: vec![TxOut { - value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(), - }]} + Transaction { + version: Version(chan_id as i32), + lock_time: LockTime::from_height(best_height + 2).unwrap(), + input: vec![input], + output: vec![TxOut { + value: Amount::from_sat(*channel_value_satoshis), + script_pubkey: output_script.clone(), + }], + } }, _ => panic!("Unexpected event"), }; @@ -9208,7 +10609,7 @@ pub fn test_non_final_funding_tx() { Err(APIError::APIMisuseError { err }) => { assert_eq!(format!("Funding transaction absolute timelock is non-final"), err); }, - _ => panic!() + _ => panic!(), } let err = "Error in transaction funding: Misuse error: Funding transaction absolute timelock is non-final".to_owned(); let reason = ClosureReason::ProcessingError { err }; @@ -9227,24 +10628,38 @@ pub fn test_non_final_funding_tx_within_headroom() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + let temp_channel_id = + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); - let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + let accept_channel_message = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let best_height = nodes[0].node.best_block.read().unwrap().height; let chan_id = *nodes[0].network_chan_count.borrow(); let events = nodes[0].node.get_and_clear_pending_events(); - let input = TxIn { previous_output: BitcoinOutPoint::null(), script_sig: bitcoin::ScriptBuf::new(), sequence: Sequence(1), witness: Witness::from_slice(&[[1]]) }; + let input = TxIn { + previous_output: BitcoinOutPoint::null(), + script_sig: bitcoin::ScriptBuf::new(), + sequence: Sequence(1), + witness: Witness::from_slice(&[[1]]), + }; assert_eq!(events.len(), 1); let mut tx = match events[0] { Event::FundingGenerationReady { ref channel_value_satoshis, ref output_script, .. } => { // Timelock the transaction within a +1 headroom from the best block. 
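These two tests bracket the same rule: a height-based locktime of H can be mined at height H + 1 at the earliest, and the input's `Sequence(1)` keeps the locktime enforceable (a fully final 0xFFFFFFFF sequence would disable it), so a funding transaction locked to `best_height + 2` is non-final while `best_height + 1` still fits within one block of headroom. A sketch of that acceptance check, under the assumption that the headroom is exactly one block (the predicate is hypothetical, not LDK's code):

    // Hypothetical predicate mirroring the two cases exercised here.
    fn funding_locktime_acceptable(lock_time_height: u32, best_height: u32) -> bool {
        // A locktime of H confirms at height H + 1 at the earliest, so anything
        // up to best_height + 1 is still usable with one block of headroom.
        lock_time_height <= best_height + 1
    }

    fn main() {
        let best_height = 100u32;
        assert!(!funding_locktime_acceptable(best_height + 2, best_height)); // rejected above
        assert!(funding_locktime_acceptable(best_height + 1, best_height)); // accepted below
    }

The reformatted `Transaction` construction that follows builds exactly that `best_height + 1` case.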
- Transaction { version: Version(chan_id as i32), lock_time: LockTime::from_consensus(best_height + 1), input: vec![input], output: vec![TxOut { - value: Amount::from_sat(*channel_value_satoshis), script_pubkey: output_script.clone(), - }]} + Transaction { + version: Version(chan_id as i32), + lock_time: LockTime::from_consensus(best_height + 1), + input: vec![input], + output: vec![TxOut { + value: Amount::from_sat(*channel_value_satoshis), + script_pubkey: output_script.clone(), + }], + } }, _ => panic!("Unexpected event"), }; @@ -9277,11 +10692,10 @@ fn do_payment_with_custom_min_final_cltv_expiry(valid_delta: bool, use_user_hash get_payment_preimage_hash!(nodes[1], Some(recv_value), Some(min_cltv_expiry_delta)); (hash, payment_preimage, payment_secret) } else { - let (hash, payment_secret) = - nodes[1] - .node - .create_inbound_payment(Some(recv_value), 7200, Some(min_cltv_expiry_delta)) - .unwrap(); + let (hash, payment_secret) = nodes[1] + .node + .create_inbound_payment(Some(recv_value), 7200, Some(min_cltv_expiry_delta)) + .unwrap(); (hash, nodes[1].node.get_payment_preimage(hash, payment_secret).unwrap(), payment_secret) }; let route = get_route!(nodes[0], payment_parameters, recv_value).unwrap(); @@ -9336,17 +10750,18 @@ pub fn test_disconnects_peer_awaiting_response_ticks() { // Asserts a disconnect event is queued to the user. let check_disconnect_event = |node: &Node, should_disconnect: bool| { - let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event| - if let MessageSendEvent::HandleError { action, .. } = event { - if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action { - Some(()) + let disconnect_event = + node.node.get_and_clear_pending_msg_events().iter().find_map(|event| { + if let MessageSendEvent::HandleError { action, .. } = event { + if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action { + Some(()) + } else { + None + } } else { None } - } else { - None - } - ); + }); assert_eq!(disconnect_event.is_some(), should_disconnect); }; @@ -9379,7 +10794,9 @@ pub fn test_disconnects_peer_awaiting_response_ticks() { check_added_monitors(&&nodes[0], 1); let alice_fee_update = get_htlc_update_msgs(&nodes[0], &node_b_id); nodes[1].node.handle_update_fee(node_a_id, alice_fee_update.update_fee.as_ref().unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &alice_fee_update.commitment_signed); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &alice_fee_update.commitment_signed); check_added_monitors(&&nodes[1], 1); // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`. @@ -9392,7 +10809,8 @@ pub fn test_disconnects_peer_awaiting_response_ticks() { // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We // pretend Bob hasn't received the message and check whether he'll disconnect Alice after // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`. - let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + let alice_revoke_and_ack = + get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); check_disconnect(&nodes[1]); // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message. 
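The disconnect logic being exercised here is a simple per-peer countdown: each timer tick while we are still awaiting a required message counts against the peer, and receiving the awaited message resets it. A hypothetical sketch (the constant name comes from the test; its value and the struct are assumptions):

    const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: u32 = 2; // value assumed for illustration

    struct AwaitingResponse {
        ticks: u32,
    }

    impl AwaitingResponse {
        // Returns true once the peer should be disconnected with a warning.
        fn on_timer_tick(&mut self) -> bool {
            self.ticks += 1;
            self.ticks >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
        }
        // Any awaited message (RAA, commitment_signed, channel_reestablish, ...)
        // resets the countdown.
        fn on_expected_message(&mut self) {
            self.ticks = 0;
        }
    }

    fn main() {
        let mut watchdog = AwaitingResponse { ticks: 0 };
        assert!(!watchdog.on_timer_tick());
        watchdog.on_expected_message(); // peer responded in time
        assert!(!watchdog.on_timer_tick());
        assert!(watchdog.on_timer_tick()); // silence past the limit disconnects
    }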
@@ -9402,32 +10820,40 @@ pub fn test_disconnects_peer_awaiting_response_ticks() { nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); let bob_init = msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, }; nodes[0].node.peer_connected(node_b_id, &bob_init, true).unwrap(); let alice_init = msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None + features: nodes[0].node.init_features(), + networks: None, + remote_network_address: None, }; nodes[1].node.peer_connected(node_a_id, &alice_init, true).unwrap(); // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't // received Bob's yet, so she should disconnect him after reaching // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`. - let alice_channel_reestablish = get_event_msg!( - nodes[0], MessageSendEvent::SendChannelReestablish, node_b_id - ); + let alice_channel_reestablish = + get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, node_b_id); nodes[1].node.handle_channel_reestablish(node_a_id, &alice_channel_reestablish); check_disconnect(&nodes[0]); // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live". - let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event| - if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event { - assert_eq!(*node_id, node_a_id); - Some(msg.clone()) - } else { - None - } - ).unwrap(); + let bob_channel_reestablish = nodes[1] + .node + .get_and_clear_pending_msg_events() + .iter() + .find_map(|event| { + if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event { + assert_eq!(*node_id, node_a_id); + Some(msg.clone()) + } else { + None + } + }) + .unwrap(); nodes[0].node.handle_channel_reestablish(node_b_id, &bob_channel_reestablish); // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages. 
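In terms of the sketch above, exchanging `channel_reestablish` is the `on_expected_message` reset on both sides: once Alice has handled Bob's reestablish she is no longer awaiting anything, which is what the closing sanity check pins down.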
@@ -9464,10 +10890,13 @@ pub fn test_remove_expired_outbound_unfunded_channels() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + let temp_channel_id = + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); - let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + let accept_channel_message = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let events = nodes[0].node.get_and_clear_pending_events(); @@ -9500,8 +10929,14 @@ pub fn test_remove_expired_outbound_unfunded_channels() { let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); match msg_events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { - assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake"); + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, + node_id: _, + } => { + assert_eq!( + msg.data, + "Force-closing pending channel due to timeout awaiting establishment handshake" + ); }, _ => panic!("Unexpected event"), } @@ -9519,10 +10954,13 @@ pub fn test_remove_expired_inbound_unfunded_channels() { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - let temp_channel_id = nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); - let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); + let temp_channel_id = + nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap(); + let open_channel_message = + get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); nodes[1].node.handle_open_channel(node_a_id, &open_channel_message); - let accept_channel_message = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); + let accept_channel_message = + get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel_message); let events = nodes[0].node.get_and_clear_pending_events(); @@ -9555,8 +10993,14 @@ pub fn test_remove_expired_inbound_unfunded_channels() { let msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); match msg_events[0] { - MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { - assert_eq!(msg.data, "Force-closing pending channel due to timeout awaiting establishment handshake"); + MessageSendEvent::HandleError { + action: ErrorAction::SendErrorMessage { ref msg }, + node_id: _, + } => { + assert_eq!( + msg.data, + "Force-closing pending channel due to timeout awaiting establishment handshake" + ); }, _ => panic!("Unexpected event"), } @@ -9578,7 +11022,8 @@ pub fn test_channel_close_when_not_timely_accepted() { // Simulate peer-disconnects mid-handshake // The channel is initiated from the node 0 side, // but the nodes disconnect 
before node 1 could send accept channel - let create_chan_id = nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let create_chan_id = + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id); @@ -9625,7 +11070,8 @@ pub fn test_rebroadcast_open_channel_when_reconnect_mid_handshake() { // Simulate peer-disconnects mid-handshake // The channel is initiated from the node 0 side, // but the nodes disconnect before node 1 could send accept channel - let create_chan_id = nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); + let create_chan_id = + nodes[0].node.create_channel(node_b_id, 100000, 10001, 42, None, None).unwrap(); let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id); assert_eq!(open_channel_msg.common_fields.temporary_channel_id, create_chan_id); @@ -9680,8 +11126,10 @@ fn do_test_multi_post_event_actions(do_reload: bool) { send_payment(&nodes[0], &[&nodes[1]], 1_000_000); send_payment(&nodes[0], &[&nodes[2]], 1_000_000); - let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000); + let (our_payment_preimage, our_payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_2, payment_hash_2, ..) = + route_payment(&nodes[0], &[&nodes[2]], 1_000_000); nodes[1].node.claim_funds(our_payment_preimage); check_added_monitors(&nodes[1], 1); @@ -9732,12 +11180,22 @@ fn do_test_multi_post_event_actions(do_reload: bool) { assert_eq!(events.len(), 4); if let Event::PaymentSent { payment_preimage, .. } = events[0] { assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2); - } else { panic!(); } + } else { + panic!(); + } if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2); - } else { panic!(); } - if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); } - if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); } + } else { + panic!(); + } + if let Event::PaymentPathSuccessful { .. } = events[2] { + } else { + panic!(); + } + if let Event::PaymentPathSuccessful { .. } = events[3] { + } else { + panic!(); + } // After the events are processed, the ChannelMonitorUpdates will be released and, upon their // completion, we'll respond to nodes[1] with an RAA + CS. @@ -9763,17 +11221,18 @@ pub fn test_batch_channel_open() { let node_c_id = nodes[2].node.get_our_node_id(); // Initiate channel opening and create the batch channel funding transaction. - let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[ - (&nodes[1], 100_000, 0, 42, None), - (&nodes[2], 200_000, 0, 43, None), - ]); + let (tx, funding_created_msgs) = create_batch_channel_funding( + &nodes[0], + &[(&nodes[1], 100_000, 0, 42, None), (&nodes[2], 200_000, 0, 43, None)], + ); // Go through the funding_created and funding_signed flow with node 1. 
nodes[1].node.handle_funding_created(node_a_id, &funding_created_msgs[0]); check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); @@ -9785,7 +11244,8 @@ pub fn test_batch_channel_open() { check_added_monitors(&nodes[2], 1); expect_channel_pending_event(&nodes[2], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_a_id); + let funding_signed_msg = + get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed(node_c_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); @@ -9796,9 +11256,9 @@ pub fn test_batch_channel_open() { assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0); // Complete the persistence of the monitor. - nodes[0].chain_monitor.complete_sole_pending_chan_update( - &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.compute_txid(), index: 1 }) - ); + nodes[0].chain_monitor.complete_sole_pending_chan_update(&ChannelId::v1_from_funding_outpoint( + OutPoint { txid: tx.compute_txid(), index: 1 }, + )); let events = nodes[0].node.get_and_clear_pending_events(); // The transaction should only have been broadcast now. @@ -9836,17 +11296,18 @@ pub fn test_close_in_funding_batch() { let node_b_id = nodes[1].node.get_our_node_id(); // Initiate channel opening and create the batch channel funding transaction. - let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[ - (&nodes[1], 100_000, 0, 42, None), - (&nodes[2], 200_000, 0, 43, None), - ]); + let (tx, funding_created_msgs) = create_batch_channel_funding( + &nodes[0], + &[(&nodes[1], 100_000, 0, 42, None), (&nodes[2], 200_000, 0, 43, None)], + ); // Go through the funding_created and funding_signed flow with node 1. nodes[1].node.handle_funding_created(node_a_id, &funding_created_msgs[0]); check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); @@ -9868,7 +11329,10 @@ pub fn test_close_in_funding_batch() { let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap(); assert_eq!(monitor_updates_1.len(), 1); assert_eq!(monitor_updates_1[0].updates.len(), 1); - assert!(matches!(monitor_updates_1[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. })); + assert!(matches!( + monitor_updates_1[0].updates[0], + ChannelMonitorUpdateStep::ChannelForceClosed { .. } + )); } let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -9887,22 +11351,25 @@ pub fn test_close_in_funding_batch() { } // All channels in the batch should close immediately. 
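Both batch tests enforce the same two invariants: the shared funding transaction may only be broadcast once every channel in the batch has its monitor persisted, and the failure of any one channel tears down the whole batch, since the shared transaction can no longer be used. A hypothetical sketch of that bookkeeping (LDK's real state lives in `ChannelManager` and differs in detail):

    use std::collections::HashMap;

    struct FundingBatch {
        // channel id -> has its monitor been persisted?
        channels: HashMap<u32, bool>,
    }

    impl FundingBatch {
        // Returns true once the shared funding tx may be broadcast.
        fn monitor_persisted(&mut self, chan: u32) -> bool {
            if let Some(done) = self.channels.get_mut(&chan) {
                *done = true;
            }
            self.channels.values().all(|done| *done)
        }
        // One failure invalidates the shared tx, so every channel closes.
        fn close_all(&mut self) -> Vec<u32> {
            self.channels.drain().map(|(id, _)| id).collect()
        }
    }

    fn main() {
        let mut batch = FundingBatch { channels: HashMap::from([(1, false), (2, false)]) };
        assert!(!batch.monitor_persisted(1)); // must not broadcast yet
        assert!(batch.monitor_persisted(2)); // all monitors in place: broadcast
        assert_eq!(batch.close_all().len(), 2); // a force-close takes both down
    }

The `check_closed_events` call that follows asserts exactly this pairwise teardown, including the discarded funding.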
- check_closed_events(&nodes[0], &[ - ExpectedCloseEvent { - channel_id: Some(channel_id_1), - discard_funding: true, - channel_funding_txo: Some(funding_txo_1), - user_channel_id: Some(42), - ..Default::default() - }, - ExpectedCloseEvent { - channel_id: Some(channel_id_2), - discard_funding: true, - channel_funding_txo: Some(funding_txo_2), - user_channel_id: Some(43), - ..Default::default() - }, - ]); + check_closed_events( + &nodes[0], + &[ + ExpectedCloseEvent { + channel_id: Some(channel_id_1), + discard_funding: true, + channel_funding_txo: Some(funding_txo_1), + user_channel_id: Some(42), + ..Default::default() + }, + ExpectedCloseEvent { + channel_id: Some(channel_id_2), + discard_funding: true, + channel_funding_txo: Some(funding_txo_2), + user_channel_id: Some(43), + ..Default::default() + }, + ], + ); // Ensure the channels don't exist anymore. assert!(nodes[0].node.list_channels().is_empty()); @@ -9920,17 +11387,18 @@ pub fn test_batch_funding_close_after_funding_signed() { let node_c_id = nodes[2].node.get_our_node_id(); // Initiate channel opening and create the batch channel funding transaction. - let (tx, funding_created_msgs) = create_batch_channel_funding(&nodes[0], &[ - (&nodes[1], 100_000, 0, 42, None), - (&nodes[2], 200_000, 0, 43, None), - ]); + let (tx, funding_created_msgs) = create_batch_channel_funding( + &nodes[0], + &[(&nodes[1], 100_000, 0, 42, None), (&nodes[2], 200_000, 0, 43, None)], + ); // Go through the funding_created and funding_signed flow with node 1. nodes[1].node.handle_funding_created(node_a_id, &funding_created_msgs[0]); check_added_monitors(&nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); @@ -9939,7 +11407,8 @@ pub fn test_batch_funding_close_after_funding_signed() { check_added_monitors(&nodes[2], 1); expect_channel_pending_event(&nodes[2], &node_a_id); - let funding_signed_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_a_id); + let funding_signed_msg = + get_event_msg!(nodes[2], MessageSendEvent::SendFundingSigned, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed(node_c_id, &funding_signed_msg); check_added_monitors(&nodes[0], 1); @@ -9960,11 +11429,17 @@ pub fn test_batch_funding_close_after_funding_signed() { let monitor_updates_1 = monitor_updates.get(&channel_id_1).unwrap(); assert_eq!(monitor_updates_1.len(), 1); assert_eq!(monitor_updates_1[0].updates.len(), 1); - assert!(matches!(monitor_updates_1[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. })); + assert!(matches!( + monitor_updates_1[0].updates[0], + ChannelMonitorUpdateStep::ChannelForceClosed { .. } + )); let monitor_updates_2 = monitor_updates.get(&channel_id_2).unwrap(); assert_eq!(monitor_updates_2.len(), 1); assert_eq!(monitor_updates_2[0].updates.len(), 1); - assert!(matches!(monitor_updates_2[0].updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. })); + assert!(matches!( + monitor_updates_2[0].updates[0], + ChannelMonitorUpdateStep::ChannelForceClosed { .. 
} + )); } let msg_events = nodes[0].node.get_and_clear_pending_msg_events(); match msg_events[0] { @@ -9982,22 +11457,25 @@ pub fn test_batch_funding_close_after_funding_signed() { } // All channels in the batch should close immediately. - check_closed_events(&nodes[0], &[ - ExpectedCloseEvent { - channel_id: Some(channel_id_1), - discard_funding: true, - channel_funding_txo: Some(funding_txo_1), - user_channel_id: Some(42), - ..Default::default() - }, - ExpectedCloseEvent { - channel_id: Some(channel_id_2), - discard_funding: true, - channel_funding_txo: Some(funding_txo_2), - user_channel_id: Some(43), - ..Default::default() - }, - ]); + check_closed_events( + &nodes[0], + &[ + ExpectedCloseEvent { + channel_id: Some(channel_id_1), + discard_funding: true, + channel_funding_txo: Some(funding_txo_1), + user_channel_id: Some(42), + ..Default::default() + }, + ExpectedCloseEvent { + channel_id: Some(channel_id_2), + discard_funding: true, + channel_funding_txo: Some(funding_txo_2), + user_channel_id: Some(43), + ..Default::default() + }, + ], + ); // Ensure the channels don't exist anymore. assert!(nodes[0].node.list_channels().is_empty()); @@ -10010,20 +11488,24 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let mut min_depth_1_block_cfg = test_default_channel_config(); min_depth_1_block_cfg.channel_handshake_config.minimum_depth = 1; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(min_depth_1_block_cfg.clone()), Some(min_depth_1_block_cfg)]); + let node_chanmgrs = create_node_chanmgrs( + 2, + &node_cfgs, + &[Some(min_depth_1_block_cfg.clone()), Some(min_depth_1_block_cfg)], + ); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0); - let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.compute_txid(), index: 0 }); + let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { + txid: funding_tx.compute_txid(), + index: 0, + }); assert_eq!(nodes[0].node.list_channels().len(), 1); assert_eq!(nodes[1].node.list_channels().len(), 1); - let (closing_node, other_node) = if confirm_remote_commitment { - (&nodes[1], &nodes[0]) - } else { - (&nodes[0], &nodes[1]) - }; + let (closing_node, other_node) = + if confirm_remote_commitment { (&nodes[1], &nodes[0]) } else { (&nodes[0], &nodes[1]) }; let closing_node_id = closing_node.node.get_our_node_id(); let other_node_id = other_node.node.get_our_node_id(); @@ -10032,7 +11514,10 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen let mut msg_events = closing_node.node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 1); match msg_events.pop().unwrap() { - MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { .. }, .. } => {}, + MessageSendEvent::HandleError { + action: msgs::ErrorAction::SendErrorMessage { .. }, + .. + } => {}, _ => panic!("Unexpected event"), } check_added_monitors(closing_node, 1); @@ -10090,15 +11575,18 @@ pub fn test_accept_inbound_channel_errors_queued() { let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. 
} => { - match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 23, None) { + match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 23, None) + { Err(APIError::ChannelUnavailable { err: _ }) => (), _ => panic!(), } - } + }, _ => panic!("Unexpected event"), } - assert_eq!(get_err_msg(&nodes[1], &node_a_id).channel_id, - open_channel_msg.common_fields.temporary_channel_id); + assert_eq!( + get_err_msg(&nodes[1], &node_a_id).channel_id, + open_channel_msg.common_fields.temporary_channel_id + ); } #[xtest(feature = "_externalize_tests")] @@ -10120,7 +11608,8 @@ pub fn test_manual_funding_abandon() { let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); - let (temp_channel_id, _tx, funding_outpoint) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); + let (temp_channel_id, _tx, funding_outpoint) = + create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); nodes[0] .node .unsafe_manual_funding_transaction_generated(temp_channel_id, node_b_id, funding_outpoint) @@ -10144,7 +11633,7 @@ pub fn test_manual_funding_abandon() { assert_eq!(*channel_id, err.channel_id); assert_eq!(*outpoint, funding_outpoint); true - } + }, _ => false, })); } @@ -10168,7 +11657,8 @@ pub fn test_funding_signed_event() { let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id); nodes[0].node.handle_accept_channel(node_b_id, &accept_channel); - let (temp_channel_id, tx, funding_outpoint) = create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); + let (temp_channel_id, tx, funding_outpoint) = + create_funding_transaction(&nodes[0], &node_b_id, 100_000, 42); nodes[0] .node .unsafe_manual_funding_transaction_generated(temp_channel_id, node_b_id, funding_outpoint)