
Commit c7e4887

Allow quiescence-init while disconnected from peers
There are a number of places in LDK where we've been lazy and disallowed the user from initiating an action while a peer is disconnected. While technically accurate, in that the action cannot actually begin until the peer reconnects, this makes for terrible dev UX: the action can fail through no fault of the developer, and their only recourse is to try again later. Here we fix this dev UX shortcoming for splicing, keeping any queued post-quiescence actions around when a peer disconnects and retrying the action (and quiescence generally) when the peer reconnects.
Parent: e15c2f5
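The behavioral change is easiest to see end to end: a queued quiescence-dependent action now survives a disconnect and re-arms the `stfu` handshake once the peer returns. Below is a minimal standalone sketch of that lifecycle, using illustrative names only (a toy model, not LDK's actual types or API):

```rust
// Toy model of the retry-across-reconnect behavior in this commit.
// All names here are illustrative; they are not LDK's real API.
#[derive(Default)]
struct ChannelQuiescence {
	awaiting_quiescence: bool,
	local_stfu_sent: bool,
	remote_stfu_sent: bool,
	quiescent: bool,
	// An action queued to run once the channel is quiescent (e.g. a splice).
	queued_action: Option<String>,
}

impl ChannelQuiescence {
	// A user asks to run `action` under quiescence. Per this commit, the
	// request succeeds even while the peer is disconnected; we just defer
	// sending `stfu` rather than returning an error.
	fn propose(&mut self, action: String, peer_connected: bool) -> Option<&'static str> {
		self.queued_action = Some(action);
		self.awaiting_quiescence = true;
		if peer_connected {
			self.local_stfu_sent = true;
			Some("stfu") // message to send now
		} else {
			None // retried on reconnect instead of failing
		}
	}

	// On disconnect, in-flight negotiation state is discarded, but a queued
	// action keeps the channel marked as awaiting quiescence.
	fn on_disconnect(&mut self) {
		self.awaiting_quiescence = self.queued_action.is_some();
		self.local_stfu_sent = false;
		self.remote_stfu_sent = false;
		self.quiescent = false;
	}

	// On reconnect, resume the negotiation by sending `stfu` again.
	fn on_reconnect(&mut self) -> Option<&'static str> {
		if self.awaiting_quiescence && !self.local_stfu_sent {
			self.local_stfu_sent = true;
			return Some("stfu");
		}
		None
	}
}

fn main() {
	let mut chan = ChannelQuiescence::default();
	// Peer is offline: the request is queued rather than rejected.
	assert_eq!(chan.propose("splice".to_owned(), false), None);
	chan.on_disconnect();
	// After reconnection the negotiation restarts automatically.
	assert_eq!(chan.on_reconnect(), Some("stfu"));
}
```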

2 files changed (+134, -7)

lightning/src/ln/channel.rs

Lines changed: 25 additions & 7 deletions
@@ -8206,7 +8206,10 @@ where
 
 		// Reset any quiescence-related state as it is implicitly terminated once disconnected.
 		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
-			self.context.channel_state.clear_awaiting_quiescence();
+			if self.quiescent_action.is_some() {
+				// If we were trying to get quiescent, try again after reconnection.
+				self.context.channel_state.set_awaiting_quiescence();
+			}
 			self.context.channel_state.clear_local_stfu_sent();
 			self.context.channel_state.clear_remote_stfu_sent();
 			self.context.channel_state.clear_quiescent();
@@ -11552,9 +11555,9 @@ where
 	{
 		log_debug!(logger, "Attempting to initiate quiescence");
 
-		if !self.context.is_live() {
+		if !self.context.is_usable() {
 			return Err(ChannelError::Ignore(
-				"Channel is not in a live state to propose quiescence".to_owned()
+				"Channel is not in a usable state to propose quiescence".to_owned()
 			));
 		}
 		if self.quiescent_action.is_some() {
@@ -11570,7 +11573,11 @@ where
 		}
 
 		self.context.channel_state.set_awaiting_quiescence();
-		Ok(Some(self.send_stfu(logger)?))
+		if self.context.is_live() {
+			Ok(Some(self.send_stfu(logger)?))
+		} else {
+			Ok(None)
+		}
 	}
 
 	// Assumes we are either awaiting quiescence or our counterparty has requested quiescence.
@@ -11580,7 +11587,6 @@ where
 		L::Target: Logger,
 	{
 		debug_assert!(!self.context.channel_state.is_local_stfu_sent());
-		// Either state being set implies the channel is live.
 		debug_assert!(
 			self.context.channel_state.is_awaiting_quiescence()
 				|| self.context.channel_state.is_remote_stfu_sent()
@@ -11712,6 +11718,10 @@ where
 				&& self.context.channel_state.is_remote_stfu_sent())
 		);
 
+		if !self.context.is_live() {
+			return Ok(None);
+		}
+
 		// We need to send our `stfu`, either because we're trying to initiate quiescence, or the
 		// counterparty is and we've yet to send ours.
 		if self.context.channel_state.is_awaiting_quiescence()
@@ -12851,7 +12861,11 @@ where
 		match channel_state {
 			ChannelState::AwaitingChannelReady(_) => {},
 			ChannelState::ChannelReady(_) => {
-				channel_state.clear_awaiting_quiescence();
+				if self.quiescent_action.is_some() {
+					// If we're trying to get quiescent to do something, try again when we
+					// reconnect to the peer.
+					channel_state.set_awaiting_quiescence();
+				}
 				channel_state.clear_local_stfu_sent();
 				channel_state.clear_remote_stfu_sent();
 				channel_state.clear_quiescent();
@@ -13259,6 +13273,7 @@ where
 			(60, self.context.historical_scids, optional_vec), // Added in 0.2
 			(61, fulfill_attribution_data, optional_vec), // Added in 0.2
 			(63, holder_commitment_point_current, option), // Added in 0.2
+			(65, self.quiescent_action, option), // Added in 0.2
 		});
 
 		Ok(())
@@ -13620,6 +13635,8 @@ where
 
 		let mut minimum_depth_override: Option<u32> = None;
 
+		let mut quiescent_action = None;
+
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
 			(1, minimum_depth, option),
@@ -13663,6 +13680,7 @@ where
 			(60, historical_scids, optional_vec), // Added in 0.2
 			(61, fulfill_attribution_data, optional_vec), // Added in 0.2
 			(63, holder_commitment_point_current_opt, option), // Added in 0.2
+			(65, quiescent_action, upgradable_option), // Added in 0.2
 		});
 
 		let holder_signer = signer_provider.derive_channel_signer(channel_keys_id);
@@ -14009,7 +14027,7 @@ where
 			holder_commitment_point,
 			#[cfg(splicing)]
 			pending_splice: None,
-			quiescent_action: None,
+			quiescent_action,
 		})
 	}
 }
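Two details in the channel.rs changes above are worth noting. First, `maybe_propose_quiescence` now rejects a request only when the channel itself is unusable; a disconnected peer merely defers the `stfu`, with the remaining `is_live()` checks deciding whether the message goes out now or only after reconnection. A rough sketch of the distinction (hypothetical predicates, simpler than LDK's real checks):

```rust
// Hypothetical simplification of the two predicates; LDK's actual
// implementations consider more state than this.
struct ChannelStatus {
	channel_ready: bool,
	shutting_down: bool,
	peer_connected: bool,
}

impl ChannelStatus {
	// Usable: the channel itself is operational (ready and not shutting down).
	fn is_usable(&self) -> bool {
		self.channel_ready && !self.shutting_down
	}

	// Live: usable *and* the peer is connected, so a message such as `stfu`
	// can actually be sent right now.
	fn is_live(&self) -> bool {
		self.is_usable() && self.peer_connected
	}
}
```

Second, the persisted `quiescent_action` is written as odd TLV type 65, so older LDK versions that do not know the field can safely skip it; it is read back with `upgradable_option`, which, as I understand it, also tolerates an action written by a newer version that this version cannot parse.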

lightning/src/ln/quiescence_tests.rs

Lines changed: 109 additions & 0 deletions
@@ -548,3 +548,112 @@ fn test_quiescence_timeout_while_waiting_for_counterparty_stfu() {
 	};
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(f).is_some());
 }
+
+fn do_test_quiescence_during_disconnection(with_pending_claim: bool, propose_disconnected: bool) {
+	// Test that we'll start trying for quiescence immediately after reconnection if we're waiting
+	// to do some quiescence-required action.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
+	// First get both nodes off the starting state so we don't have to deal with channel_ready
+	// retransmissions on reconnect.
+	send_payment(&nodes[0], &[&nodes[1]], 100_000);
+
+	let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
+	if with_pending_claim {
+		// Optionally reconnect with quiescence pending while there are pending messages to
+		// deliver.
+		nodes[1].node.claim_funds(preimage);
+		check_added_monitors(&nodes[1], 1);
+		expect_payment_claimed!(nodes[1], payment_hash, 100_000);
+		let _ = get_htlc_update_msgs(&nodes[1], &node_a_id);
+	}
+
+	if !propose_disconnected {
+		nodes[1].node.maybe_propose_quiescence(&node_a_id, &chan_id).unwrap();
+	}
+
+	nodes[0].node.peer_disconnected(node_b_id);
+	nodes[1].node.peer_disconnected(node_a_id);
+
+	if propose_disconnected {
+		nodes[1].node.maybe_propose_quiescence(&node_a_id, &chan_id).unwrap();
+	}
+
+	let init_msg = msgs::Init {
+		features: nodes[1].node.init_features(),
+		networks: None,
+		remote_network_address: None,
+	};
+	nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap();
+	nodes[1].node.peer_connected(node_a_id, &init_msg, true).unwrap();
+
+	let reestab_a = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, node_b_id);
+	let reestab_b = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, node_a_id);
+
+	nodes[0].node.handle_channel_reestablish(node_b_id, &reestab_b);
+	get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id);
+
+	nodes[1].node.handle_channel_reestablish(node_a_id, &reestab_a);
+	let mut bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
+	bs_msgs.retain(|msg| !matches!(msg, MessageSendEvent::SendChannelUpdate { .. }));
+	assert_eq!(bs_msgs.len(), 1, "{bs_msgs:?}");
+	let stfu = if with_pending_claim {
+		// Node B should first re-send its commitment update, then try to enter quiescence once
+		// that completes...
+		let msg = bs_msgs.pop().unwrap();
+		if let MessageSendEvent::UpdateHTLCs { mut updates, .. } = msg {
+			let fulfill = updates.update_fulfill_htlcs.pop().unwrap();
+			nodes[0].node.handle_update_fulfill_htlc(node_b_id, fulfill);
+			let cs = updates.commitment_signed;
+			nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &cs);
+			check_added_monitors(&nodes[0], 1);
+
+			let (raa, cs) = get_revoke_commit_msgs(&nodes[0], &node_b_id);
+			nodes[1].node.handle_revoke_and_ack(node_a_id, &raa);
+			check_added_monitors(&nodes[1], 1);
+			nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &cs);
+			check_added_monitors(&nodes[1], 1);
+
+			let mut bs_raa_stfu = nodes[1].node.get_and_clear_pending_msg_events();
+			assert_eq!(bs_raa_stfu.len(), 2);
+			if let MessageSendEvent::SendRevokeAndACK { msg, .. } = &bs_raa_stfu[0] {
+				nodes[0].node.handle_revoke_and_ack(node_b_id, &msg);
+				expect_payment_sent!(&nodes[0], preimage);
+			} else {
+				panic!("Unexpected first message {bs_raa_stfu:?}");
+			}
+
+			bs_raa_stfu.pop().unwrap()
+		} else {
+			panic!("Unexpected message {msg:?}");
+		}
+	} else {
+		bs_msgs.pop().unwrap()
+	};
+	if let MessageSendEvent::SendStfu { msg, .. } = stfu {
+		nodes[0].node.handle_stfu(node_b_id, &msg);
+	} else {
+		panic!("Unexpected message {stfu:?}");
+	}
+
+	let stfu_resp = get_event_msg!(nodes[0], MessageSendEvent::SendStfu, node_b_id);
+	nodes[1].node.handle_stfu(node_a_id, &stfu_resp);
+
+	assert!(nodes[0].node.exit_quiescence(&node_b_id, &chan_id).unwrap());
+	assert!(nodes[1].node.exit_quiescence(&node_a_id, &chan_id).unwrap());
+}
+
+#[test]
+fn test_quiescence_during_disconnection() {
+	do_test_quiescence_during_disconnection(false, false);
+	do_test_quiescence_during_disconnection(true, false);
+	do_test_quiescence_during_disconnection(false, true);
+	do_test_quiescence_during_disconnection(true, true);
+}
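Note the `propose_disconnected: true` variants above: `maybe_propose_quiescence` is deliberately called after `peer_disconnected` and is still expected to return `Ok(())`, exercising exactly the dev-UX fix described in the commit message. The `stfu` then goes out only once the `channel_reestablish` exchange completes.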
