@@ -4218,13 +4218,17 @@ fn test_glacial_peer_cant_hang() {
 	do_test_glacial_peer_cant_hang(true);
 }
 
-#[test]
-fn test_partial_claim_mon_update_compl_actions() {
+fn do_test_partial_claim_mon_update_compl_actions(reload_a: bool, reload_b: bool) {
 	// Test that if we have an MPP claim that we ensure the preimage for the claim is retained in
 	// all the `ChannelMonitor`s until the preimage reaches every `ChannelMonitor` for a channel
 	// which was a part of the MPP.
 	let chanmon_cfgs = create_chanmon_cfgs(4);
 	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+
+	let (persister, persister_2, persister_3);
+	let (new_chain_mon, new_chain_mon_2, new_chain_mon_3);
+	let (nodes_3_reload, nodes_3_reload_2, nodes_3_reload_3);
+
 	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
 	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 
@@ -4253,6 +4257,8 @@ fn test_partial_claim_mon_update_compl_actions() {
 	let paths = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
 	send_along_route_with_secret(&nodes[0], route, paths, 200_000, payment_hash, payment_secret);
 
+	// Store the monitor for channel 4 without the preimage to use on reload
+	let chan_4_monitor_serialized = get_monitor!(nodes[3], chan_4_id).encode();
 	// Claim along both paths, but only complete one of the two monitor updates.
 	chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
@@ -4264,7 +4270,13 @@ fn test_partial_claim_mon_update_compl_actions() {
 	// Complete the 1<->3 monitor update and play the commitment_signed dance forward until it
 	// blocks.
 	nodes[3].chain_monitor.complete_sole_pending_chan_update(&chan_3_id);
-	expect_payment_claimed!(&nodes[3], payment_hash, 200_000);
+	let payment_claimed = nodes[3].node.get_and_clear_pending_events();
+	assert_eq!(payment_claimed.len(), 1, "{payment_claimed:?}");
+	if let Event::PaymentClaimed { payment_hash: ev_hash, .. } = &payment_claimed[0] {
+		assert_eq!(*ev_hash, payment_hash);
+	} else {
+		panic!("{payment_claimed:?}");
+	}
 	let mut updates = get_htlc_update_msgs(&nodes[3], &node_b_id);
 
 	nodes[1].node.handle_update_fulfill_htlc(node_d_id, updates.update_fulfill_htlcs.remove(0));
@@ -4283,15 +4295,41 @@ fn test_partial_claim_mon_update_compl_actions() {
 	check_added_monitors(&nodes[3], 0);
 	assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
 
+	if reload_a {
+		// After a reload (with the monitor not yet fully updated), the RAA should still be blocked
+		// waiting until the monitor update completes.
+		let node_ser = nodes[3].node.encode();
+		let chan_3_monitor_serialized = get_monitor!(nodes[3], chan_3_id).encode();
+		let mons = &[&chan_3_monitor_serialized[..], &chan_4_monitor_serialized[..]];
+		reload_node!(nodes[3], &node_ser, mons, persister, new_chain_mon, nodes_3_reload);
+		// The final update to channel 4 should be replayed.
+		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
+		check_added_monitors(&nodes[3], 1);
+
+		// Because the HTLCs aren't yet cleared, the PaymentClaimed event will be replayed on
+		// restart.
+		let second_payment_claimed = nodes[3].node.get_and_clear_pending_events();
+		assert_eq!(payment_claimed, second_payment_claimed);
+
+		nodes[1].node.peer_disconnected(node_d_id);
+		nodes[2].node.peer_disconnected(node_d_id);
+		reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[3]));
+		reconnect_nodes(ReconnectArgs::new(&nodes[2], &nodes[3]));
+
+		assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
+	}
+
 	// Now double-check that the preimage is still in the 1<->3 channel and complete the pending
 	// monitor update, allowing node 3 to claim the payment on the 2<->3 channel. This also
 	// unblocks the 1<->3 channel, allowing node 3 to release the two blocked monitor updates and
 	// respond to the final commitment_signed.
 	assert!(get_monitor!(nodes[3], chan_3_id).get_stored_preimages().contains_key(&payment_hash));
+	assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
 
 	nodes[3].chain_monitor.complete_sole_pending_chan_update(&chan_4_id);
 	let mut ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
-	assert_eq!(ds_msgs.len(), 2);
+	assert_eq!(ds_msgs.len(), 2, "{ds_msgs:?}");
 	check_added_monitors(&nodes[3], 2);
 
 	match remove_first_msg_event_to_node(&node_b_id, &mut ds_msgs) {
@@ -4335,13 +4373,86 @@ fn test_partial_claim_mon_update_compl_actions() {
 	assert!(get_monitor!(nodes[3], chan_3_id).get_stored_preimages().contains_key(&payment_hash));
 	assert!(get_monitor!(nodes[3], chan_4_id).get_stored_preimages().contains_key(&payment_hash));
 
+	if reload_b {
+		// Ensure that the channel pause logic doesn't accidentally get restarted after a second
+		// reload once the HTLCs for the first payment have been removed and the monitors
+		// completed.
+		let node_ser = nodes[3].node.encode();
+		let chan_3_monitor_serialized = get_monitor!(nodes[3], chan_3_id).encode();
+		let chan_4_monitor_serialized = get_monitor!(nodes[3], chan_4_id).encode();
+		let mons = &[&chan_3_monitor_serialized[..], &chan_4_monitor_serialized[..]];
+		reload_node!(nodes[3], &node_ser, mons, persister_2, new_chain_mon_2, nodes_3_reload_2);
+		check_added_monitors(&nodes[3], 0);
+
+		nodes[1].node.peer_disconnected(node_d_id);
+		nodes[2].node.peer_disconnected(node_d_id);
+		reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[3]));
+		reconnect_nodes(ReconnectArgs::new(&nodes[2], &nodes[3]));
+
+		assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
+
+		// Because the HTLCs aren't yet cleared, the PaymentClaimed event will be replayed on
+		// restart.
+		let third_payment_claimed = nodes[3].node.get_and_clear_pending_events();
+		assert_eq!(payment_claimed, third_payment_claimed);
+	}
+
 	send_payment(&nodes[1], &[&nodes[3]], 100_000);
 	assert!(!get_monitor!(nodes[3], chan_3_id).get_stored_preimages().contains_key(&payment_hash));
 
+	if reload_b {
+		// Ensure that the channel pause logic doesn't accidentally get restarted after a second
+		// reload once the HTLCs for the first payment have been removed and the monitors
+		// completed, even if only one of the two monitors still knows about the first payment.
+		let node_ser = nodes[3].node.encode();
+		let chan_3_monitor_serialized = get_monitor!(nodes[3], chan_3_id).encode();
+		let chan_4_monitor_serialized = get_monitor!(nodes[3], chan_4_id).encode();
+		let mons = &[&chan_3_monitor_serialized[..], &chan_4_monitor_serialized[..]];
+		reload_node!(nodes[3], &node_ser, mons, persister_3, new_chain_mon_3, nodes_3_reload_3);
+		check_added_monitors(&nodes[3], 0);
+
+		nodes[1].node.peer_disconnected(node_d_id);
+		nodes[2].node.peer_disconnected(node_d_id);
+		reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[3]));
+		reconnect_nodes(ReconnectArgs::new(&nodes[2], &nodes[3]));
+
+		assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
+
+		// Because the HTLCs aren't yet cleared, the PaymentClaimed events for both payments will
+		// be replayed on restart.
+		// Use this as an opportunity to check the payment_ids are unique.
+		let mut events = nodes[3].node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 2);
+		events.retain(|ev| *ev != payment_claimed[0]);
+		assert_eq!(events.len(), 1);
+		if let Event::PaymentClaimed { payment_id: original_payment_id, .. } = &payment_claimed[0] {
+			assert!(original_payment_id.is_some());
+			if let Event::PaymentClaimed { amount_msat, payment_id, .. } = &events[0] {
+				assert!(payment_id.is_some());
+				assert_ne!(original_payment_id, payment_id);
+				assert_eq!(*amount_msat, 100_000);
+			} else {
+				panic!("{events:?}");
+			}
+		} else {
+			panic!("{events:?}");
+		}
+
+		send_payment(&nodes[1], &[&nodes[3]], 100_000);
+	}
+
 	send_payment(&nodes[2], &[&nodes[3]], 100_000);
 	assert!(!get_monitor!(nodes[3], chan_4_id).get_stored_preimages().contains_key(&payment_hash));
 }
 
+#[test]
+fn test_partial_claim_mon_update_compl_actions() {
+	do_test_partial_claim_mon_update_compl_actions(true, true);
+	do_test_partial_claim_mon_update_compl_actions(true, false);
+	do_test_partial_claim_mon_update_compl_actions(false, true);
+	do_test_partial_claim_mon_update_compl_actions(false, false);
+}
+
 #[test]
 fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() {
 	// One of the last features for async persistence we implemented was the correct blocking of