Skip to content

Commit c6103e6

Browse files
committed
Move test_peer_storage to reload_tests
In general we shouldn't be adding new tests in `channelmanager.rs`
1 parent 2ce5479 commit c6103e6

File tree

2 files changed

+131
-128
lines changed

2 files changed

+131
-128
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 0 additions & 128 deletions
Original file line number | Diff line number | Diff line change
@@ -17542,134 +17542,6 @@ mod tests {
1754217542
}
1754317543
}
1754417544

17545-
#[test]
17546-
#[cfg(peer_storage)]
17547-
fn test_peer_storage() {
17548-
let chanmon_cfgs = create_chanmon_cfgs(2);
17549-
let (persister, chain_monitor);
17550-
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
17551-
let nodes_0_deserialized;
17552-
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
17553-
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
17554-
17555-
let (_, _, cid, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
17556-
send_payment(&nodes[0], &[&nodes[1]], 1000);
17557-
let nodes_0_serialized = nodes[0].node.encode();
17558-
let old_state_monitor = get_monitor!(nodes[0], cid).encode();
17559-
send_payment(&nodes[0], &[&nodes[1]], 10000);
17560-
send_payment(&nodes[0], &[&nodes[1]], 9999);
17561-
17562-
// Update peer storage with latest commitment txns
17563-
connect_blocks(&nodes[0], 1);
17564-
connect_blocks(&nodes[0], 1);
17565-
17566-
let peer_storage_msg_events_node0 =
17567-
nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_msg_events();
17568-
let peer_storage_msg_events_node1 =
17569-
nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_msg_events();
17570-
assert_ne!(peer_storage_msg_events_node0.len(), 0);
17571-
assert_ne!(peer_storage_msg_events_node1.len(), 0);
17572-
17573-
for ps_msg in peer_storage_msg_events_node0 {
17574-
match ps_msg {
17575-
MessageSendEvent::SendPeerStorage { ref node_id, ref msg } => {
17576-
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
17577-
nodes[1].node.handle_peer_storage(nodes[0].node.get_our_node_id(), msg.clone());
17578-
},
17579-
_ => panic!("Unexpected event"),
17580-
}
17581-
}
17582-
17583-
for ps_msg in peer_storage_msg_events_node1 {
17584-
match ps_msg {
17585-
MessageSendEvent::SendPeerStorage { ref node_id, ref msg } => {
17586-
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
17587-
nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msg.clone());
17588-
},
17589-
_ => panic!("Unexpected event"),
17590-
}
17591-
}
17592-
17593-
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
17594-
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
17595-
17596-
// Reload Node!
17597-
// TODO: Handle the case where we've completely forgotten about an active channel.
17598-
reload_node!(
17599-
nodes[0],
17600-
test_default_channel_config(),
17601-
&nodes_0_serialized,
17602-
&[&old_state_monitor[..]],
17603-
persister,
17604-
chain_monitor,
17605-
nodes_0_deserialized
17606-
);
17607-
17608-
nodes[0]
17609-
.node
17610-
.peer_connected(
17611-
nodes[1].node.get_our_node_id(),
17612-
&msgs::Init {
17613-
features: nodes[1].node.init_features(),
17614-
networks: None,
17615-
remote_network_address: None,
17616-
},
17617-
true,
17618-
)
17619-
.unwrap();
17620-
17621-
nodes[1]
17622-
.node
17623-
.peer_connected(
17624-
nodes[0].node.get_our_node_id(),
17625-
&msgs::Init {
17626-
features: nodes[0].node.init_features(),
17627-
networks: None,
17628-
remote_network_address: None,
17629-
},
17630-
false,
17631-
)
17632-
.unwrap();
17633-
17634-
let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
17635-
assert_eq!(node_1_events.len(), 2);
17636-
17637-
let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
17638-
assert_eq!(node_0_events.len(), 1);
17639-
17640-
match node_0_events[0] {
17641-
MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
17642-
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
17643-
// nodes[0] would send a stale channel reestablish, so there's no need to handle this.
17644-
},
17645-
_ => panic!("Unexpected event"),
17646-
}
17647-
17648-
if let MessageSendEvent::SendPeerStorageRetrieval { node_id, msg } = &node_1_events[0] {
17649-
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
17650-
// Should Panic here!
17651-
let res = std::panic::catch_unwind(|| {
17652-
nodes[0]
17653-
.node
17654-
.handle_peer_storage_retrieval(nodes[1].node.get_our_node_id(), msg.clone())
17655-
});
17656-
assert!(res.is_err());
17657-
} else {
17658-
panic!("Unexpected event {node_1_events:?}")
17659-
}
17660-
17661-
if let MessageSendEvent::SendChannelReestablish { .. } = &node_1_events[1] {
17662-
// After the `peer_storage_retrieval` message would come a `channel_reestablish` (which
17663-
// would also cause nodes[0] to panic) but it already went down due to lost state so
17664-
// there's nothing to deliver.
17665-
} else {
17666-
panic!("Unexpected event {node_1_events:?}")
17667-
}
17668-
// Since we panicked above, we also expect a panic on `Drop`.
17669-
let res = std::panic::catch_unwind(|| drop(nodes));
17670-
assert!(res.is_err());
17671-
}
17672-
1767317545
#[test]
1767417546
#[rustfmt::skip]
1767517547
fn test_keysend_dup_payment_hash() {

lightning/src/ln/reload_tests.rs

Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1305,3 +1305,134 @@ fn test_htlc_localremoved_persistence() {
13051305
let htlc_fail_msg_after_reload = msgs.2.unwrap().update_fail_htlcs[0].clone();
13061306
assert_eq!(htlc_fail_msg, htlc_fail_msg_after_reload);
13071307
}
1308+
1309+
1310+
1311+
#[test]
1312+
#[cfg(peer_storage)]
1313+
fn test_peer_storage() {
1314+
let chanmon_cfgs = create_chanmon_cfgs(2);
1315+
let (persister, chain_monitor);
1316+
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1317+
let nodes_0_deserialized;
1318+
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1319+
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1320+
1321+
let (_, _, cid, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
1322+
send_payment(&nodes[0], &[&nodes[1]], 1000);
1323+
let nodes_0_serialized = nodes[0].node.encode();
1324+
let old_state_monitor = get_monitor!(nodes[0], cid).encode();
1325+
send_payment(&nodes[0], &[&nodes[1]], 10000);
1326+
send_payment(&nodes[0], &[&nodes[1]], 9999);
1327+
1328+
// Update peer storage with latest commitment txns
1329+
connect_blocks(&nodes[0], 1);
1330+
connect_blocks(&nodes[0], 1);
1331+
1332+
let peer_storage_msg_events_node0 =
1333+
nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_msg_events();
1334+
let peer_storage_msg_events_node1 =
1335+
nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_msg_events();
1336+
assert_ne!(peer_storage_msg_events_node0.len(), 0);
1337+
assert_ne!(peer_storage_msg_events_node1.len(), 0);
1338+
1339+
for ps_msg in peer_storage_msg_events_node0 {
1340+
match ps_msg {
1341+
MessageSendEvent::SendPeerStorage { ref node_id, ref msg } => {
1342+
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1343+
nodes[1].node.handle_peer_storage(nodes[0].node.get_our_node_id(), msg.clone());
1344+
},
1345+
_ => panic!("Unexpected event"),
1346+
}
1347+
}
1348+
1349+
for ps_msg in peer_storage_msg_events_node1 {
1350+
match ps_msg {
1351+
MessageSendEvent::SendPeerStorage { ref node_id, ref msg } => {
1352+
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1353+
nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msg.clone());
1354+
},
1355+
_ => panic!("Unexpected event"),
1356+
}
1357+
}
1358+
1359+
nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
1360+
nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
1361+
1362+
// Reload Node!
1363+
// TODO: Handle the case where we've completely forgotten about an active channel.
1364+
reload_node!(
1365+
nodes[0],
1366+
test_default_channel_config(),
1367+
&nodes_0_serialized,
1368+
&[&old_state_monitor[..]],
1369+
persister,
1370+
chain_monitor,
1371+
nodes_0_deserialized
1372+
);
1373+
1374+
nodes[0]
1375+
.node
1376+
.peer_connected(
1377+
nodes[1].node.get_our_node_id(),
1378+
&msgs::Init {
1379+
features: nodes[1].node.init_features(),
1380+
networks: None,
1381+
remote_network_address: None,
1382+
},
1383+
true,
1384+
)
1385+
.unwrap();
1386+
1387+
nodes[1]
1388+
.node
1389+
.peer_connected(
1390+
nodes[0].node.get_our_node_id(),
1391+
&msgs::Init {
1392+
features: nodes[0].node.init_features(),
1393+
networks: None,
1394+
remote_network_address: None,
1395+
},
1396+
false,
1397+
)
1398+
.unwrap();
1399+
1400+
let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
1401+
assert_eq!(node_1_events.len(), 2);
1402+
1403+
let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
1404+
assert_eq!(node_0_events.len(), 1);
1405+
1406+
match node_0_events[0] {
1407+
MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
1408+
assert_eq!(*node_id, nodes[1].node.get_our_node_id());
1409+
// nodes[0] would send a stale channel reestablish, so there's no need to handle this.
1410+
},
1411+
_ => panic!("Unexpected event"),
1412+
}
1413+
1414+
if let MessageSendEvent::SendPeerStorageRetrieval { node_id, msg } = &node_1_events[0] {
1415+
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
1416+
// Should Panic here!
1417+
let res = std::panic::catch_unwind(|| {
1418+
nodes[0]
1419+
.node
1420+
.handle_peer_storage_retrieval(nodes[1].node.get_our_node_id(), msg.clone())
1421+
});
1422+
assert!(res.is_err());
1423+
} else {
1424+
panic!("Unexpected event {node_1_events:?}")
1425+
}
1426+
1427+
if let MessageSendEvent::SendChannelReestablish { .. } = &node_1_events[1] {
1428+
// After the `peer_storage_retrieval` message would come a `channel_reestablish` (which
1429+
// would also cause nodes[0] to panic) but it already went down due to lost state so
1430+
// there's nothing to deliver.
1431+
} else {
1432+
panic!("Unexpected event {node_1_events:?}")
1433+
}
1434+
// Since we panicked above, we also expect a panic on `Drop`.
1435+
let res = std::panic::catch_unwind(|| drop(nodes));
1436+
assert!(res.is_err());
1437+
}
1438+

0 commit comments

Comments (0)