Skip to content

Commit b3c711d

Browse files
committed
Add unit tests for ChunkedMessageQueue padding
Add five new tests:
- `test_chunked_message_queue_ping_padding`: verifies chunk alignment for various message sizes
- `test_chunked_message_queue_small_remainder_overflow`: verifies the two-Ping edge case when remainder < `MIN_ENCRYPTED_PING_SIZE`
- `test_chunked_message_queue_chunk_alignment`: verifies alignment after encrypting multiple real messages
- `test_chunked_message_queue_buffer_compaction`: verifies `maybe_compact` correctly drains sent bytes
- `test_chunked_message_queue_pending_msg_bytes_tracking`: verifies that `pending_msg_bytes` tracks real message bytes and is unaffected by padding

Also extract a `get_test_encryptor` helper to reduce boilerplate across the new tests.

Co-Authored-By: HAL 9000
Signed-off-by: Elias Rohrer <dev@tnull.de>
1 parent 46622df commit b3c711d

File tree

1 file changed

+156
-0
lines changed

1 file changed

+156
-0
lines changed

lightning/src/ln/peer_handler.rs

Lines changed: 156 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4994,6 +4994,162 @@ mod tests {
49944994
);
49954995
}
49964996

4997+
/// Helper: completes a noise handshake and returns the outbound encryptor ready for encryption.
4998+
fn get_test_encryptor() -> PeerChannelEncryptor {
4999+
let secp_ctx = Secp256k1::new();
5000+
// Inbound peer identity (the "responder").
5001+
let inbound_secret = SecretKey::from_slice(&[42; 32]).unwrap();
5002+
let inbound_pubkey =
5003+
bitcoin::secp256k1::PublicKey::from_secret_key(&secp_ctx, &inbound_secret);
5004+
let inbound_signer = crate::util::test_utils::TestNodeSigner::new(inbound_secret);
5005+
5006+
// Outbound peer identity (the "initiator").
5007+
let outbound_secret = SecretKey::from_slice(&[43; 32]).unwrap();
5008+
let outbound_signer = crate::util::test_utils::TestNodeSigner::new(outbound_secret);
5009+
5010+
let outbound_ephemeral = SecretKey::from_slice(&[44; 32]).unwrap();
5011+
let inbound_ephemeral = SecretKey::from_slice(&[45; 32]).unwrap();
5012+
5013+
let mut outbound = PeerChannelEncryptor::new_outbound(inbound_pubkey, outbound_ephemeral);
5014+
let mut inbound = PeerChannelEncryptor::new_inbound(&&inbound_signer);
5015+
5016+
let act_one = outbound.get_act_one(&secp_ctx);
5017+
let act_two = inbound
5018+
.process_act_one_with_keys(&act_one, &&inbound_signer, inbound_ephemeral, &secp_ctx)
5019+
.unwrap();
5020+
let (act_three, _) = outbound.process_act_two(&act_two, &&outbound_signer).unwrap();
5021+
let _ = inbound.process_act_three(&act_three).unwrap();
5022+
5023+
outbound
5024+
}
5025+
5026+
#[test]
fn test_chunked_message_queue_ping_padding() {
	// After `pad_and_finalize_chunk` the queue must hold a whole number of
	// chunks, no matter how much message data was previously buffered.
	let mut encryptor = get_test_encryptor();

	// Sweep a range of message sizes to cover different remainder positions.
	for msg_size in [40, 100, 500, 1000, 5000, 30000, 65535] {
		let mut queue = ChunkedMessageQueue::new();
		// Simulate already-encrypted message data by pushing a raw blob of
		// `msg_size` bytes straight into the buffer.
		queue.buffer.extend_from_slice(&vec![0u8; msg_size]);
		queue.pending_msg_bytes += msg_size;

		queue.pad_and_finalize_chunk(&mut encryptor);

		let pending = queue.pending_bytes();
		assert_eq!(
			pending % CHUNK_SIZE,
			0,
			"Buffer not chunk-aligned after padding for msg_size={}",
			msg_size
		);
		assert!(
			pending >= CHUNK_SIZE,
			"Buffer should be at least one chunk for msg_size={}",
			msg_size
		);
	}
}
5053+
5054+
#[test]
fn test_chunked_message_queue_small_remainder_overflow() {
	// When fewer than MIN_ENCRYPTED_PING_SIZE bytes remain in the current
	// chunk, padding must spill into a second chunk (the two-Ping edge case).
	let mut encryptor = get_test_encryptor();

	// Exercise every overflowing remainder: 1..MIN_ENCRYPTED_PING_SIZE.
	for remainder in 1..MIN_ENCRYPTED_PING_SIZE {
		let mut queue = ChunkedMessageQueue::new();
		// Leave exactly `remainder` free bytes in the current chunk.
		let fill_size = CHUNK_SIZE - remainder;
		queue.buffer.resize(fill_size, 0);
		queue.pending_msg_bytes = fill_size;

		queue.pad_and_finalize_chunk(&mut encryptor);

		assert_eq!(
			queue.pending_bytes() % CHUNK_SIZE,
			0,
			"Buffer not chunk-aligned for remainder={}",
			remainder
		);
		// The padding can't fit in the remainder, so it must extend the
		// buffer to exactly two full chunks.
		assert_eq!(
			queue.pending_bytes(),
			2 * CHUNK_SIZE,
			"Expected 2 chunks for small remainder={}",
			remainder
		);
	}
}
5083+
5084+
#[test]
fn test_chunked_message_queue_chunk_alignment() {
	// Encrypting several real messages and then padding must leave the
	// buffer aligned to a whole number of chunks.
	let mut encryptor = get_test_encryptor();
	let mut queue = ChunkedMessageQueue::new();

	// Queue up Ping messages of assorted sizes via the real encryption path.
	for ponglen in [0u16, 64, 256, 1024] {
		let msg: wire::Message<()> =
			wire::Message::Ping(msgs::Ping { ponglen, byteslen: 64 });
		queue.encrypt_and_push_message(&mut encryptor, msg);
	}

	let before_padding = queue.pending_bytes();
	assert!(before_padding > 0);

	queue.pad_and_finalize_chunk(&mut encryptor);

	// Padding never shrinks the buffer and always restores alignment.
	assert_eq!(queue.pending_bytes() % CHUNK_SIZE, 0);
	assert!(queue.pending_bytes() >= before_padding);
}
5105+
5106+
#[test]
fn test_chunked_message_queue_buffer_compaction() {
	// `maybe_compact` should drop already-sent bytes from the front of the
	// buffer and reset the send offset.
	let mut queue = ChunkedMessageQueue::new();

	// Stage two full chunks of dummy data.
	queue.buffer.resize(2 * CHUNK_SIZE, 0xAB);
	queue.pending_msg_bytes = 2 * CHUNK_SIZE;
	assert_eq!(queue.pending_bytes(), 2 * CHUNK_SIZE);

	// Pretend the first chunk went out on the wire.
	queue.send_offset = CHUNK_SIZE;
	queue.pending_msg_bytes = CHUNK_SIZE;
	queue.maybe_compact();

	// Compaction drains the sent prefix: offset rewinds to zero and exactly
	// one chunk of unsent data remains.
	assert_eq!(queue.send_offset, 0);
	assert_eq!(queue.buffer.len(), CHUNK_SIZE);
	assert_eq!(queue.pending_bytes(), CHUNK_SIZE);
}
5126+
5127+
#[test]
fn test_chunked_message_queue_pending_msg_bytes_tracking() {
	// `pending_msg_bytes` counts real message bytes only; padding must grow
	// `pending_bytes()` without touching it.
	let mut encryptor = get_test_encryptor();
	let mut queue = ChunkedMessageQueue::new();

	// Push one small message through the real encryption path.
	let msg: wire::Message<()> =
		wire::Message::Ping(msgs::Ping { ponglen: 0, byteslen: 64 });
	queue.encrypt_and_push_message(&mut encryptor, msg);

	let msg_bytes = queue.pending_msg_bytes;
	assert!(msg_bytes > 0);
	// With no padding yet, both counters agree.
	assert_eq!(msg_bytes, queue.pending_bytes());

	queue.pad_and_finalize_chunk(&mut encryptor);

	// Padding inflates the raw buffer but leaves the message-byte count alone.
	assert_eq!(queue.pending_msg_bytes, msg_bytes);
	assert!(queue.pending_bytes() > msg_bytes);
	assert_eq!(queue.pending_bytes() % CHUNK_SIZE, 0);

	// total_buffered_bytes reports message bytes, excluding padding.
	assert_eq!(queue.total_buffered_bytes(), msg_bytes);
}
5152+
49975153
#[test]
49985154
fn test_filter_addresses() {
49995155
// Tests the filter_addresses function.

0 commit comments

Comments
 (0)