Skip to content

Commit 64e28db

Browse files
committed
Add unit tests for ChunkedMessageQueue padding
Add five new tests:
- `test_chunked_message_queue_ping_padding`: verifies chunk alignment for various message sizes
- `test_chunked_message_queue_small_remainder_overflow`: verifies the two-Ping edge case when remainder < `MIN_ENCRYPTED_PING_SIZE`
- `test_chunked_message_queue_chunk_alignment`: verifies alignment after encrypting multiple real messages
- `test_chunked_message_queue_buffer_compaction`: verifies `maybe_compact` correctly drains sent bytes
- `test_chunked_message_queue_pending_msg_bytes_tracking`: verifies that `pending_msg_bytes` tracks real message bytes and is unaffected by padding

Also extract a `get_test_encryptor` helper to reduce boilerplate across the new tests.

Co-Authored-By: HAL 9000
Signed-off-by: Elias Rohrer <dev@tnull.de>
1 parent 46622df commit 64e28db

File tree

1 file changed

+159
-0
lines changed

1 file changed

+159
-0
lines changed

lightning/src/ln/peer_handler.rs

Lines changed: 159 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4994,6 +4994,165 @@ mod tests {
49944994
);
49954995
}
49964996

4997+
/// Helper: completes a noise handshake and returns the outbound encryptor ready for encryption.
4998+
fn get_test_encryptor() -> PeerChannelEncryptor {
4999+
use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
5000+
use bitcoin::secp256k1::{Secp256k1, SecretKey};
5001+
5002+
let secp_ctx = Secp256k1::new();
5003+
// Inbound peer identity (the "responder").
5004+
let inbound_secret = SecretKey::from_slice(&[42; 32]).unwrap();
5005+
let inbound_pubkey =
5006+
bitcoin::secp256k1::PublicKey::from_secret_key(&secp_ctx, &inbound_secret);
5007+
let inbound_signer = crate::util::test_utils::TestNodeSigner::new(inbound_secret);
5008+
5009+
// Outbound peer identity (the "initiator").
5010+
let outbound_secret = SecretKey::from_slice(&[43; 32]).unwrap();
5011+
let outbound_signer = crate::util::test_utils::TestNodeSigner::new(outbound_secret);
5012+
5013+
let outbound_ephemeral = SecretKey::from_slice(&[44; 32]).unwrap();
5014+
let inbound_ephemeral = SecretKey::from_slice(&[45; 32]).unwrap();
5015+
5016+
let mut outbound = PeerChannelEncryptor::new_outbound(inbound_pubkey, outbound_ephemeral);
5017+
let mut inbound = PeerChannelEncryptor::new_inbound(&&inbound_signer);
5018+
5019+
let act_one = outbound.get_act_one(&secp_ctx);
5020+
let act_two = inbound
5021+
.process_act_one_with_keys(&act_one, &&inbound_signer, inbound_ephemeral, &secp_ctx)
5022+
.unwrap();
5023+
let (act_three, _) = outbound.process_act_two(&act_two, &&outbound_signer).unwrap();
5024+
let _ = inbound.process_act_three(&act_three).unwrap();
5025+
5026+
outbound
5027+
}
5028+
5029+
#[test]
5030+
fn test_chunked_message_queue_ping_padding() {
5031+
// Tests that Ping padding correctly fills the remainder of a chunk.
5032+
let mut encryptor = get_test_encryptor();
5033+
5034+
// Test various remainder sizes to ensure padding works correctly.
5035+
for msg_size in [40, 100, 500, 1000, 5000, 30000, 65535] {
5036+
let mut queue = ChunkedMessageQueue::new();
5037+
// Push a raw blob of msg_size bytes to simulate encrypted message data.
5038+
let fake_data = vec![0u8; msg_size];
5039+
queue.buffer.extend_from_slice(&fake_data);
5040+
queue.pending_msg_bytes += msg_size;
5041+
5042+
queue.pad_and_finalize_chunk(&mut encryptor);
5043+
assert_eq!(
5044+
queue.pending_bytes() % CHUNK_SIZE,
5045+
0,
5046+
"Buffer not chunk-aligned after padding for msg_size={}",
5047+
msg_size
5048+
);
5049+
assert!(
5050+
queue.pending_bytes() >= CHUNK_SIZE,
5051+
"Buffer should be at least one chunk for msg_size={}",
5052+
msg_size
5053+
);
5054+
}
5055+
}
5056+
5057+
#[test]
5058+
fn test_chunked_message_queue_small_remainder_overflow() {
5059+
// Tests the edge case where remainder < MIN_ENCRYPTED_PING_SIZE, requiring two Pings.
5060+
let mut encryptor = get_test_encryptor();
5061+
5062+
// Test remainders from 1 to MIN_ENCRYPTED_PING_SIZE-1 (the overflow cases).
5063+
for remainder in 1..MIN_ENCRYPTED_PING_SIZE {
5064+
let mut queue = ChunkedMessageQueue::new();
5065+
// Fill buffer so that exactly `remainder` bytes are left in the current chunk.
5066+
let fill_size = CHUNK_SIZE - remainder;
5067+
queue.buffer.resize(fill_size, 0);
5068+
queue.pending_msg_bytes = fill_size;
5069+
5070+
queue.pad_and_finalize_chunk(&mut encryptor);
5071+
assert_eq!(
5072+
queue.pending_bytes() % CHUNK_SIZE,
5073+
0,
5074+
"Buffer not chunk-aligned for remainder={}",
5075+
remainder
5076+
);
5077+
// Should overflow into exactly 2 chunks.
5078+
assert_eq!(
5079+
queue.pending_bytes(),
5080+
2 * CHUNK_SIZE,
5081+
"Expected 2 chunks for small remainder={}",
5082+
remainder
5083+
);
5084+
}
5085+
}
5086+
5087+
#[test]
5088+
fn test_chunked_message_queue_chunk_alignment() {
5089+
// Tests that after multiple messages the buffer stays correctly aligned after padding.
5090+
let mut encryptor = get_test_encryptor();
5091+
let mut queue = ChunkedMessageQueue::new();
5092+
5093+
// Encrypt several Ping messages of various sizes.
5094+
for pong_len in [0u16, 64, 256, 1024] {
5095+
let ping = msgs::Ping { ponglen: pong_len, byteslen: 64 };
5096+
let msg: wire::Message<()> = wire::Message::Ping(ping);
5097+
queue.encrypt_and_push_message(&mut encryptor, msg);
5098+
}
5099+
5100+
let pending_before_pad = queue.pending_bytes();
5101+
assert!(pending_before_pad > 0);
5102+
5103+
queue.pad_and_finalize_chunk(&mut encryptor);
5104+
5105+
assert_eq!(queue.pending_bytes() % CHUNK_SIZE, 0);
5106+
assert!(queue.pending_bytes() >= pending_before_pad);
5107+
}
5108+
5109+
#[test]
5110+
fn test_chunked_message_queue_buffer_compaction() {
5111+
// Tests that maybe_compact drains sent bytes appropriately.
5112+
let mut queue = ChunkedMessageQueue::new();
5113+
5114+
// Fill with 2 chunks worth of data.
5115+
queue.buffer.resize(2 * CHUNK_SIZE, 0xAB);
5116+
queue.pending_msg_bytes = 2 * CHUNK_SIZE;
5117+
assert_eq!(queue.pending_bytes(), 2 * CHUNK_SIZE);
5118+
5119+
// Simulate sending the first chunk.
5120+
queue.send_offset = CHUNK_SIZE;
5121+
queue.pending_msg_bytes = CHUNK_SIZE;
5122+
queue.maybe_compact();
5123+
5124+
// After compaction, send_offset should be 0 and buffer should be one chunk.
5125+
assert_eq!(queue.send_offset, 0);
5126+
assert_eq!(queue.buffer.len(), CHUNK_SIZE);
5127+
assert_eq!(queue.pending_bytes(), CHUNK_SIZE);
5128+
}
5129+
5130+
#[test]
5131+
fn test_chunked_message_queue_pending_msg_bytes_tracking() {
5132+
// Tests that pending_msg_bytes correctly tracks message bytes vs padding bytes.
5133+
let mut encryptor = get_test_encryptor();
5134+
let mut queue = ChunkedMessageQueue::new();
5135+
5136+
// Encrypt a small message.
5137+
let ping = msgs::Ping { ponglen: 0, byteslen: 64 };
5138+
let msg: wire::Message<()> = wire::Message::Ping(ping);
5139+
queue.encrypt_and_push_message(&mut encryptor, msg);
5140+
5141+
let msg_bytes = queue.pending_msg_bytes;
5142+
assert!(msg_bytes > 0);
5143+
assert_eq!(msg_bytes, queue.pending_bytes());
5144+
5145+
// After padding, pending_bytes increases but pending_msg_bytes stays the same.
5146+
queue.pad_and_finalize_chunk(&mut encryptor);
5147+
5148+
assert_eq!(queue.pending_msg_bytes, msg_bytes);
5149+
assert!(queue.pending_bytes() > msg_bytes);
5150+
assert_eq!(queue.pending_bytes() % CHUNK_SIZE, 0);
5151+
5152+
// total_buffered_bytes should use pending_msg_bytes, not pending_bytes.
5153+
assert_eq!(queue.total_buffered_bytes(), msg_bytes);
5154+
}
5155+
49975156
#[test]
49985157
fn test_filter_addresses() {
49995158
// Tests the filter_addresses function.

0 commit comments

Comments
 (0)