diff --git a/Cargo.lock b/Cargo.lock index b2935b0e6a..464e093ff4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2397,6 +2397,7 @@ dependencies = [ "slab", "task_control", "thiserror 2.0.16", + "tracelimit", "tracing", "vm_resource", "vmcore", @@ -4413,6 +4414,7 @@ dependencies = [ "futures-concurrency", "guestmem", "inspect", + "inspect_counters", "memory_range", "mesh", "net_backend_resources", diff --git a/support/tracelimit/src/lib.rs b/support/tracelimit/src/lib.rs index 0d3d0548c4..beba5ffec1 100644 --- a/support/tracelimit/src/lib.rs +++ b/support/tracelimit/src/lib.rs @@ -295,3 +295,156 @@ macro_rules! info_ratelimited { } }; } + +/// As [`tracing::event!`], but rate limited. +/// +/// Can be called with optional parameters to customize rate limiting: +/// - `period: ` - rate limiting period in milliseconds +/// - `limit: ` - maximum events per period +/// +/// `level` is required and must be a compile-time literal identifier (ERROR, WARN, INFO, DEBUG, TRACE). +#[macro_export] +macro_rules! 
event_ratelimited_static { + // With both period and limit and level + (level: $level:ident, period: $period:expr, limit: $limit:expr, $($rest:tt)*) => {{ + static RATE_LIMITER: $crate::RateLimiter = $crate::RateLimiter::new_default(); + if let Ok(missed_events) = RATE_LIMITER.event_with_config(Some($period), Some($limit)) { + $crate::tracing::event!( + $crate::tracing::Level::$level, + dropped_ratelimited = missed_events, + $($rest)* + ); + } + }}; + // With only period and level + (level: $level:ident, period: $period:expr, $($rest:tt)*) => {{ + static RATE_LIMITER: $crate::RateLimiter = $crate::RateLimiter::new_default(); + if let Ok(missed_events) = RATE_LIMITER.event_with_config(Some($period), None) { + $crate::tracing::event!( + $crate::tracing::Level::$level, + dropped_ratelimited = missed_events, + $($rest)* + ); + } + }}; + // With only limit and level + (level: $level:ident, limit: $limit:expr, $($rest:tt)*) => {{ + static RATE_LIMITER: $crate::RateLimiter = $crate::RateLimiter::new_default(); + if let Ok(missed_events) = RATE_LIMITER.event_with_config(None, Some($limit)) { + $crate::tracing::event!( + $crate::tracing::Level::$level, + dropped_ratelimited = missed_events, + $($rest)* + ); + } + }}; + // Default case (only level provided) + (level: $level:ident, $($rest:tt)*) => {{ + static RATE_LIMITER: $crate::RateLimiter = $crate::RateLimiter::new_default(); + if let Ok(missed_events) = RATE_LIMITER.event() { + $crate::tracing::event!( + $crate::tracing::Level::$level, + dropped_ratelimited = missed_events, + $($rest)* + ); + } + }}; +} + +/// Helper macro for dynamically dispatching to [`event_ratelimited_static!`] based on a runtime level. +/// +/// This macro accepts a runtime `tracing::Level` expression and dispatches to the appropriate +/// compile-time level identifier. Allows the log level to be determined at runtime. 
+/// +/// Examples: +/// ``` +/// use tracing::Level; +/// use tracelimit::event_ratelimited; +/// event_ratelimited!(Level::ERROR, period: 1000, limit: 5, "custom period and limit"); +/// event_ratelimited!(Level::WARN, period: 10000, "custom period only"); +/// event_ratelimited!(Level::INFO, limit: 50, "custom limit only"); +/// event_ratelimited!(Level::TRACE, "simple message"); +/// ``` +#[macro_export] +macro_rules! event_ratelimited { + // With period and limit and level + ($level:expr, period: $period:expr, limit: $limit:expr, $($rest:tt)*) => { + match $level { + $crate::tracing::Level::ERROR => { + $crate::event_ratelimited_static!(level: ERROR, period: $period, limit: $limit, $($rest)*); + } + $crate::tracing::Level::WARN => { + $crate::event_ratelimited_static!(level: WARN, period: $period, limit: $limit, $($rest)*); + } + $crate::tracing::Level::INFO => { + $crate::event_ratelimited_static!(level: INFO, period: $period, limit: $limit, $($rest)*); + } + $crate::tracing::Level::DEBUG => { + $crate::event_ratelimited_static!(level: DEBUG, period: $period, limit: $limit, $($rest)*); + } + $crate::tracing::Level::TRACE => { + $crate::event_ratelimited_static!(level: TRACE, period: $period, limit: $limit, $($rest)*); + } + } + }; + // With period and level + ($level:expr, period: $period:expr, $($rest:tt)*) => { + match $level { + $crate::tracing::Level::ERROR => { + $crate::event_ratelimited_static!(level: ERROR, period: $period, $($rest)*); + } + $crate::tracing::Level::WARN => { + $crate::event_ratelimited_static!(level: WARN, period: $period, $($rest)*); + } + $crate::tracing::Level::INFO => { + $crate::event_ratelimited_static!(level: INFO, period: $period, $($rest)*); + } + $crate::tracing::Level::DEBUG => { + $crate::event_ratelimited_static!(level: DEBUG, period: $period, $($rest)*); + } + $crate::tracing::Level::TRACE => { + $crate::event_ratelimited_static!(level: TRACE, period: $period, $($rest)*); + } + } + }; + // With limit and level + 
($level:expr, limit: $limit:expr, $($rest:tt)*) => { + match $level { + $crate::tracing::Level::ERROR => { + $crate::event_ratelimited_static!(level: ERROR, limit: $limit, $($rest)*); + } + $crate::tracing::Level::WARN => { + $crate::event_ratelimited_static!(level: WARN, limit: $limit, $($rest)*); + } + $crate::tracing::Level::INFO => { + $crate::event_ratelimited_static!(level: INFO, limit: $limit, $($rest)*); + } + $crate::tracing::Level::DEBUG => { + $crate::event_ratelimited_static!(level: DEBUG, limit: $limit, $($rest)*); + } + $crate::tracing::Level::TRACE => { + $crate::event_ratelimited_static!(level: TRACE, limit: $limit, $($rest)*); + } + } + }; + // Default case (only level provided) + ($level:expr, $($rest:tt)*) => { + match $level { + $crate::tracing::Level::ERROR => { + $crate::event_ratelimited_static!(level: ERROR, $($rest)*); + } + $crate::tracing::Level::WARN => { + $crate::event_ratelimited_static!(level: WARN, $($rest)*); + } + $crate::tracing::Level::INFO => { + $crate::event_ratelimited_static!(level: INFO, $($rest)*); + } + $crate::tracing::Level::DEBUG => { + $crate::event_ratelimited_static!(level: DEBUG, $($rest)*); + } + $crate::tracing::Level::TRACE => { + $crate::event_ratelimited_static!(level: TRACE, $($rest)*); + } + } + }; +} diff --git a/vm/devices/net/gdma/Cargo.toml b/vm/devices/net/gdma/Cargo.toml index 077f75623e..a2ca8dc3bc 100644 --- a/vm/devices/net/gdma/Cargo.toml +++ b/vm/devices/net/gdma/Cargo.toml @@ -23,6 +23,7 @@ vm_resource.workspace = true inspect.workspace = true task_control.workspace = true +tracelimit.workspace = true anyhow.workspace = true async-trait.workspace = true diff --git a/vm/devices/net/gdma/src/bnic.rs b/vm/devices/net/gdma/src/bnic.rs index b6d8fe0ebe..02f36ba81d 100644 --- a/vm/devices/net/gdma/src/bnic.rs +++ b/vm/devices/net/gdma/src/bnic.rs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
+use self::bnic_defs::CQE_TX_GDMA_ERR; use self::bnic_defs::CQE_TX_OKAY; use self::bnic_defs::MANA_CQE_COMPLETION; use self::bnic_defs::ManaCommandCode; @@ -560,6 +561,30 @@ impl TxRxTask { meta.offload_tcp_segmentation = true; } + // With LSO, the first SGE is the header and the rest are the payload. + // For LSO, the requirements by the GDMA hardware are: + // - The first SGE must be the header and must be <= 256 bytes. + // - There should be at least two SGEs. + // Possible test improvement: Disable the Queue to mimic the hardware behavior. + if meta.offload_tcp_segmentation { + if sqe.sgl().len() < 2 { + tracelimit::error_ratelimited!( + sgl_count = sqe.sgl().len(), + "LSO enabled, but only one SGE" + ); + self.post_tx_completion_error(); + return Ok(()); + } + if sge0.size > 256 { + tracelimit::error_ratelimited!( + sge0_size = sge0.size, + "LSO enabled and SGE[0] size > 256 bytes" + ); + self.post_tx_completion_error(); + return Ok(()); + } + } + let tx_segments = &mut self.tx_segment_buffer; tx_segments.clear(); tx_segments.push(TxSegment { @@ -582,6 +607,20 @@ impl TxRxTask { Ok(()) } + // Possible test improvement: provide proper OOB data for the GDMA error. 
+ fn post_tx_completion_error(&mut self) { + let tx_oob = ManaTxCompOob { + cqe_hdr: ManaCqeHeader::new() + .with_client_type(MANA_CQE_COMPLETION) + .with_cqe_type(CQE_TX_GDMA_ERR), + tx_data_offset: 0, + offsets: ManaTxCompOobOffsets::new(), + reserved: [0; 12], + }; + self.queues + .post_cq(self.sq_cq_id, tx_oob.as_bytes(), self.sq_id, true); + } + fn post_tx_completion(&mut self) { let tx_oob = ManaTxCompOob { cqe_hdr: ManaCqeHeader::new() diff --git a/vm/devices/net/net_backend/Cargo.toml b/vm/devices/net/net_backend/Cargo.toml index 47748784d3..06d1d89408 100644 --- a/vm/devices/net/net_backend/Cargo.toml +++ b/vm/devices/net/net_backend/Cargo.toml @@ -24,6 +24,7 @@ futures-concurrency.workspace = true parking_lot.workspace = true tracing.workspace = true thiserror.workspace = true +inspect_counters.workspace = true [lints] workspace = true diff --git a/vm/devices/net/net_backend/src/lib.rs b/vm/devices/net/net_backend/src/lib.rs index 20c0b82332..811a31d79e 100644 --- a/vm/devices/net/net_backend/src/lib.rs +++ b/vm/devices/net/net_backend/src/lib.rs @@ -21,6 +21,7 @@ use futures_concurrency::future::Race; use guestmem::GuestMemory; use guestmem::GuestMemoryError; use inspect::InspectMut; +use inspect_counters::Counter; use mesh::rpc::Rpc; use mesh::rpc::RpcSend; use null::NullEndpoint; @@ -144,6 +145,12 @@ pub enum TxError { #[error("unrecoverable error. {0}")] Fatal(#[source] anyhow::Error), } +pub trait BackendQueueStats { + fn rx_errors(&self) -> Counter; + fn tx_errors(&self) -> Counter; + fn rx_packets(&self) -> Counter; + fn tx_packets(&self) -> Counter; +} /// A trait for sending and receiving network packets. #[async_trait] @@ -172,6 +179,11 @@ pub trait Queue: Send + InspectMut { /// Get the buffer access. 
fn buffer_access(&mut self) -> Option<&mut dyn BufferAccess>; + + /// Get queue statistics + fn queue_stats(&self) -> Option<&dyn BackendQueueStats> { + None // Default implementation - not all queues implement stats + } } /// A trait for providing access to guest memory buffers. diff --git a/vm/devices/net/net_mana/src/lib.rs b/vm/devices/net/net_mana/src/lib.rs index 0db14d77a7..3ffe9c67f1 100644 --- a/vm/devices/net/net_mana/src/lib.rs +++ b/vm/devices/net/net_mana/src/lib.rs @@ -38,6 +38,7 @@ use mana_driver::mana::Vport; use mana_driver::queues::Cq; use mana_driver::queues::Eq; use mana_driver::queues::Wq; +use net_backend::BackendQueueStats; use net_backend::BufferAccess; use net_backend::Endpoint; use net_backend::EndpointAction; @@ -716,8 +717,15 @@ impl ManaQueue { } } - fn trace_tx_error(&mut self, cqe_params: CqeParams, tx_oob: ManaTxCompOob, done_length: usize) { - tracelimit::error_ratelimited!( + fn trace_tx( + &mut self, + tracing_level: tracing::Level, + cqe_params: CqeParams, + tx_oob: ManaTxCompOob, + done_length: usize, + ) { + tracelimit::event_ratelimited!( + tracing_level, cqe_type = tx_oob.cqe_hdr.cqe_type(), vendor_err = tx_oob.cqe_hdr.vendor_err(), wq_number = cqe_params.wq_number(), @@ -730,10 +738,11 @@ impl ManaQueue { ); let wqe_offset = tx_oob.offsets.tx_wqe_offset(); - self.trace_tx_wqe_from_offset(wqe_offset); + self.trace_tx_wqe_from_offset(tracing_level, wqe_offset); if let Some(packet) = self.posted_tx.front() { - tracelimit::error_ratelimited!( + tracelimit::event_ratelimited!( + tracing_level, id = packet.id.0, wqe_len = packet.wqe_len, bounced_len_with_padding = packet.bounced_len_with_padding, @@ -742,7 +751,7 @@ impl ManaQueue { } } - fn trace_tx_wqe_from_offset(&mut self, wqe_offset: u32) { + fn trace_tx_wqe_from_offset(&mut self, tracing_level: tracing::Level, wqe_offset: u32) { let header_size = size_of::(); // 8 bytes let s_oob_size = size_of::(); // 8 bytes let size = header_size + s_oob_size; @@ -756,7 +765,8 @@ impl 
ManaQueue { } }; - tracelimit::error_ratelimited!( + tracelimit::event_ratelimited!( + tracing_level, last_vbytes = wqe_header.last_vbytes, num_sgl_entries = wqe_header.params.num_sgl_entries(), inline_client_oob_size = wqe_header.params.inline_client_oob_size(), @@ -772,7 +782,8 @@ impl ManaQueue { let tx_s_oob = ManaTxShortOob::read_from_prefix(bytes); match tx_s_oob { Ok((tx_s_oob, _)) => { - tracelimit::error_ratelimited!( + tracelimit::event_ratelimited!( + tracing_level, pkt_fmt = tx_s_oob.pkt_fmt(), is_outer_ipv4 = tx_s_oob.is_outer_ipv4(), is_outer_ipv6 = tx_s_oob.is_outer_ipv6(), @@ -1020,7 +1031,7 @@ impl Queue for ManaQueue { // CQE_TX_GDMA_ERR is how the Hardware indicates that it has disabled the queue. self.stats.tx_errors.increment(); self.stats.tx_stuck.increment(); - self.trace_tx_error(cqe.params, tx_oob, done.len()); + self.trace_tx(tracing::Level::ERROR, cqe.params, tx_oob, done.len()); // Return a TryRestart error to indicate that the queue needs to be restarted. return Err(TxError::TryRestart(anyhow::anyhow!("TX GDMA error"))); } @@ -1028,7 +1039,7 @@ impl Queue for ManaQueue { // Invalid OOB means the metadata didn't match how the Hardware parsed the packet. // This is somewhat common, usually due to Encapsulation, and only the affects the specific packet. 
self.stats.tx_errors.increment(); - self.trace_tx_error(cqe.params, tx_oob, done.len()); + self.trace_tx(tracing::Level::WARN, cqe.params, tx_oob, done.len()); } ty => { tracelimit::error_ratelimited!( @@ -1065,6 +1076,25 @@ impl Queue for ManaQueue { fn buffer_access(&mut self) -> Option<&mut dyn BufferAccess> { Some(self.pool.as_mut()) } + + fn queue_stats(&self) -> Option<&dyn BackendQueueStats> { + Some(&self.stats) + } +} + +impl BackendQueueStats for QueueStats { + fn rx_errors(&self) -> Counter { + self.rx_errors.clone() + } + fn tx_errors(&self) -> Counter { + self.tx_errors.clone() + } + fn rx_packets(&self) -> Counter { + self.rx_packets.clone() + } + fn tx_packets(&self) -> Counter { + self.tx_packets.clone() + } } impl ManaQueue { @@ -1519,6 +1549,7 @@ impl Inspect for ContiguousBufferManager { mod tests { use crate::GuestDmaMode; use crate::ManaEndpoint; + use crate::QueueStats; use chipset_device::mmio::ExternallyManagedMmioIntercepts; use gdma::VportConfig; use gdma_defs::bnic::ManaQueryDeviceCfgResp; @@ -1544,32 +1575,94 @@ mod tests { /// ensures that packets can be sent and received. 
#[async_test] async fn test_endpoint_direct_dma(driver: DefaultDriver) { - test_endpoint(driver, GuestDmaMode::DirectDma, 1138, 1).await; + send_test_packet(driver, GuestDmaMode::DirectDma, 1138, 1).await; } #[async_test] async fn test_endpoint_bounce_buffer(driver: DefaultDriver) { - test_endpoint(driver, GuestDmaMode::BounceBuffer, 1138, 1).await; + send_test_packet(driver, GuestDmaMode::BounceBuffer, 1138, 1).await; } #[async_test] async fn test_segment_coalescing(driver: DefaultDriver) { // 34 segments of 60 bytes each == 2040 - test_endpoint(driver, GuestDmaMode::DirectDma, 2040, 34).await; + send_test_packet(driver, GuestDmaMode::DirectDma, 2040, 34).await; } #[async_test] async fn test_segment_coalescing_many(driver: DefaultDriver) { // 128 segments of 16 bytes each == 2048 - test_endpoint(driver, GuestDmaMode::DirectDma, 2048, 128).await; + send_test_packet(driver, GuestDmaMode::DirectDma, 2048, 128).await; } - async fn test_endpoint( + async fn send_test_packet( driver: DefaultDriver, dma_mode: GuestDmaMode, packet_len: usize, num_segments: usize, ) { + let tx_id = 1; + let tx_metadata = net_backend::TxMetadata { + id: TxId(tx_id), + segment_count: num_segments, + len: packet_len, + ..Default::default() + }; + let expected_num_received_packets = 1; + let (data_to_send, tx_segments) = + build_tx_segments(packet_len, num_segments, tx_metadata.clone()); + + test_endpoint( + driver, + dma_mode, + packet_len, + tx_segments, + data_to_send, + expected_num_received_packets, + ) + .await; + } + + fn build_tx_segments( + packet_len: usize, + num_segments: usize, + tx_metadata: net_backend::TxMetadata, + ) -> (Vec, Vec) { + let data_to_send = (0..packet_len).map(|v| v as u8).collect::>(); + + let mut tx_segments = Vec::new(); + let segment_len = packet_len / num_segments; + assert_eq!(packet_len % num_segments, 0); + assert_eq!(data_to_send.len(), packet_len); + + tx_segments.push(TxSegment { + ty: net_backend::TxSegmentType::Head(tx_metadata.clone()), + gpa: 0, + 
len: segment_len as u32, + }); + + for j in 0..(num_segments - 1) { + let gpa = (j + 1) * segment_len; + tx_segments.push(TxSegment { + ty: net_backend::TxSegmentType::Tail, + gpa: gpa as u64, + len: segment_len as u32, + }); + } + + assert_eq!(tx_segments.len(), num_segments); + (data_to_send, tx_segments) + } + + async fn test_endpoint( + driver: DefaultDriver, + dma_mode: GuestDmaMode, + packet_len: usize, + tx_segments: Vec, + data_to_send: Vec, + expected_num_received_packets: usize, + ) -> QueueStats { + let tx_id = 1; let pages = 256; // 1MB let allow_dma = dma_mode == GuestDmaMode::DirectDma; let mem: DeviceTestMemory = DeviceTestMemory::new(pages * 2, allow_dma, "test_endpoint"); @@ -1614,62 +1707,53 @@ mod tests { .await .unwrap(); - for i in 0..1000 { - let sent_data = (0..packet_len).map(|v| (i + v) as u8).collect::>(); - payload_mem.write_at(0, &sent_data).unwrap(); - - let mut segments = Vec::new(); - let segment_len = packet_len / num_segments; - assert!(packet_len.is_multiple_of(num_segments)); - assert!(sent_data.len() == packet_len); - segments.push(TxSegment { - ty: net_backend::TxSegmentType::Head(net_backend::TxMetadata { - id: TxId(1), - segment_count: num_segments, - len: sent_data.len(), - ..Default::default() - }), - gpa: 0, - len: segment_len as u32, - }); - - for j in 0..(num_segments - 1) { - let gpa = (j + 1) * segment_len; - segments.push(TxSegment { - ty: net_backend::TxSegmentType::Tail, - gpa: gpa as u64, - len: segment_len as u32, - }); - } - assert!(segments.len() == num_segments); - - queues[0].tx_avail(segments.as_slice()).unwrap(); - - let mut packets = [RxId(0); 2]; - let mut done = [TxId(0); 2]; - let mut done_n = 0; - let mut packets_n = 0; - while done_n == 0 || packets_n == 0 { - poll_fn(|cx| queues[0].poll_ready(cx)).await; - packets_n += queues[0].rx_poll(&mut packets[packets_n..]).unwrap(); - done_n += queues[0].tx_poll(&mut done[done_n..]).unwrap(); + payload_mem.write_at(0, &data_to_send).unwrap(); + + 
queues[0].tx_avail(tx_segments.as_slice()).unwrap(); + + // Poll for completion + let mut rx_packets = [RxId(0); 2]; + let mut rx_packets_n = 0; + let mut tx_done = [TxId(0); 2]; + let mut tx_done_n = 0; + while rx_packets_n == 0 { + poll_fn(|cx| queues[0].poll_ready(cx)).await; + rx_packets_n += queues[0].rx_poll(&mut rx_packets[rx_packets_n..]).unwrap(); + // GDMA Errors generate a TryRestart error, ignored here. + tx_done_n += queues[0].tx_poll(&mut tx_done[tx_done_n..]).unwrap_or(0); + if expected_num_received_packets == 0 { + break; } - assert_eq!(packets_n, 1); - let rx_id = packets[0]; - - let mut received_data = vec![0; packet_len]; - payload_mem - .read_at(2048 * rx_id.0 as u64, &mut received_data) - .unwrap(); - assert!(received_data.len() == packet_len); - assert_eq!(&received_data[..], sent_data, "{i} {:?}", rx_id); - assert_eq!(done_n, 1); - assert_eq!(done[0].0, 1); - queues[0].rx_avail(&[rx_id]); } + assert_eq!(rx_packets_n, expected_num_received_packets); + + if expected_num_received_packets == 0 { + // If no packets were received, exit. 
+ let stats = get_queue_stats(queues[0].queue_stats()); + drop(queues); + endpoint.stop().await; + return stats; + } + + // Check tx + assert_eq!(tx_done_n, 1); + assert_eq!(tx_done[0].0, tx_id); + + // Check rx + assert_eq!(rx_packets[0].0, 1); + let rx_id = rx_packets[0]; + let mut received_data = vec![0; packet_len]; + payload_mem + .read_at(2048 * rx_id.0 as u64, &mut received_data) + .unwrap(); + assert_eq!(received_data.len(), packet_len); + assert_eq!(&received_data[..], data_to_send, "{:?}", rx_id); + + let stats = get_queue_stats(queues[0].queue_stats()); drop(queues); endpoint.stop().await; + stats } #[async_test] @@ -1702,4 +1786,77 @@ mod tests { let thing = ManaDevice::new(&driver, device, 1, 1).await.unwrap(); let _ = thing.new_vport(0, None, &dev_config).await.unwrap(); } + + #[async_test] + async fn test_valid_packet(driver: DefaultDriver) { + let tx_id = 1; + let expected_num_received_packets = 1; + let num_segments = 1; + let packet_len = 1138; + let metadata = net_backend::TxMetadata { + id: TxId(tx_id), + segment_count: num_segments, + len: packet_len, + ..Default::default() + }; + + let (data_to_send, tx_segments) = build_tx_segments(packet_len, num_segments, metadata); + + let stats = test_endpoint( + driver, + GuestDmaMode::DirectDma, + packet_len, + tx_segments, + data_to_send, + expected_num_received_packets, + ) + .await; + + assert_eq!(stats.tx_packets.get(), 1, "tx_packets increase"); + assert_eq!(stats.rx_packets.get(), 1, "rx_packets increase"); + assert_eq!(stats.tx_errors.get(), 0, "tx_errors remain the same"); + assert_eq!(stats.rx_errors.get(), 0, "rx_errors remain the same"); + } + + #[async_test] + async fn test_tx_error_handling(driver: DefaultDriver) { + let tx_id = 1; + let expected_num_received_packets = 0; + let num_segments = 1; + let packet_len = 1138; + // LSO Enabled, but sending insufficient number of segments. 
+ let metadata = net_backend::TxMetadata { + id: TxId(tx_id), + segment_count: num_segments, + len: packet_len, + offload_tcp_segmentation: true, + ..Default::default() + }; + + let (data_to_send, tx_segments) = build_tx_segments(packet_len, num_segments, metadata); + + let stats = test_endpoint( + driver, + GuestDmaMode::DirectDma, + packet_len, + tx_segments, + data_to_send, + expected_num_received_packets, + ) + .await; + + assert_eq!(stats.tx_errors.get(), 1, "tx_errors increase"); + assert_eq!(stats.tx_packets.get(), 0, "tx_packets stay the same"); + } + + fn get_queue_stats(queue_stats: Option<&dyn net_backend::BackendQueueStats>) -> QueueStats { + let queue_stats = queue_stats.unwrap(); + QueueStats { + rx_errors: queue_stats.rx_errors(), + tx_errors: queue_stats.tx_errors(), + rx_packets: queue_stats.rx_packets(), + tx_packets: queue_stats.tx_packets(), + ..Default::default() + } + } }