diff --git a/examples/readme.rs b/examples/readme.rs index 56db6a43..e999c7aa 100644 --- a/examples/readme.rs +++ b/examples/readme.rs @@ -1,3 +1,4 @@ +use io_uring::cqueue::EntryMarker; use io_uring::{opcode, types, IoUring}; use std::os::unix::io::AsRawFd; use std::{fs, io}; @@ -16,7 +17,7 @@ fn main() -> io::Result<()> { // that the entry pushed into submission queue is valid (e.g. fd, buffer). unsafe { ring.submission() - .push(&read_e) + .push(read_e) .expect("submission queue is full"); } diff --git a/examples/tcp_echo.rs b/examples/tcp_echo.rs index 6c6cf533..a57f5881 100644 --- a/examples/tcp_echo.rs +++ b/examples/tcp_echo.rs @@ -3,6 +3,7 @@ use std::net::TcpListener; use std::os::unix::io::{AsRawFd, RawFd}; use std::{io, ptr}; +use io_uring::cqueue::EntryMarker; use io_uring::{opcode, squeue, types, IoUring, SubmissionQueue}; use slab::Slab; @@ -42,7 +43,7 @@ impl AcceptCount { pub fn push_to(&mut self, sq: &mut SubmissionQueue<'_>) { while self.count > 0 { unsafe { - match sq.push(&self.entry) { + match sq.push(self.entry.clone()) { Ok(_) => self.count -= 1, Err(_) => break, } @@ -91,7 +92,7 @@ fn main() -> anyhow::Result<()> { match backlog.pop_front() { Some(sqe) => unsafe { - let _ = sq.push(&sqe); + let _ = sq.push(sqe); }, None => break, } @@ -127,7 +128,7 @@ fn main() -> anyhow::Result<()> { .user_data(poll_token as _); unsafe { - if sq.push(&poll_e).is_err() { + if sq.push(poll_e.clone()).is_err() { backlog.push_back(poll_e); } } @@ -150,7 +151,7 @@ fn main() -> anyhow::Result<()> { .user_data(token_index as _); unsafe { - if sq.push(&read_e).is_err() { + if sq.push(read_e.clone()).is_err() { backlog.push_back(read_e); } } @@ -181,7 +182,7 @@ fn main() -> anyhow::Result<()> { .user_data(token_index as _); unsafe { - if sq.push(&write_e).is_err() { + if sq.push(write_e.clone()).is_err() { backlog.push_back(write_e); } } @@ -222,7 +223,7 @@ fn main() -> anyhow::Result<()> { }; unsafe { - if sq.push(&entry).is_err() { + if sq.push(entry.clone()).is_err() { backlog.push_back(entry); } } diff --git a/io-uring-bench/src/iovec.rs b/io-uring-bench/src/iovec.rs index ff6174ba..a44c935a 100644 --- a/io-uring-bench/src/iovec.rs +++ b/io-uring-bench/src/iovec.rs @@ -36,7 +36,7 @@ fn bench_iovec(c: &mut Criterion) { unsafe { ring.submission() - .push(&entry.build()) + .push(entry.build()) .expect("queue is full"); } @@ -69,12 +69,12 @@ fn bench_iovec(c: &mut Criterion) { unsafe { let mut queue = ring.submission(); queue - .push(&entry.build().flags(squeue::Flags::IO_LINK)) + .push(entry.build().flags(squeue::Flags::IO_LINK)) .expect("queue is full"); for _ in 0..4 { let entry = opcode::Nop::new().build(); queue - .push(&entry.flags(squeue::Flags::IO_LINK)) + .push(entry.flags(squeue::Flags::IO_LINK)) .expect("queue is full"); } } @@ -97,7 +97,7 @@ fn bench_iovec(c: &mut Criterion) { unsafe { queue - .push(&entry.build().flags(squeue::Flags::IO_LINK)) + .push(entry.build().flags(squeue::Flags::IO_LINK)) .expect("queue is full"); } } @@ -121,7 +121,7 @@ fn bench_iovec(c: &mut Criterion) { unsafe { ring.submission() - .push(&entry.build()) + .push(entry.build()) .expect("queue is full"); } diff --git a/io-uring-bench/src/nop.rs b/io-uring-bench/src/nop.rs index 8eebaf31..1eb139a7 100644 --- a/io-uring-bench/src/nop.rs +++ b/io-uring-bench/src/nop.rs @@ -25,7 +25,7 @@ fn bench_normal(c: &mut Criterion) { let mut sq = io_uring.submission(); while queue.want() { unsafe { - match sq.push(&black_box(opcode::Nop::new()).build()) { + match sq.push(black_box(opcode::Nop::new()).build()) { Ok(_) 
=> queue.pop(), Err(_) => break, } diff --git a/io-uring-test/src/tests/cancel.rs b/io-uring-test/src/tests/cancel.rs index c762f384..29d5a0b0 100644 --- a/io-uring-test/src/tests/cancel.rs +++ b/io-uring-test/src/tests/cancel.rs @@ -1,4 +1,5 @@ use crate::Test; +use io_uring::cqueue::EntryMarker; use io_uring::types::CancelBuilder; use io_uring::{cqueue, opcode, squeue, types, IoUring}; use std::fs::File; @@ -29,7 +30,7 @@ pub fn test_async_cancel_user_data( timeout_e.user_data(2004).into(), cancel_e.user_data(2005).into(), ]; - for sqe in &entries { + for sqe in entries.clone() { unsafe { ring.submission().push(sqe).expect("queue is full"); } @@ -179,7 +180,7 @@ pub fn test_async_cancel_fd( poll_e.user_data(2003).into(), cancel_e.user_data(2004).into(), ]; - for sqe in &entries { + for sqe in entries.clone() { unsafe { ring.submission().push(sqe).expect("queue is full"); } @@ -229,7 +230,7 @@ pub fn test_async_cancel_fd_all( poll_e.user_data(2004).into(), cancel_e.user_data(2005).into(), ]; - for sqe in &entries { + for sqe in entries.clone() { unsafe { ring.submission().push(sqe).expect("queue is full"); } diff --git a/io-uring-test/src/tests/fs.rs b/io-uring-test/src/tests/fs.rs index ad12901a..7707ee03 100644 --- a/io-uring-test/src/tests/fs.rs +++ b/io-uring-test/src/tests/fs.rs @@ -1,5 +1,6 @@ use crate::utils; use crate::Test; +use io_uring::cqueue::EntryMarker; use io_uring::{cqueue, opcode, squeue, types, IoUring}; use std::ffi::CString; use std::fs; @@ -68,7 +69,7 @@ pub fn test_file_fsync( unsafe { ring.submission() - .push(&fsync_e.build().user_data(0x03).into()) + .push(fsync_e.build().user_data(0x03).into()) .expect("queue is full"); } @@ -106,7 +107,7 @@ pub fn test_file_fsync_file_range( unsafe { ring.submission() - .push(&falloc_e.build().user_data(0x10).into()) + .push(falloc_e.build().user_data(0x10).into()) .expect("queue is full"); } @@ -178,7 +179,7 @@ pub fn test_file_openat2( unsafe { ring.submission() - .push(&open_e.build().user_data(0x11).into()) + .push(open_e.build().user_data(0x11).into()) .expect("queue is full"); } @@ -238,7 +239,7 @@ pub fn test_file_openat2_close_file_index( unsafe { ring.submission() - .push(&close_e.build().user_data(0x12).into()) + .push(close_e.build().user_data(0x12).into()) .expect("queue is full"); } @@ -533,7 +534,7 @@ pub fn test_file_cur_pos( .into(); unsafe { - ring.submission().push(&write_e).expect("queue is full"); + ring.submission().push(write_e).expect("queue is full"); } ring.submit_and_wait(1)?; @@ -545,7 +546,7 @@ pub fn test_file_cur_pos( .into(); unsafe { - ring.submission().push(&write_e).expect("queue is full"); + ring.submission().push(write_e).expect("queue is full"); } ring.submit_and_wait(2)?; @@ -554,7 +555,7 @@ pub fn test_file_cur_pos( unsafe { ring.submission() - .push(&read_e.build().user_data(0x03).into()) + .push(read_e.build().user_data(0x03).into()) .expect("queue is full"); } @@ -605,7 +606,7 @@ pub fn test_statx( .into(); unsafe { - ring.submission().push(&statx_e).expect("queue is full"); + ring.submission().push(statx_e).expect("queue is full"); } ring.submit_and_wait(1)?; @@ -647,7 +648,7 @@ pub fn test_statx( .into(); unsafe { - ring.submission().push(&statx_e).expect("queue is full"); + ring.submission().push(statx_e).expect("queue is full"); } ring.submit_and_wait(1)?; @@ -700,7 +701,7 @@ pub fn test_file_direct_write_read( unsafe { ring.submission() - .push(&splice_e.build().user_data(0x33).into()) + .push(splice_e.build().user_data(0x33).into()) .expect("queue is full"); } @@ -835,7 +836,7 @@ 
pub fn test_ftruncate( unsafe { ring.submission() - .push(&ftruncate_e.build().user_data(0x33).into()) + .push(ftruncate_e.build().user_data(0x33).into()) .expect("queue is full"); } @@ -855,7 +856,7 @@ pub fn test_ftruncate( unsafe { ring.submission() - .push(&ftruncate_e.build().user_data(0x34).into()) + .push(ftruncate_e.build().user_data(0x34).into()) .expect("queue is full"); } @@ -904,7 +905,7 @@ pub fn test_fixed_fd_install( let read_e = opcode::Read::new(fd, output.as_mut_ptr(), output.len() as _); unsafe { ring.submission() - .push(&read_e.build().user_data(0x01).into()) + .push(read_e.build().user_data(0x01).into()) .expect("queue is full"); } @@ -919,7 +920,7 @@ pub fn test_fixed_fd_install( unsafe { ring.submission() - .push(&fixed_fd_install_e.build().user_data(0x02).into()) + .push(fixed_fd_install_e.build().user_data(0x02).into()) .expect("queue is full"); } diff --git a/io-uring-test/src/tests/futex.rs b/io-uring-test/src/tests/futex.rs index d0253914..d632ca2c 100644 --- a/io-uring-test/src/tests/futex.rs +++ b/io-uring-test/src/tests/futex.rs @@ -1,4 +1,5 @@ use crate::Test; +use io_uring::cqueue::EntryMarker; use io_uring::types::FutexWaitV; use io_uring::{cqueue, opcode, squeue, IoUring}; use std::sync::atomic::{AtomicU32, Ordering}; @@ -60,7 +61,7 @@ pub fn test_futex_wait( unsafe { let mut queue = ring.submission(); queue - .push(&futex_wait_e.build().user_data(USER_DATA).into()) + .push(futex_wait_e.build().user_data(USER_DATA).into()) .expect("queue is full"); } @@ -122,7 +123,7 @@ pub fn test_futex_wake( unsafe { let mut queue = ring.submission(); queue - .push(&futex_wake_e.build().user_data(USER_DATA).into()) + .push(futex_wake_e.build().user_data(USER_DATA).into()) .expect("queue is full"); } ring.submit_and_wait(1)?; @@ -165,7 +166,7 @@ pub fn test_futex_waitv( unsafe { let mut queue = ring.submission(); queue - .push(&futex_waitv_e.build().user_data(USER_DATA).into()) + .push(futex_waitv_e.build().user_data(USER_DATA).into()) .expect("queue is full"); } diff --git a/io-uring-test/src/tests/net.rs b/io-uring-test/src/tests/net.rs index a8aef6be..5245a4dd 100644 --- a/io-uring-test/src/tests/net.rs +++ b/io-uring-test/src/tests/net.rs @@ -1,6 +1,7 @@ use crate::tests::register_buf_ring; use crate::utils; use crate::Test; +use io_uring::cqueue::EntryMarker; use io_uring::squeue::Flags; use io_uring::types::{BufRingEntry, Fd}; use io_uring::{cqueue, opcode, squeue, types, IoUring}; @@ -98,9 +99,9 @@ pub fn test_tcp_send_recv( .user_data(0x01) .flags(squeue::Flags::IO_LINK) .into(); - queue.push(&send_e).expect("queue is full"); + queue.push(send_e).expect("queue is full"); queue - .push(&recv_e.build().user_data(0x02).into()) + .push(recv_e.build().user_data(0x02).into()) .expect("queue is full"); } @@ -160,7 +161,7 @@ pub fn test_tcp_send_bundle( unsafe { let mut queue = ring.submission(); let send_e = send_e.build().user_data(0x01).into(); - queue.push(&send_e).expect("queue is full"); + queue.push(send_e).expect("queue is full"); } ring.submit_and_wait(1)?; @@ -213,9 +214,9 @@ pub fn test_tcp_zero_copy_send_recv( let mut queue = ring.submission(); queue .push( - &sendmsg_e + sendmsg_e .build() .user_data(0x01) .flags(squeue::Flags::IO_LINK) @@ -397,7 +398,7 @@ pub fn test_tcp_sendmsg_recvmsg( ) .expect("queue is full"); queue - .push(&recvmsg_e.build().user_data(0x02).into()) + .push(recvmsg_e.build().user_data(0x02).into()) .expect("queue is full"); } @@ -474,7 +475,7 @@ pub fn test_tcp_zero_copy_sendmsg_recvmsg( unsafe { ring.submission() - 
.push(&accept_e.build().user_data(0x0e).into()) + .push(accept_e.build().user_data(0x0e).into()) .expect("queue is full"); } @@ -593,7 +594,7 @@ pub fn test_tcp_accept_file_index( unsafe { ring.submission() - .push(&accept_e.build().user_data(2002).into()) + .push(accept_e.build().user_data(2002).into()) .expect("queue is full"); } @@ -668,7 +669,7 @@ pub fn test_tcp_accept_multi( unsafe { ring.submission() - .push(&cancel_e.build().user_data(2003).into()) + .push(cancel_e.build().user_data(2003).into()) .expect("queue is full"); } @@ -724,7 +725,7 @@ pub fn test_tcp_accept_multi_file_index( unsafe { ring.submission() - .push(&connect_e.build().user_data(0x0f).into()) + .push(connect_e.build().user_data(0x0f).into()) .expect("queue is full"); } @@ -850,7 +851,7 @@ pub fn test_tcp_buffer_select( unsafe { ring.submission() - .push(&provide_bufs_e.build().user_data(0x21).into()) + .push(provide_bufs_e.build().user_data(0x21).into()) .expect("queue is full"); } @@ -872,7 +873,7 @@ pub fn test_tcp_buffer_select( .into(); unsafe { - ring.submission().push(&recv_e).expect("queue is full"); + ring.submission().push(recv_e).expect("queue is full"); } ring.submit_and_wait(1)?; @@ -892,7 +893,7 @@ pub fn test_tcp_buffer_select( .into(); unsafe { - ring.submission().push(&recv_e).expect("queue is full"); + ring.submission().push(recv_e).expect("queue is full"); } ring.submit_and_wait(1)?; @@ -908,7 +909,7 @@ pub fn test_tcp_buffer_select( unsafe { ring.submission() - .push(&provide_bufs_e.build().user_data(0x24).into()) + .push(provide_bufs_e.build().user_data(0x24).into()) .expect("queue is full"); } @@ -927,7 +928,7 @@ pub fn test_tcp_buffer_select( .into(); unsafe { - ring.submission().push(&recv_e).expect("queue is full"); + ring.submission().push(recv_e).expect("queue is full"); } ring.submit_and_wait(1)?; @@ -949,7 +950,7 @@ pub fn test_tcp_buffer_select( unsafe { ring.submission() - .push(&remove_bufs_e.build().user_data(0x26).into()) + .push(remove_bufs_e.build().user_data(0x26).into()) .expect("queue is full"); } @@ -964,7 +965,7 @@ pub fn test_tcp_buffer_select( unsafe { ring.submission() - .push(&remove_bufs_e.build().user_data(0x27).into()) + .push(remove_bufs_e.build().user_data(0x27).into()) .expect("queue is full"); } @@ -1006,7 +1007,7 @@ pub fn test_tcp_buffer_select_recvmsg( unsafe { ring.submission() - .push(&provide_bufs_e.build().user_data(0x21).into()) + .push(provide_bufs_e.build().user_data(0x21).into()) .expect("queue is full"); } @@ -1195,7 +1196,7 @@ pub fn test_tcp_recv_multi( .into(); unsafe { - ring.submission().push(&recv_e).expect("queue is full"); + ring.submission().push(recv_e).expect("queue is full"); } ring.submit_and_wait(3)?; @@ -1267,7 +1268,7 @@ pub fn test_tcp_recv_bundle( .into(); unsafe { - ring.submission().push(&recv_e).expect("queue is full"); + ring.submission().push(recv_e).expect("queue is full"); } ring.submit_and_wait(1)?; @@ -1340,7 +1341,7 @@ pub fn test_tcp_recv_multi_bundle( unsafe { ring.submission() - .push(&shutdown_e.build().user_data(0x28).into()) + .push(shutdown_e.build().user_data(0x28).into()) .expect("queue is full"); } @@ -1446,7 +1447,7 @@ pub fn test_tcp_shutdown( unsafe { ring.submission() - .push(&write_e.build().into()) + .push(write_e.build().into()) .expect("queue is full"); } @@ -1485,7 +1486,7 @@ pub fn test_socket( ); unsafe { ring.submission() - .push(&socket_fd_op.build().user_data(42).into()) + .push(socket_fd_op.build().user_data(42).into()) .expect("queue is full"); } ring.submit_and_wait(1)?; @@ -1501,7 +1502,7 @@ pub 
fn test_socket( // Try a setsockopt. { let mut optval: libc::c_int = 0; let mut optval_size: libc::socklen_t = std::mem::size_of_val(&optval) as libc::socklen_t; // Get value before. let ret = unsafe { libc::getsockopt( @@ -1521,13 +1522,13 @@ pub fn test_socket( io_uring::types::Fd(io_uring_socket.as_raw_fd()), libc::SOL_SOCKET as u32, libc::SO_REUSEADDR as u32, &optval as *const _ as *const libc::c_void, std::mem::size_of_val(&optval) as libc::socklen_t, ) .build() .user_data(1234); unsafe { - ring.submission().push(&op.into()).expect("queue is full"); + ring.submission().push(op.into()).expect("queue is full"); } ring.submit_and_wait(1)?; let cqes: Vec = ring.completion().map(Into::into).collect(); @@ -1568,7 +1569,7 @@ pub fn test_socket( unsafe { ring.submission() .push( - &fixed_socket_op + fixed_socket_op .file_index(Some(dest_slot)) .build() .user_data(55) @@ -1625,7 +1626,7 @@ pub fn test_udp_recvmsg_multishot = ring.completion().map(Into::into).collect(); assert_eq!(cqes.len(), 1); @@ -1648,7 +1649,7 @@ pub fn test_udp_recvmsg_multishot = ring.completion().map(Into::into).collect(); assert_eq!(cqes.len(), 1); @@ -1799,7 +1800,7 @@ pub fn test_udp_recvmsg_multishot_trunc>(); - unsafe { ring.submission().push_multiple(&send_msgs)? }; + .collect::>(); + unsafe { ring.submission().push_multiple(send_msgs)? }; ring.submitter().submit().unwrap(); ring.submitter().submit_and_wait(4).unwrap(); @@ -1923,7 +1924,7 @@ pub fn test_udp_send_with_dest( .user_data(3) .into(); - unsafe { ring.submission().push_multiple(&[recv, send1, send2])? }; + unsafe { ring.submission().push_multiple([recv, send1, send2])? }; ring.submitter().submit_and_wait(3)?; let cqes: Vec = ring.completion().map(Into::into).collect(); @@ -1987,7 +1988,7 @@ pub fn test_udp_sendzc_with_dest .build() .user_data(11) .into(); - unsafe { ring.submission().push(&provide_bufs_e)? }; + unsafe { ring.submission().push(provide_bufs_e)? }; ring.submitter().submit_and_wait(1)?; let cqes: Vec = ring.completion().map(Into::into).collect(); assert_eq!(cqes.len(), 1); @@ -2000,7 +2001,7 @@ pub fn test_udp_sendzc_with_dest .build() .user_data(3) .into(); - unsafe { ring.submission().push(&recvmsg_e)? }; + unsafe { ring.submission().push(recvmsg_e)?
}; ring.submitter().submit()?; let client_socket: socket2::Socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap().into(); @@ -2018,7 +2019,7 @@ pub fn test_udp_sendzc_with_dest .into(); unsafe { - ring.submission().push(&entry1)?; + ring.submission().push(entry1)?; } // Check the completion events for the two UDP messages, plus a trailing diff --git a/io-uring-test/src/tests/poll.rs b/io-uring-test/src/tests/poll.rs index 835dd3a3..330b2706 100644 --- a/io-uring-test/src/tests/poll.rs +++ b/io-uring-test/src/tests/poll.rs @@ -1,4 +1,5 @@ use crate::Test; +use io_uring::cqueue::EntryMarker; use io_uring::{cqueue, opcode, squeue, types, IoUring}; use std::fs::File; use std::io::{self, Write}; @@ -32,7 +33,7 @@ pub fn test_eventfd_poll( unsafe { let mut queue = ring.submission(); queue - .push(&poll_e.build().user_data(0x04).into()) + .push(poll_e.build().user_data(0x04).into()) .expect("queue is full"); } @@ -81,7 +82,7 @@ pub fn test_eventfd_poll_remove( unsafe { let mut queue = ring.submission(); queue - .push(&poll_e.build().user_data(0x05).into()) + .push(poll_e.build().user_data(0x05).into()) .expect("queue is full"); } @@ -94,7 +95,7 @@ pub fn test_eventfd_poll_remove( unsafe { let mut queue = ring.submission(); queue - .push(&poll_e.build().user_data(0x06).into()) + .push(poll_e.build().user_data(0x06).into()) .expect("queue is full"); } @@ -146,7 +147,7 @@ pub fn test_eventfd_poll_remove_failed( unsafe { let mut queue = ring.submission(); queue - .push(&poll_e.build().user_data(0x04).into()) + .push(poll_e.build().user_data(0x04).into()) .expect("queue is full"); } diff --git a/io-uring-test/src/tests/queue.rs b/io-uring-test/src/tests/queue.rs index 68dddad3..aede9916 100644 --- a/io-uring-test/src/tests/queue.rs +++ b/io-uring-test/src/tests/queue.rs @@ -1,5 +1,5 @@ use crate::Test; -use io_uring::{cqueue, opcode, squeue, types, IoUring}; +use io_uring::{cqueue::{self, EntryMarker}, opcode, squeue, types, IoUring}; pub fn test_nop( ring: &mut IoUring, @@ -15,7 +15,7 @@ pub fn test_nop( unsafe { let mut queue = ring.submission(); - queue.push(&nop_e).expect("queue is full"); + queue.push(nop_e).expect("queue is full"); } ring.submit_and_wait(1)?; @@ -44,21 +44,21 @@ pub fn test_batch( assert!(ring.completion().is_empty()); unsafe { - let sqes = vec![opcode::Nop::new().build().user_data(0x09).into(); 5]; + let mut sqes = vec![opcode::Nop::new().build().user_data(0x09).into(); 5]; let mut sq = ring.submission(); assert_eq!(sq.capacity(), 8); - sq.push_multiple(&sqes).unwrap(); + sq.push_multiple(sqes.clone()).unwrap(); assert_eq!(sq.len(), 5); - let ret = sq.push_multiple(&sqes); + let ret = sq.push_multiple(sqes.clone()); assert!(ret.is_err()); assert_eq!(sq.len(), 5); - sq.push_multiple(&sqes[..3]).unwrap(); + sq.push_multiple(sqes.drain(..3)).unwrap(); } ring.submit_and_wait(8)?; @@ -93,7 +93,7 @@ pub fn test_queue_split( for _ in 0..sq.capacity() { unsafe { - sq.push(&opcode::Nop::new().build().into()) + sq.push(opcode::Nop::new().build().into()) .expect("queue is full"); } } @@ -131,7 +131,7 @@ pub fn test_debug_print( let num_to_sub = sq.capacity(); for _ in 0..num_to_sub { unsafe { - sq.push(&opcode::Nop::new().build().user_data(0x42).into()) + sq.push(opcode::Nop::new().build().user_data(0x42).into()) .expect("queue is full"); } } @@ -175,7 +175,7 @@ pub fn test_msg_ring_data( unsafe { ring.submission() .push( - &opcode::MsgRingData::new(fd, result, user_data, None) + opcode::MsgRingData::new(fd, result, user_data, None) .build() .into(), ) @@ -242,7 +242,7 @@ pub fn 
test_msg_ring_send_fd( let dest_slot = types::DestinationSlot::try_from_slot_target(1).unwrap(); ring.submission() .push( - &opcode::MsgRingSendFd::new(fd, types::Fixed(0), dest_slot, 22) + opcode::MsgRingSendFd::new(fd, types::Fixed(0), dest_slot, 22) .build() .into(), ) @@ -276,7 +276,7 @@ pub fn test_msg_ring_send_fd( let dest_slot = types::DestinationSlot::try_from_slot_target(2).unwrap(); temp_ring .submission() - .push(&opcode::MsgRingSendFd::new(fd, types::Fixed(1), dest_slot, 44).build()) + .push(opcode::MsgRingSendFd::new(fd, types::Fixed(1), dest_slot, 44).build()) .expect("queue is full"); } temp_ring.submit_and_wait(1)?; diff --git a/io-uring-test/src/tests/register_buf_ring.rs b/io-uring-test/src/tests/register_buf_ring.rs index 9c91ebe3..e19c131b 100644 --- a/io-uring-test/src/tests/register_buf_ring.rs +++ b/io-uring-test/src/tests/register_buf_ring.rs @@ -2,6 +2,7 @@ // The entry point in this file can be found by searching for 'pub'. use crate::Test; +use io_uring::cqueue::EntryMarker; use io_uring::types; use io_uring::types::BufRingEntry; use io_uring::{cqueue, opcode, squeue, IoUring}; @@ -551,7 +552,7 @@ where .user_data(0x01) .flags(squeue::Flags::IO_LINK) .into(); - queue.push(&write_e).expect("queue is full"); + queue.push(write_e).expect("queue is full"); } assert_eq!(ring.submit_and_wait(1)?, 1); @@ -581,7 +582,7 @@ where let mut queue = ring.submission(); queue .push( - &read_e + read_e .build() .user_data(0x02) .flags(squeue::Flags::BUFFER_SELECT) diff --git a/io-uring-test/src/tests/register_buffers.rs b/io-uring-test/src/tests/register_buffers.rs index feeb3a8b..34503cdd 100644 --- a/io-uring-test/src/tests/register_buffers.rs +++ b/io-uring-test/src/tests/register_buffers.rs @@ -1,6 +1,6 @@ use crate::Test; use io_uring::{ - cqueue, + cqueue::{self, EntryMarker}, opcode::{ReadFixed, WriteFixed}, squeue, types::Fd, @@ -121,7 +121,7 @@ fn _test_register_buffers< // Safety: We have guaranteed that the buffers in `slices` are all valid for the entire // duration of this function unsafe { - submission_queue.push(&write_entry.into()).unwrap(); + submission_queue.push(write_entry.into()).unwrap(); } }); @@ -159,7 +159,7 @@ fn _test_register_buffers< // Safety: We have guaranteed that the buffers in `slices` are all valid for the entire // duration of this function unsafe { - submission_queue.push(&read_entry.into()).unwrap(); + submission_queue.push(read_entry.into()).unwrap(); } }); @@ -213,8 +213,8 @@ pub fn test_register_buffers_update .build() .user_data(USER_DATA_4); } - unsafe { ring.submission().push(&entry.into()).unwrap() }; + unsafe { ring.submission().push(entry.into()).unwrap() }; nr_submitted += ring.submit()?; } // 11 operations should have been submitted. 
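For reference, here is a minimal sketch (illustrative only, not part of the patch) of what a call site looks like after this change: entries are handed to push and push_multiple by value, and CQE accessors such as user_data() now come from the cqueue::EntryMarker trait, so that trait must be in scope. This assumes the modified crate exactly as in the diff above; the queue size and user_data values are arbitrary.

    use io_uring::cqueue::EntryMarker;
    use io_uring::{opcode, IoUring};

    fn main() -> std::io::Result<()> {
        let mut ring = IoUring::new(8)?;

        // The submission queue now takes ownership of the entry instead of
        // cloning it out of a reference.
        let nop = opcode::Nop::new().build().user_data(0x42);
        unsafe {
            ring.submission().push(nop).expect("submission queue is full");
        }

        // push_multiple accepts an exact-size iterator of entries, e.g. an array.
        let batch = [
            opcode::Nop::new().build().user_data(1),
            opcode::Nop::new().build().user_data(2),
        ];
        unsafe {
            ring.submission()
                .push_multiple(batch)
                .expect("submission queue is full");
        }

        ring.submit_and_wait(3)?;

        // user_data() on a completion entry is a cqueue::EntryMarker trait method.
        for cqe in ring.completion() {
            println!("completed: user_data = {}", cqe.user_data());
        }
        Ok(())
    }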
@@ -147,7 +148,7 @@ pub fn test_register_sync_cancel_any( @@ -21,7 +21,7 @@ pub fn test_timeout( unsafe { let mut queue = ring.submission(); queue - .push(&timeout_e.build().user_data(0x09).into()) + .push(timeout_e.build().user_data(0x09).into()) .expect("queue is full"); } @@ -45,10 +45,10 @@ pub fn test_timeout( unsafe { let mut queue = ring.submission(); queue - .push(&timeout_e.build().user_data(0x0a).into()) + .push(timeout_e.build().user_data(0x0a).into()) .expect("queue is full"); queue - .push(&nop_e.build().user_data(0x0b).into()) + .push(nop_e.build().user_data(0x0b).into()) .expect("queue is full"); } @@ -98,10 +98,10 @@ pub fn test_timeout_count( unsafe { let mut queue = ring.submission(); queue - .push(&timeout_e.build().user_data(0x0c).into()) + .push(timeout_e.build().user_data(0x0c).into()) .expect("queue is full"); queue - .push(&nop_e.build().user_data(0x0d).into()) + .push(nop_e.build().user_data(0x0d).into()) .expect("queue is full"); } @@ -142,7 +142,7 @@ pub fn test_timeout_remove( unsafe { let mut queue = ring.submission(); queue - .push(&timeout_e.build().user_data(0x10).into()) + .push(timeout_e.build().user_data(0x10).into()) .expect("queue is full"); } @@ -155,7 +155,7 @@ pub fn test_timeout_remove( unsafe { let mut queue = ring.submission(); queue - .push(&timeout_e.build().user_data(0x11).into()) + .push(timeout_e.build().user_data(0x11).into()) .expect("queue is full"); } @@ -196,7 +196,7 @@ pub fn test_timeout_update( unsafe { let mut queue = ring.submission(); queue - .push(&timeout_e.build().user_data(0x10).into()) + .push(timeout_e.build().user_data(0x10).into()) .expect("queue is full"); } @@ -211,7 +211,7 @@ pub fn test_timeout_update( unsafe { let mut queue = ring.submission(); queue - .push(&timeout_e.build().user_data(0x11).into()) + .push(timeout_e.build().user_data(0x11).into()) .expect("queue is full"); } @@ -252,7 +252,7 @@ pub fn test_timeout_cancel( unsafe { let mut queue = ring.submission(); queue - .push(&timeout_e.build().user_data(0x10).into()) + .push(timeout_e.build().user_data(0x10).into()) .expect("queue is full"); } @@ -265,7 +265,7 @@ pub fn test_timeout_cancel( unsafe { let mut queue = ring.submission(); queue - .push(&timeout_e.build().user_data(0x11).into()) + .push(timeout_e.build().user_data(0x11).into()) .expect("queue is full"); } @@ -315,7 +315,7 @@ pub fn test_timeout_abs( unsafe { let mut queue = ring.submission(); queue - .push(&timeout_e.build().user_data(0x19).into()) + .push(timeout_e.build().user_data(0x19).into()) .expect("queue is full"); } @@ -365,7 +365,7 @@ pub fn test_timeout_submit_args( unsafe { ring.submission() - .push(&nop_e.build().user_data(0x1c).into()) + .push(nop_e.build().user_data(0x1c).into()) .expect("queue is full"); } diff --git a/io-uring-test/src/utils.rs b/io-uring-test/src/utils.rs index 9ac51e26..a357a87e 100644 --- a/io-uring-test/src/utils.rs +++ b/io-uring-test/src/utils.rs @@ -1,4 +1,4 @@ -use io_uring::{cqueue, opcode, squeue, types, IoUring}; +use io_uring::{cqueue::{self, EntryMarker}, opcode, squeue, types, IoUring}; use std::io::{IoSlice, IoSliceMut}; macro_rules! 
require { @@ -71,9 +71,9 @@ pub fn write_read( .user_data(0x01) .flags(squeue::Flags::IO_LINK) .into(); - queue.push(&write_e).expect("queue is full"); + queue.push(write_e).expect("queue is full"); queue - .push(&read_e.build().user_data(0x02).into()) + .push(read_e.build().user_data(0x02).into()) .expect("queue is full"); } @@ -115,9 +115,9 @@ pub fn writev_readv( .user_data(0x01) .flags(squeue::Flags::IO_LINK) .into(); - queue.push(&write_e).expect("queue is full"); + queue.push(write_e).expect("queue is full"); queue - .push(&read_e.build().user_data(0x02).into()) + .push(read_e.build().user_data(0x02).into()) .expect("queue is full"); } diff --git a/src/cqueue.rs b/src/cqueue.rs index cb6e4d3c..b1ff4e28 100644 --- a/src/cqueue.rs +++ b/src/cqueue.rs @@ -32,8 +32,12 @@ pub struct CompletionQueue<'a, E: EntryMarker = Entry> { /// A completion queue entry (CQE), representing a complete I/O operation. /// /// This is implemented for [`Entry`] and [`Entry32`]. -pub trait EntryMarker: Clone + Debug + Into + private::Sealed { +pub trait EntryMarker: Send + Sync + Clone + Debug + Into + private::Sealed { const BUILD_FLAGS: u32; + + fn user_data(&self) -> u64; + fn result(&self) -> i32; + fn flags(&self) -> u32; } /// A 16-byte completion queue entry (CQE), representing a complete I/O operation. @@ -149,7 +153,7 @@ impl CompletionQueue<'_, E> { } #[inline] - unsafe fn pop(&mut self) -> E { + pub unsafe fn pop(&mut self) -> E { let entry = &*self .queue .cqes @@ -191,18 +195,22 @@ impl ExactSizeIterator for CompletionQueue<'_, E> { } } -impl Entry { +impl private::Sealed for Entry {} + +impl EntryMarker for Entry { + const BUILD_FLAGS: u32 = 0; + /// The operation-specific result code. For example, for a [`Read`](crate::opcode::Read) /// operation this is equivalent to the return value of the `read(2)` system call. #[inline] - pub fn result(&self) -> i32 { + fn result(&self) -> i32 { self.0.res } /// The user data of the request, as set by /// [`Entry::user_data`](crate::squeue::Entry::user_data) on the submission queue event. #[inline] - pub fn user_data(&self) -> u64 { + fn user_data(&self) -> u64 { self.0.user_data } @@ -212,17 +220,11 @@ impl Entry { /// - Storing the selected buffer ID, if one was selected. See /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info. #[inline] - pub fn flags(&self) -> u32 { + fn flags(&self) -> u32 { self.0.flags } } -impl private::Sealed for Entry {} - -impl EntryMarker for Entry { - const BUILD_FLAGS: u32 = 0; -} - impl Clone for Entry { fn clone(&self) -> Entry { // io_uring_cqe doesn't implement Clone due to the 'big_cqe' incomplete array field. @@ -241,17 +243,29 @@ impl Debug for Entry { } impl Entry32 { + /// Additional data available in 32-byte completion queue entries (CQEs). + #[inline] + pub fn big_cqe(&self) -> &[u64; 2] { + &self.1 + } +} + +impl private::Sealed for Entry32 {} + +impl EntryMarker for Entry32 { + const BUILD_FLAGS: u32 = sys::IORING_SETUP_CQE32; + /// The operation-specific result code. For example, for a [`Read`](crate::opcode::Read) /// operation this is equivalent to the return value of the `read(2)` system call. #[inline] - pub fn result(&self) -> i32 { + fn result(&self) -> i32 { self.0 .0.res } /// The user data of the request, as set by /// [`Entry::user_data`](crate::squeue::Entry::user_data) on the submission queue event. 
#[inline] - pub fn user_data(&self) -> u64 { + fn user_data(&self) -> u64 { self.0 .0.user_data } @@ -261,21 +275,9 @@ impl Entry32 { /// - Storing the selected buffer ID, if one was selected. See /// [`BUFFER_SELECT`](crate::squeue::Flags::BUFFER_SELECT) for more info. #[inline] - pub fn flags(&self) -> u32 { + fn flags(&self) -> u32 { self.0 .0.flags } - - /// Additional data available in 32-byte completion queue entries (CQEs). - #[inline] - pub fn big_cqe(&self) -> &[u64; 2] { - &self.1 - } -} - -impl private::Sealed for Entry32 {} - -impl EntryMarker for Entry32 { - const BUILD_FLAGS: u32 = sys::IORING_SETUP_CQE32; } impl From for Entry { diff --git a/src/lib.rs b/src/lib.rs index ed5a73ea..c0b1307a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -90,7 +90,7 @@ impl IoUring { /// # Safety /// /// The caller must uphold that the file descriptor is owned and refers to a uring. The - /// `params` argument must be equivalent to the those previously filled in by the kernel when + /// `params` argument must be equivalent to those previously filled in by the kernel when /// the provided ring was created. pub unsafe fn from_fd(fd: RawFd, params: Parameters) -> io::Result { Self::with_fd_and_params(OwnedFd::from_raw_fd(fd), params.0) @@ -100,9 +100,9 @@ impl IoUring { impl IoUring { /// Create a [`Builder`] for an `IoUring` instance. /// - /// This allows for further customization than [`new`](Self::new). + /// This allows for more customization than [`new`](Self::new). /// - /// Unlike [`IoUring::new`], this function is available for any combination of submission + /// Unlike [`new`](Self::new), this function is available for any combination of submission /// queue entry (SQE) and completion queue entry (CQE) types. #[must_use] pub fn builder() -> Builder { @@ -215,7 +215,7 @@ impl IoUring { /// be used to operate on the different parts of the io_uring instance independently. /// /// If you use this method to obtain `sq` and `cq`, - /// please note that you need to `drop` or `sync` the queue before and after submit, + /// please note that you need to `drop` or `sync` the queue before and after each submit, /// otherwise the queue will not be updated. #[inline] pub fn split( @@ -402,7 +402,7 @@ impl Builder { /// thread interrupt. This can delay the application from making other progress. Setting this /// flag will hint to io_uring that it should defer work until an io_uring_enter(2) call with /// the IORING_ENTER_GETEVENTS flag set. This allows the application to request work to run - /// just just before it wants to process completions. This flag requires the + /// just before it wants to process completions. This flag requires the /// IORING_SETUP_SINGLE_ISSUER flag to be set, and also enforces that the call to /// io_uring_enter(2) is called from the same thread that submitted requests. Note that if this /// flag is set then it is the application's responsibility to periodically trigger work (for diff --git a/src/squeue.rs b/src/squeue.rs index 29b78f38..3a739494 100644 --- a/src/squeue.rs +++ b/src/squeue.rs @@ -31,8 +31,11 @@ pub struct SubmissionQueue<'a, E: EntryMarker = Entry> { /// A submission queue entry (SQE), representing a request for an I/O operation. /// /// This is implemented for [`Entry`] and [`Entry128`]. 
-pub trait EntryMarker: Clone + Debug + From<Entry> + private::Sealed { +pub trait EntryMarker: Send + Sync + Clone + Debug + From<Entry> + private::Sealed { const BUILD_FLAGS: u32; + + fn set_user_data(self, user_data: u64) -> Self; + fn get_user_data(&self) -> u64; } /// A 64-byte submission queue entry (SQE), representing a request for an I/O operation. @@ -273,7 +276,7 @@ impl<E: EntryMarker> SubmissionQueue<'_, E> { /// Developers must ensure that parameters of the entry (such as buffer) are valid and will /// be valid for the entire duration of the operation, otherwise it may cause memory problems. #[inline] - pub unsafe fn push(&mut self, entry: &E) -> Result<(), PushError> { + pub unsafe fn push(&mut self, entry: E) -> Result<(), PushError> { if !self.is_full() { self.push_unchecked(entry); Ok(()) @@ -291,12 +294,18 @@ impl<E: EntryMarker> SubmissionQueue<'_, E> { /// will be valid for the entire duration of the operation, otherwise it may cause memory /// problems. #[inline] - pub unsafe fn push_multiple(&mut self, entries: &[E]) -> Result<(), PushError> { - if self.capacity() - self.len() < entries.len() { + pub unsafe fn push_multiple<I, T>(&mut self, entries: T) -> Result<(), PushError> + where + I: ExactSizeIterator<Item = E>, + T: IntoIterator<Item = E, IntoIter = I>, + { + let iter = entries.into_iter(); + + if self.capacity() - self.len() < iter.len() { return Err(PushError); } - for entry in entries { + for entry in iter { self.push_unchecked(entry); } @@ -304,11 +313,12 @@ impl<E: EntryMarker> SubmissionQueue<'_, E> { } #[inline] - unsafe fn push_unchecked(&mut self, entry: &E) { + pub unsafe fn push_unchecked(&mut self, entry: E) { *self .queue .sqes - .add((self.tail & self.queue.ring_mask) as usize) = entry.clone(); + .add((self.tail & self.queue.ring_mask) as usize) = entry; + // The entry is moved into the ring slot, so no clone is needed. self.tail = self.tail.wrapping_add(1); } } @@ -354,6 +364,14 @@ impl private::Sealed for Entry {} impl EntryMarker for Entry { const BUILD_FLAGS: u32 = 0; + + fn set_user_data(self, user_data: u64) -> Self { + self.user_data(user_data) + } + + fn get_user_data(&self) -> u64 { + self.get_user_data() + } } impl Clone for Entry { @@ -390,6 +408,13 @@ impl Entry128 { self } + + /// Get the previously application-supplied user data. + #[inline] + pub fn get_user_data(&self) -> u64 { + self.0.0.user_data + } + /// Set the personality of this event. You can obtain a personality using /// [`Submitter::register_personality`](crate::Submitter::register_personality). #[inline] @@ -403,6 +428,14 @@ impl private::Sealed for Entry128 {} impl EntryMarker for Entry128 { const BUILD_FLAGS: u32 = sys::IORING_SETUP_SQE128; + + fn set_user_data(self, user_data: u64) -> Self { + self.user_data(user_data) + } + + fn get_user_data(&self) -> u64 { + self.get_user_data() + } } impl From<Entry> for Entry128 { diff --git a/src/submit.rs b/src/submit.rs index 70e6c942..6fc74674 100644 --- a/src/submit.rs +++ b/src/submit.rs @@ -80,7 +80,7 @@ impl<'a> Submitter<'a> { /// /// # Safety /// - /// This provides a raw interface so developer must ensure that parameters are correct. + /// This provides a raw interface so the developer must ensure that parameters are correct. pub unsafe fn enter( &self, to_submit: u32, @@ -271,9 +271,9 @@ impl<'a> Submitter<'a> { .map(drop) } - /// Registers an empty table of nr fixed buffers buffers. + /// Registers an empty table of `nr` fixed buffers. /// - /// These must be updated before use, using eg. + /// These must be updated before use; e.g. using /// [`register_buffers_update`](Self::register_buffers_update).
/// /// See [`register_buffers`](Self::register_buffers) @@ -295,7 +295,7 @@ impl<'a> Submitter<'a> { .map(drop) } - /// Registers an empty file table of nr_files number of file descriptors. The sparse variant is + /// Registers an empty file table with `nr` file descriptors. The sparse variant is /// available in kernels 5.19 and later. /// /// Registering a file table is a prerequisite for using any request that @@ -320,7 +320,7 @@ impl<'a> Submitter<'a> { /// Register files for I/O. You can use the registered files with /// [`Fixed`](crate::types::Fixed). /// - /// Each fd may be -1, in which case it is considered "sparse", and can be filled in later with + /// An fd may be -1, in which case it is considered "sparse", and can be filled in later with /// [`register_files_update`](Self::register_files_update). /// /// Note that this will wait for the ring to idle; it will only return once all active requests @@ -384,7 +384,7 @@ impl<'a> Submitter<'a> { /// Fill in the given [`Probe`] with information about the opcodes supported by io_uring on the /// running kernel. /// - /// # Examples + /// # Example /// // This is marked no_run as it is only available from Linux 5.6+, however the latest Ubuntu (on // which CI runs) only has Linux 5.4. @@ -520,7 +520,7 @@ impl<'a> Submitter<'a> { .map(drop) } - /// Undoes a CPU mask previously set with register_iowq_aff + /// Undoes a CPU mask previously set with [register_iowq_aff](Self::register_iowq_aff) pub fn unregister_iowq_aff(&self) -> io::Result<()> { execute( self.fd.as_raw_fd(),