diff --git a/tokio/src/fs/file.rs b/tokio/src/fs/file.rs index 755b5eabd16..3579f11ba15 100644 --- a/tokio/src/fs/file.rs +++ b/tokio/src/fs/file.rs @@ -917,7 +917,9 @@ impl std::os::unix::io::AsFd for File { #[cfg(unix)] impl std::os::unix::io::FromRawFd for File { unsafe fn from_raw_fd(fd: std::os::unix::io::RawFd) -> Self { - StdFile::from_raw_fd(fd).into() + // Safety: exactly the same safety contract as + // `std::os::unix::io::FromRawFd::from_raw_fd`. + unsafe { StdFile::from_raw_fd(fd).into() } } } @@ -942,7 +944,9 @@ cfg_windows! { impl FromRawHandle for File { unsafe fn from_raw_handle(handle: RawHandle) -> Self { - StdFile::from_raw_handle(handle).into() + // Safety: exactly the same safety contract as + // `FromRawHandle::from_raw_handle`. + unsafe { StdFile::from_raw_handle(handle).into() } } } } diff --git a/tokio/src/io/poll_evented.rs b/tokio/src/io/poll_evented.rs index ab101c2975d..9f535db01ae 100644 --- a/tokio/src/io/poll_evented.rs +++ b/tokio/src/io/poll_evented.rs @@ -171,7 +171,7 @@ feature! { loop { let evt = ready!(self.registration.poll_read_ready(cx))?; - let b = &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]); + let b = unsafe { &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]) }; // used only when the cfgs below apply #[allow(unused_variables)] @@ -213,7 +213,7 @@ feature! { // Safety: We trust `TcpStream::read` to have filled up `n` bytes in the // buffer. - buf.assume_init(n); + unsafe { buf.assume_init(n) }; buf.advance(n); return Poll::Ready(Ok(())); }, diff --git a/tokio/src/io/read_buf.rs b/tokio/src/io/read_buf.rs index edef84851ec..da78c164b54 100644 --- a/tokio/src/io/read_buf.rs +++ b/tokio/src/io/read_buf.rs @@ -283,7 +283,9 @@ unsafe impl<'a> bytes::BufMut for ReadBuf<'a> { // SAFETY: The caller guarantees that at least `cnt` unfilled bytes have been initialized. 
unsafe fn advance_mut(&mut self, cnt: usize) { - self.assume_init(cnt); + unsafe { + self.assume_init(cnt); + } self.advance(cnt); } @@ -311,16 +313,32 @@ impl fmt::Debug for ReadBuf<'_> { } } +/// # Safety +/// +/// The caller must ensure that `slice` is fully initialized +/// and never writes uninitialized bytes to the returned slice. unsafe fn slice_to_uninit_mut(slice: &mut [u8]) -> &mut [MaybeUninit] { - &mut *(slice as *mut [u8] as *mut [MaybeUninit]) + // SAFETY: `MaybeUninit` has the same memory layout as u8, and the caller + // promises to not write uninitialized bytes to the returned slice. + unsafe { &mut *(slice as *mut [u8] as *mut [MaybeUninit]) } } +/// # Safety +/// +/// The caller must ensure that `slice` is fully initialized. // TODO: This could use `MaybeUninit::slice_assume_init` when it is stable. unsafe fn slice_assume_init(slice: &[MaybeUninit]) -> &[u8] { - &*(slice as *const [MaybeUninit] as *const [u8]) + // SAFETY: `MaybeUninit` has the same memory layout as u8, and the caller + // promises that `slice` is fully initialized. + unsafe { &*(slice as *const [MaybeUninit] as *const [u8]) } } +/// # Safety +/// +/// The caller must ensure that `slice` is fully initialized. // TODO: This could use `MaybeUninit::slice_assume_init_mut` when it is stable. unsafe fn slice_assume_init_mut(slice: &mut [MaybeUninit]) -> &mut [u8] { - &mut *(slice as *mut [MaybeUninit] as *mut [u8]) + // SAFETY: `MaybeUninit` has the same memory layout as `u8`, and the caller + // promises that `slice` is fully initialized. 
+ unsafe { &mut *(slice as *mut [MaybeUninit] as *mut [u8]) } } diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index b24a7705935..83f0c0db950 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -10,7 +10,7 @@ rust_2018_idioms, unreachable_pub )] -#![deny(unused_must_use)] +#![deny(unused_must_use, unsafe_op_in_unsafe_fn)] #![doc(test( no_crate_inject, attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables)) diff --git a/tokio/src/loom/std/atomic_u16.rs b/tokio/src/loom/std/atomic_u16.rs index a9bdcb573c6..d39c3dfff5b 100644 --- a/tokio/src/loom/std/atomic_u16.rs +++ b/tokio/src/loom/std/atomic_u16.rs @@ -26,7 +26,7 @@ impl AtomicU16 { /// All mutations must have happened before the unsynchronized load. /// Additionally, there must be no concurrent mutations. pub(crate) unsafe fn unsync_load(&self) -> u16 { - core::ptr::read(self.inner.get() as *const u16) + unsafe { core::ptr::read(self.inner.get() as *const u16) } } } diff --git a/tokio/src/loom/std/atomic_u32.rs b/tokio/src/loom/std/atomic_u32.rs index 004bd9d75f7..f74f08167fb 100644 --- a/tokio/src/loom/std/atomic_u32.rs +++ b/tokio/src/loom/std/atomic_u32.rs @@ -26,7 +26,7 @@ impl AtomicU32 { /// All mutations must have happened before the unsynchronized load. /// Additionally, there must be no concurrent mutations. pub(crate) unsafe fn unsync_load(&self) -> u32 { - core::ptr::read(self.inner.get() as *const u32) + unsafe { core::ptr::read(self.inner.get() as *const u32) } } } diff --git a/tokio/src/loom/std/atomic_usize.rs b/tokio/src/loom/std/atomic_usize.rs index 0dee481b68e..a77d3eb785b 100644 --- a/tokio/src/loom/std/atomic_usize.rs +++ b/tokio/src/loom/std/atomic_usize.rs @@ -26,7 +26,7 @@ impl AtomicUsize { /// All mutations must have happened before the unsynchronized load. /// Additionally, there must be no concurrent mutations. 
pub(crate) unsafe fn unsync_load(&self) -> usize { - core::ptr::read(self.inner.get() as *const usize) + unsafe { core::ptr::read(self.inner.get() as *const usize) } } pub(crate) fn with_mut(&mut self, f: impl FnOnce(&mut usize) -> R) -> R { diff --git a/tokio/src/macros/addr_of.rs b/tokio/src/macros/addr_of.rs index 9d28158f200..0b03cc5fdab 100644 --- a/tokio/src/macros/addr_of.rs +++ b/tokio/src/macros/addr_of.rs @@ -11,11 +11,16 @@ macro_rules! generate_addr_of_methods { )*} ) => { impl<$($gen)*> $struct_name {$( + #[doc = "# Safety"] + #[doc = ""] + #[doc = "The `me` pointer must be valid."] $(#[$attrs])* $vis unsafe fn $fn_name(me: ::core::ptr::NonNull) -> ::core::ptr::NonNull<$field_type> { let me = me.as_ptr(); - let field = ::std::ptr::addr_of_mut!((*me) $(.$field_name)+ ); - ::core::ptr::NonNull::new_unchecked(field) + // safety: the caller guarantees that `me` is valid + let field = unsafe { ::std::ptr::addr_of_mut!((*me) $(.$field_name)+ ) }; + // safety: the field pointer is never null + unsafe { ::core::ptr::NonNull::new_unchecked(field) } } )*} }; diff --git a/tokio/src/net/tcp/socket.rs b/tokio/src/net/tcp/socket.rs index ee61ba0028a..a168154d4a3 100644 --- a/tokio/src/net/tcp/socket.rs +++ b/tokio/src/net/tcp/socket.rs @@ -836,7 +836,9 @@ cfg_unix! { /// The caller is responsible for ensuring that the socket is in /// non-blocking mode. unsafe fn from_raw_fd(fd: RawFd) -> TcpSocket { - let inner = socket2::Socket::from_raw_fd(fd); + // Safety: exactly the same safety requirements as the + // `FromRawFd::from_raw_fd` trait method. + let inner = unsafe { socket2::Socket::from_raw_fd(fd) }; TcpSocket { inner } } } @@ -875,7 +877,7 @@ cfg_windows! { /// The caller is responsible for ensuring that the socket is in /// non-blocking mode. 
unsafe fn from_raw_socket(socket: RawSocket) -> TcpSocket { - let inner = socket2::Socket::from_raw_socket(socket); + let inner = unsafe { socket2::Socket::from_raw_socket(socket) }; TcpSocket { inner } } } diff --git a/tokio/src/net/unix/socket.rs b/tokio/src/net/unix/socket.rs index cb383b09a59..47ada6042b3 100644 --- a/tokio/src/net/unix/socket.rs +++ b/tokio/src/net/unix/socket.rs @@ -259,7 +259,9 @@ impl AsFd for UnixSocket { impl FromRawFd for UnixSocket { unsafe fn from_raw_fd(fd: RawFd) -> UnixSocket { - let inner = socket2::Socket::from_raw_fd(fd); + // Safety: exactly the same safety requirements as the + // `FromRawFd::from_raw_fd` trait method. + let inner = unsafe { socket2::Socket::from_raw_fd(fd) }; UnixSocket { inner } } } diff --git a/tokio/src/net/windows/named_pipe.rs b/tokio/src/net/windows/named_pipe.rs index 0d51335aea6..17e793b4704 100644 --- a/tokio/src/net/windows/named_pipe.rs +++ b/tokio/src/net/windows/named_pipe.rs @@ -126,7 +126,7 @@ impl NamedPipeServer { /// [Tokio Runtime]: crate::runtime::Runtime /// [enabled I/O]: crate::runtime::Builder::enable_io pub unsafe fn from_raw_handle(handle: RawHandle) -> io::Result { - let named_pipe = mio_windows::NamedPipe::from_raw_handle(handle); + let named_pipe = unsafe { mio_windows::NamedPipe::from_raw_handle(handle) }; Ok(Self { io: PollEvented::new(named_pipe)?, @@ -999,7 +999,7 @@ impl NamedPipeClient { /// [Tokio Runtime]: crate::runtime::Runtime /// [enabled I/O]: crate::runtime::Builder::enable_io pub unsafe fn from_raw_handle(handle: RawHandle) -> io::Result { - let named_pipe = mio_windows::NamedPipe::from_raw_handle(handle); + let named_pipe = unsafe { mio_windows::NamedPipe::from_raw_handle(handle) }; Ok(Self { io: PollEvented::new(named_pipe)?, @@ -2344,22 +2344,24 @@ impl ServerOptions { mode }; - let h = windows_sys::CreateNamedPipeW( - addr.as_ptr(), - open_mode, - pipe_mode, - self.max_instances, - self.out_buffer_size, - self.in_buffer_size, - self.default_timeout, - attrs as 
*mut _, - ); + let h = unsafe { + windows_sys::CreateNamedPipeW( + addr.as_ptr(), + open_mode, + pipe_mode, + self.max_instances, + self.out_buffer_size, + self.in_buffer_size, + self.default_timeout, + attrs as *mut _, + ) + }; if h == windows_sys::INVALID_HANDLE_VALUE { return Err(io::Error::last_os_error()); } - NamedPipeServer::from_raw_handle(h as _) + unsafe { NamedPipeServer::from_raw_handle(h as _) } } } @@ -2550,15 +2552,17 @@ impl ClientOptions { // we have access to windows_sys it ultimately doesn't hurt to use // `CreateFile` explicitly since it allows the use of our already // well-structured wide `addr` to pass into CreateFileW. - let h = windows_sys::CreateFileW( - addr.as_ptr(), - desired_access, - 0, - attrs as *mut _, - windows_sys::OPEN_EXISTING, - self.get_flags(), - null_mut(), - ); + let h = unsafe { + windows_sys::CreateFileW( + addr.as_ptr(), + desired_access, + 0, + attrs as *mut _, + windows_sys::OPEN_EXISTING, + self.get_flags(), + null_mut(), + ) + }; if h == windows_sys::INVALID_HANDLE_VALUE { return Err(io::Error::last_os_error()); @@ -2566,15 +2570,16 @@ impl ClientOptions { if matches!(self.pipe_mode, PipeMode::Message) { let mode = windows_sys::PIPE_READMODE_MESSAGE; - let result = - windows_sys::SetNamedPipeHandleState(h, &mode, ptr::null_mut(), ptr::null_mut()); + let result = unsafe { + windows_sys::SetNamedPipeHandleState(h, &mode, ptr::null_mut(), ptr::null_mut()) + }; if result == 0 { return Err(io::Error::last_os_error()); } } - NamedPipeClient::from_raw_handle(h as _) + unsafe { NamedPipeClient::from_raw_handle(h as _) } } fn get_flags(&self) -> u32 { @@ -2659,13 +2664,15 @@ unsafe fn named_pipe_info(handle: RawHandle) -> io::Result { let mut in_buffer_size = 0; let mut max_instances = 0; - let result = windows_sys::GetNamedPipeInfo( - handle as _, - &mut flags, - &mut out_buffer_size, - &mut in_buffer_size, - &mut max_instances, - ); + let result = unsafe { + windows_sys::GetNamedPipeInfo( + handle as _, + &mut flags, + 
&mut out_buffer_size, + &mut in_buffer_size, + &mut max_instances, + ) + }; if result == 0 { return Err(io::Error::last_os_error()); diff --git a/tokio/src/process/mod.rs b/tokio/src/process/mod.rs index 4e1bd2a700e..aa57a9d408d 100644 --- a/tokio/src/process/mod.rs +++ b/tokio/src/process/mod.rs @@ -750,7 +750,7 @@ impl Command { where F: FnMut() -> io::Result<()> + Send + Sync + 'static, { - self.std.pre_exec(f); + unsafe { self.std.pre_exec(f) }; self } diff --git a/tokio/src/process/windows.rs b/tokio/src/process/windows.rs index ba00b7c84b8..e5c49a15ccc 100644 --- a/tokio/src/process/windows.rs +++ b/tokio/src/process/windows.rs @@ -162,7 +162,7 @@ impl Drop for Waiting { } unsafe extern "system" fn callback(ptr: *mut std::ffi::c_void, _timer_fired: bool) { - let complete = &mut *(ptr as *mut Option>); + let complete = unsafe { &mut *(ptr as *mut Option>) }; let _ = complete.take().unwrap().send(()); } diff --git a/tokio/src/runtime/handle.rs b/tokio/src/runtime/handle.rs index 756151690cf..5030adc6d92 100644 --- a/tokio/src/runtime/handle.rs +++ b/tokio/src/runtime/handle.rs @@ -392,6 +392,10 @@ impl Handle { #[track_caller] #[allow(dead_code)] + /// # Safety + /// + /// This must only be called in `LocalRuntime` if the runtime has been verified to be owned + /// by the current thread. pub(crate) unsafe fn spawn_local_named( &self, future: F, @@ -412,7 +416,7 @@ impl Handle { let future = super::task::trace::Trace::root(future); #[cfg(all(tokio_unstable, feature = "tracing"))] let future = crate::util::trace::task(future, "task", meta, id.as_u64()); - self.inner.spawn_local(future, id, meta.spawned_at) + unsafe { self.inner.spawn_local(future, id, meta.spawned_at) } } /// Returns the flavor of the current `Runtime`. 
diff --git a/tokio/src/runtime/io/registration_set.rs b/tokio/src/runtime/io/registration_set.rs index 21f6c873fb1..2796796de93 100644 --- a/tokio/src/runtime/io/registration_set.rs +++ b/tokio/src/runtime/io/registration_set.rs @@ -119,7 +119,8 @@ impl RegistrationSet { let io = unsafe { NonNull::new_unchecked(Arc::as_ptr(io).cast_mut()) }; super::EXPOSE_IO.unexpose_provenance(io.as_ptr()); - let _ = synced.registrations.remove(io); + // SAFETY: the caller guarantees that `io` is part of this list. + let _ = unsafe { synced.registrations.remove(io) }; } } @@ -141,6 +142,8 @@ unsafe impl linked_list::Link for Arc { unsafe fn pointers( target: NonNull, ) -> NonNull> { - NonNull::new_unchecked(target.as_ref().linked_list_pointers.get()) + // safety: `target.as_ref().linked_list_pointers` is a `UnsafeCell` that + // always returns a non-null pointer. + unsafe { NonNull::new_unchecked(target.as_ref().linked_list_pointers.get()) } } } diff --git a/tokio/src/runtime/io/scheduled_io.rs b/tokio/src/runtime/io/scheduled_io.rs index ce2dd84accb..62194ad2c69 100644 --- a/tokio/src/runtime/io/scheduled_io.rs +++ b/tokio/src/runtime/io/scheduled_io.rs @@ -417,7 +417,7 @@ unsafe impl linked_list::Link for Waiter { } unsafe fn pointers(target: NonNull) -> NonNull> { - Waiter::addr_of_pointers(target) + unsafe { Waiter::addr_of_pointers(target) } } } diff --git a/tokio/src/runtime/park.rs b/tokio/src/runtime/park.rs index 08d3e719bc4..d0ab07a6044 100644 --- a/tokio/src/runtime/park.rs +++ b/tokio/src/runtime/park.rs @@ -305,11 +305,15 @@ impl Inner { Arc::into_raw(this) as *const () } + /// # Safety + /// + /// The pointer must have been created by [`Self::into_raw`]. unsafe fn from_raw(ptr: *const ()) -> Arc { - Arc::from_raw(ptr as *const Inner) + unsafe { Arc::from_raw(ptr as *const Inner) } } } +// TODO: Is this really a unsafe function? 
unsafe fn unparker_to_raw_waker(unparker: Arc) -> RawWaker { RawWaker::new( Inner::into_raw(unparker), @@ -317,23 +321,39 @@ unsafe fn unparker_to_raw_waker(unparker: Arc) -> RawWaker { ) } +/// # Safety +/// +/// The pointer must have been created by [`Inner::into_raw`]. unsafe fn clone(raw: *const ()) -> RawWaker { - Arc::increment_strong_count(raw as *const Inner); - unparker_to_raw_waker(Inner::from_raw(raw)) + unsafe { + Arc::increment_strong_count(raw as *const Inner); + } + unsafe { unparker_to_raw_waker(Inner::from_raw(raw)) } } +/// # Safety +/// +/// The pointer must have been created by [`Inner::into_raw`]. unsafe fn drop_waker(raw: *const ()) { - drop(Inner::from_raw(raw)); + drop(unsafe { Inner::from_raw(raw) }); } +/// # Safety +/// +/// The pointer must have been created by [`Inner::into_raw`]. unsafe fn wake(raw: *const ()) { - let unparker = Inner::from_raw(raw); + let unparker = unsafe { Inner::from_raw(raw) }; unparker.unpark(); } +/// # Safety +/// +/// The pointer must have been created by [`Inner::into_raw`]. unsafe fn wake_by_ref(raw: *const ()) { let raw = raw as *const Inner; - (*raw).unpark(); + unsafe { + (*raw).unpark(); + } } #[cfg(loom)] diff --git a/tokio/src/runtime/scheduler/current_thread/mod.rs b/tokio/src/runtime/scheduler/current_thread/mod.rs index a05dbb96412..3b13cf6d9a4 100644 --- a/tokio/src/runtime/scheduler/current_thread/mod.rs +++ b/tokio/src/runtime/scheduler/current_thread/mod.rs @@ -474,6 +474,7 @@ impl Handle { /// Spawn a task which isn't safe to send across thread boundaries onto the runtime. /// /// # Safety + /// /// This should only be used when this is a `LocalRuntime` or in another case where the runtime /// provably cannot be driven from or moved to different threads from the one on which the task /// is spawned. 
@@ -488,10 +489,12 @@ impl Handle { F: crate::future::Future + 'static, F::Output: 'static, { - let (handle, notified) = me - .shared - .owned - .bind_local(future, me.clone(), id, spawned_at); + // Safety: the caller guarantees that the this is only called on a `LocalRuntime`. + let (handle, notified) = unsafe { + me.shared + .owned + .bind_local(future, me.clone(), id, spawned_at) + }; me.task_hooks.spawn(&TaskMeta { id, diff --git a/tokio/src/runtime/scheduler/inject/rt_multi_thread.rs b/tokio/src/runtime/scheduler/inject/rt_multi_thread.rs index 86bf1efb6fb..44f7b635941 100644 --- a/tokio/src/runtime/scheduler/inject/rt_multi_thread.rs +++ b/tokio/src/runtime/scheduler/inject/rt_multi_thread.rs @@ -55,13 +55,21 @@ impl Shared { // Now that the tasks are linked together, insert them into the // linked list. - self.push_batch_inner(shared, first, prev, counter); + // + // Safety: exactly the same safety requirements as `push_batch` method. + unsafe { + self.push_batch_inner(shared, first, prev, counter); + } } /// Inserts several tasks that have been linked together into the queue. /// /// The provided head and tail may be the same task. In this case, a /// single task is inserted. + /// + /// # Safety + /// + /// Must be called with the same `Synced` instance returned by `Inject::new` #[inline] unsafe fn push_batch_inner( &self, @@ -82,7 +90,8 @@ impl Shared { let mut curr = Some(batch_head); while let Some(task) = curr { - curr = task.get_queue_next(); + // Safety: exactly the same safety requirements as `push_batch_inner`. + curr = unsafe { task.get_queue_next() }; let _ = unsafe { task::Notified::::from_raw(task) }; } @@ -106,7 +115,7 @@ impl Shared { // // safety: All updates to the len atomic are guarded by the mutex. As // such, a non-atomic load followed by a store is safe. 
- let len = self.len.unsync_load(); + let len = unsafe { self.len.unsync_load() }; self.len.store(len + num, Release); } diff --git a/tokio/src/runtime/scheduler/inject/shared.rs b/tokio/src/runtime/scheduler/inject/shared.rs index 61504cf7c90..a73e7fb3448 100644 --- a/tokio/src/runtime/scheduler/inject/shared.rs +++ b/tokio/src/runtime/scheduler/inject/shared.rs @@ -71,7 +71,7 @@ impl Shared { } // safety: only mutated with the lock held - let len = self.len.unsync_load(); + let len = unsafe { self.len.unsync_load() }; let task = task.into_raw(); // The next pointer should already be null @@ -95,7 +95,7 @@ impl Shared { /// /// Must be called with the same `Synced` instance returned by `Inject::new` pub(crate) unsafe fn pop(&self, synced: &mut Synced) -> Option> { - self.pop_n(synced, 1).next() + unsafe { self.pop_n(synced, 1).next() } } /// Pop `n` values from the queue @@ -110,7 +110,7 @@ impl Shared { // safety: All updates to the len atomic are guarded by the mutex. As // such, a non-atomic load followed by a store is safe. - let len = self.len.unsync_load(); + let len = unsafe { self.len.unsync_load() }; let n = cmp::min(n, len); // Decrement the count. diff --git a/tokio/src/runtime/scheduler/mod.rs b/tokio/src/runtime/scheduler/mod.rs index ecd56aeee10..d52216f06f6 100644 --- a/tokio/src/runtime/scheduler/mod.rs +++ b/tokio/src/runtime/scheduler/mod.rs @@ -133,6 +133,7 @@ cfg_rt! { /// Spawn a local task /// /// # Safety + /// /// This should only be called in `LocalRuntime` if the runtime has been verified to be owned /// by the current thread. #[allow(irrefutable_let_patterns)] @@ -143,7 +144,8 @@ cfg_rt! { F::Output: 'static, { if let Handle::CurrentThread(h) = self { - current_thread::Handle::spawn_local(h, future, id, spawned_at) + // Safety: caller guarantees that this is a `LocalRuntime`. 
+ unsafe { current_thread::Handle::spawn_local(h, future, id, spawned_at) } } else { panic!("Only current_thread and LocalSet have spawn_local internals implemented") } diff --git a/tokio/src/runtime/task/core.rs b/tokio/src/runtime/task/core.rs index e91e8be4025..e28bca617d4 100644 --- a/tokio/src/runtime/task/core.rs +++ b/tokio/src/runtime/task/core.rs @@ -9,6 +9,15 @@ //! Make sure to consult the relevant safety section of each function before //! use. +// It doesn't make sense to enforce `unsafe_op_in_unsafe_fn` for this module because +// +// * This module is doing the low-level task management that requires tons of unsafe +// operations. +// * Excessive `unsafe {}` blocks hurt readability significantly. +// TODO: replace with `#[expect(unsafe_op_in_unsafe_fn)]` after bumpping +// the MSRV to 1.81.0. +#![allow(unsafe_op_in_unsafe_fn)] + use crate::future::Future; use crate::loom::cell::UnsafeCell; use crate::runtime::context; diff --git a/tokio/src/runtime/task/list.rs b/tokio/src/runtime/task/list.rs index 908ce07ecf6..df119dbcd08 100644 --- a/tokio/src/runtime/task/list.rs +++ b/tokio/src/runtime/task/list.rs @@ -106,6 +106,7 @@ impl OwnedTasks { /// Bind a task that isn't safe to transfer across thread boundaries. /// /// # Safety + /// /// Only use this in `LocalRuntime` where the task cannot move pub(crate) unsafe fn bind_local( &self, diff --git a/tokio/src/runtime/task/mod.rs b/tokio/src/runtime/task/mod.rs index 093b6f6caad..d94cc68764b 100644 --- a/tokio/src/runtime/task/mod.rs +++ b/tokio/src/runtime/task/mod.rs @@ -398,8 +398,11 @@ impl Task { } } + /// # Safety + /// + /// `ptr` must be a valid pointer to a [`Header`]. unsafe fn from_raw(ptr: NonNull
) -> Task { - Task::new(RawTask::from_raw(ptr)) + unsafe { Task::new(RawTask::from_raw(ptr)) } } #[cfg(all( @@ -479,8 +482,11 @@ impl Notified { } impl Notified { + /// # Safety + /// + /// [`RawTask::ptr`] must be a valid pointer to a [`Header`]. pub(crate) unsafe fn from_raw(ptr: RawTask) -> Notified { - Notified(Task::new(ptr)) + Notified(unsafe { Task::new(ptr) }) } } @@ -597,11 +603,11 @@ unsafe impl linked_list::Link for Task { } unsafe fn from_raw(ptr: NonNull
) -> Task { - Task::from_raw(ptr) + unsafe { Task::from_raw(ptr) } } unsafe fn pointers(target: NonNull
) -> NonNull> { - self::core::Trailer::addr_of_owned(Header::get_trailer(target)) + unsafe { self::core::Trailer::addr_of_owned(Header::get_trailer(target)) } } } diff --git a/tokio/src/runtime/task/raw.rs b/tokio/src/runtime/task/raw.rs index e9a37802203..164e9422d68 100644 --- a/tokio/src/runtime/task/raw.rs +++ b/tokio/src/runtime/task/raw.rs @@ -1,3 +1,12 @@ +// It doesn't make sense to enforce `unsafe_op_in_unsafe_fn` for this module because +// +// * This module is doing the low-level task management that requires tons of unsafe +// operations. +// * Excessive `unsafe {}` blocks hurt readability significantly. +// TODO: replace with `#[expect(unsafe_op_in_unsafe_fn)]` after bumpping +// the MSRV to 1.81.0. +#![allow(unsafe_op_in_unsafe_fn)] + use crate::future::Future; use crate::runtime::task::core::{Core, Trailer}; use crate::runtime::task::{Cell, Harness, Header, Id, Schedule, State}; @@ -222,6 +231,9 @@ impl RawTask { RawTask { ptr } } + /// # Safety + /// + /// `ptr` must be a valid pointer to a [`Header`]. pub(super) unsafe fn from_raw(ptr: NonNull
) -> RawTask { RawTask { ptr } } diff --git a/tokio/src/runtime/task/trace/mod.rs b/tokio/src/runtime/task/trace/mod.rs index 648d3cc2263..7b018185ce8 100644 --- a/tokio/src/runtime/task/trace/mod.rs +++ b/tokio/src/runtime/task/trace/mod.rs @@ -82,14 +82,18 @@ impl Context { where F: FnOnce(&Self) -> R, { - crate::runtime::context::with_trace(f) + unsafe { crate::runtime::context::with_trace(f) } } + /// SAFETY: Callers of this function must ensure that trace frames always + /// form a valid linked list. unsafe fn with_current_frame(f: F) -> R where F: FnOnce(&Cell>>) -> R, { - Self::try_with_current(|context| f(&context.active_frame)).expect(FAIL_NO_THREAD_LOCAL) + unsafe { + Self::try_with_current(|context| f(&context.active_frame)).expect(FAIL_NO_THREAD_LOCAL) + } } fn with_current_collector(f: F) -> R @@ -313,7 +317,8 @@ cfg_rt_multi_thread! { // clear the injection queue let mut synced = synced.lock(); - while let Some(notified) = injection.pop(&mut synced.inject) { + // Safety: exactly the same safety requirements as `trace_multi_thread` function. + while let Some(notified) = unsafe { injection.pop(&mut synced.inject) } { dequeued.push(notified); } diff --git a/tokio/src/runtime/task/waker.rs b/tokio/src/runtime/task/waker.rs index 2a1568fe8f7..712260d2216 100644 --- a/tokio/src/runtime/task/waker.rs +++ b/tokio/src/runtime/task/waker.rs @@ -42,6 +42,9 @@ impl ops::Deref for WakerRef<'_, S> { } cfg_trace! { + /// # Safety + /// + /// `$header` must be a valid pointer to a [`Header`]. macro_rules! trace { ($header:expr, $op:expr) => { if let Some(id) = Header::get_tracing_id(&$header) { @@ -65,31 +68,50 @@ cfg_not_trace! { } unsafe fn clone_waker(ptr: *const ()) -> RawWaker { - let header = NonNull::new_unchecked(ptr as *mut Header); - trace!(header, "waker.clone"); - header.as_ref().state.ref_inc(); + // Safety: `ptr` was created from a `Header` pointer in function `waker_ref`. 
+ let header = unsafe { NonNull::new_unchecked(ptr as *mut Header) }; + #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_unsafe))] + unsafe { + trace!(header, "waker.clone"); + } + unsafe { header.as_ref() }.state.ref_inc(); raw_waker(header) } unsafe fn drop_waker(ptr: *const ()) { - let ptr = NonNull::new_unchecked(ptr as *mut Header); - trace!(ptr, "waker.drop"); - let raw = RawTask::from_raw(ptr); + // Safety: `ptr` was created from a `Header` pointer in function `waker_ref`. + let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) }; + // TODO; replace to #[expect(unused_unsafe)] after bumping MSRV to 1.81.0. + #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_unsafe))] + unsafe { + trace!(ptr, "waker.drop"); + } + let raw = unsafe { RawTask::from_raw(ptr) }; raw.drop_reference(); } unsafe fn wake_by_val(ptr: *const ()) { - let ptr = NonNull::new_unchecked(ptr as *mut Header); - trace!(ptr, "waker.wake"); - let raw = RawTask::from_raw(ptr); + // Safety: `ptr` was created from a `Header` pointer in function `waker_ref`. + let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) }; + // TODO; replace to #[expect(unused_unsafe)] after bumping MSRV to 1.81.0. + #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_unsafe))] + unsafe { + trace!(ptr, "waker.wake"); + } + let raw = unsafe { RawTask::from_raw(ptr) }; raw.wake_by_val(); } // Wake without consuming the waker unsafe fn wake_by_ref(ptr: *const ()) { - let ptr = NonNull::new_unchecked(ptr as *mut Header); - trace!(ptr, "waker.wake_by_ref"); - let raw = RawTask::from_raw(ptr); + // Safety: `ptr` was created from a `Header` pointer in function `waker_ref`. + let ptr = unsafe { NonNull::new_unchecked(ptr as *mut Header) }; + // TODO; replace to #[expect(unused_unsafe)] after bumping MSRV to 1.81.0. 
+ #[cfg_attr(not(all(tokio_unstable, feature = "tracing")), allow(unused_unsafe))] + unsafe { + trace!(ptr, "waker.wake_by_ref"); + } + let raw = unsafe { RawTask::from_raw(ptr) }; raw.wake_by_ref(); } diff --git a/tokio/src/runtime/time/entry.rs b/tokio/src/runtime/time/entry.rs index 627fcbc5ec3..736105d5abf 100644 --- a/tokio/src/runtime/time/entry.rs +++ b/tokio/src/runtime/time/entry.rs @@ -471,7 +471,7 @@ unsafe impl linked_list::Link for TimerShared { unsafe fn pointers( target: NonNull, ) -> NonNull> { - TimerShared::addr_of_pointers(target) + unsafe { TimerShared::addr_of_pointers(target) } } } @@ -644,7 +644,9 @@ impl TimerHandle { /// SAFETY: The caller must ensure that the handle remains valid, the driver /// lock is held, and that the timer is not in any wheel linked lists. pub(super) unsafe fn set_expiration(&self, tick: u64) { - self.inner.as_ref().set_expiration(tick); + unsafe { + self.inner.as_ref().set_expiration(tick); + } } /// Attempts to mark this entry as pending. If the expiration time is after @@ -657,14 +659,18 @@ impl TimerHandle { /// lock is held, and that the timer is not in any wheel linked lists. /// After returning Ok, the entry must be added to the pending list. pub(super) unsafe fn mark_pending(&self, not_after: u64) -> Result<(), u64> { - match self.inner.as_ref().state.mark_pending(not_after) { + match unsafe { self.inner.as_ref().state.mark_pending(not_after) } { Ok(()) => { // mark this as being on the pending queue in registered_when - self.inner.as_ref().set_registered_when(STATE_DEREGISTERED); + unsafe { + self.inner.as_ref().set_registered_when(STATE_DEREGISTERED); + } Ok(()) } Err(tick) => { - self.inner.as_ref().set_registered_when(tick); + unsafe { + self.inner.as_ref().set_registered_when(tick); + } Err(tick) } } @@ -682,6 +688,6 @@ impl TimerHandle { /// SAFETY: The driver lock must be held while invoking this function, and /// the entry must not be in any wheel linked lists. 
pub(super) unsafe fn fire(self, completed_state: TimerResult) -> Option { - self.inner.as_ref().state.fire(completed_state) + unsafe { self.inner.as_ref().state.fire(completed_state) } } } diff --git a/tokio/src/runtime/time/wheel/level.rs b/tokio/src/runtime/time/wheel/level.rs index 754e638bf57..2fd20e56f40 100644 --- a/tokio/src/runtime/time/wheel/level.rs +++ b/tokio/src/runtime/time/wheel/level.rs @@ -120,7 +120,7 @@ impl Level { } pub(crate) unsafe fn add_entry(&mut self, item: TimerHandle) { - let slot = slot_for(item.registered_when(), self.level); + let slot = slot_for(unsafe { item.registered_when() }, self.level); self.slot[slot].push_front(item); diff --git a/tokio/src/runtime/time/wheel/mod.rs b/tokio/src/runtime/time/wheel/mod.rs index 8d94303544c..89adc8f2dcb 100644 --- a/tokio/src/runtime/time/wheel/mod.rs +++ b/tokio/src/runtime/time/wheel/mod.rs @@ -91,7 +91,7 @@ impl Wheel { &mut self, item: TimerHandle, ) -> Result { - let when = item.sync_when(); + let when = unsafe { item.sync_when() }; if when <= self.elapsed { return Err((item, InsertError::Elapsed)); diff --git a/tokio/src/signal/reusable_box.rs b/tokio/src/signal/reusable_box.rs index 796fa210b03..e8274d11df8 100644 --- a/tokio/src/signal/reusable_box.rs +++ b/tokio/src/signal/reusable_box.rs @@ -81,18 +81,21 @@ impl ReusableBoxFuture { F: Future + Send + 'static, { // Drop the existing future, catching any panics. - let result = panic::catch_unwind(AssertUnwindSafe(|| { + let result = panic::catch_unwind(AssertUnwindSafe(|| unsafe { ptr::drop_in_place(self.boxed.as_ptr()); })); // Overwrite the future behind the pointer. This is safe because the // allocation was allocated with the same size and alignment as the type F. let self_ptr: *mut F = self.boxed.as_ptr() as *mut F; - ptr::write(self_ptr, future); + // SAFETY: The pointer is valid and the layout is exactly same. + unsafe { + ptr::write(self_ptr, future); + } // Update the vtable of self.boxed. 
The pointer is not null because we // just got it from self.boxed, which is not null. - self.boxed = NonNull::new_unchecked(self_ptr); + self.boxed = unsafe { NonNull::new_unchecked(self_ptr) }; // If the old future's destructor panicked, resume unwinding. match result { diff --git a/tokio/src/signal/windows/sys.rs b/tokio/src/signal/windows/sys.rs index 8d81ad75f5b..70329e2b6fc 100644 --- a/tokio/src/signal/windows/sys.rs +++ b/tokio/src/signal/windows/sys.rs @@ -157,9 +157,9 @@ mod tests { if event_requires_infinite_sleep_in_handler(signum) { // Those events will enter an infinite loop in `handler`, so // we need to run them on a separate thread - std::thread::spawn(move || super::handler(signum)); + std::thread::spawn(move || unsafe { super::handler(signum) }); } else { - super::handler(signum); + unsafe { super::handler(signum) }; } } diff --git a/tokio/src/sync/batch_semaphore.rs b/tokio/src/sync/batch_semaphore.rs index b89a5a9a768..8662af3bbd1 100644 --- a/tokio/src/sync/batch_semaphore.rs +++ b/tokio/src/sync/batch_semaphore.rs @@ -775,6 +775,6 @@ unsafe impl linked_list::Link for Waiter { } unsafe fn pointers(target: NonNull) -> NonNull> { - Waiter::addr_of_pointers(target) + unsafe { Waiter::addr_of_pointers(target) } } } diff --git a/tokio/src/sync/broadcast.rs b/tokio/src/sync/broadcast.rs index 05dad6393d9..3ab9a2ca44c 100644 --- a/tokio/src/sync/broadcast.rs +++ b/tokio/src/sync/broadcast.rs @@ -1678,7 +1678,7 @@ unsafe impl linked_list::Link for Waiter { } unsafe fn pointers(target: NonNull) -> NonNull> { - Waiter::addr_of_pointers(target) + unsafe { Waiter::addr_of_pointers(target) } } } diff --git a/tokio/src/sync/mpsc/block.rs b/tokio/src/sync/mpsc/block.rs index 927c4566463..adc3b57e356 100644 --- a/tokio/src/sync/mpsc/block.rs +++ b/tokio/src/sync/mpsc/block.rs @@ -163,9 +163,15 @@ impl Block { } // Get the value - let value = self.values[offset].with(|ptr| ptr::read(ptr)); + // + // Safety: + // + // 1. 
The caller guarantees that there is no concurrent access to the slot. + // 2. The `UnsafeCell` always gives us a valid pointer to the value. + let value = self.values[offset].with(|ptr| unsafe { ptr::read(ptr) }); - Some(Read::Value(value.assume_init())) + // Safety: the ready bit is set, so the value has been initialized. + Some(Read::Value(unsafe { value.assume_init() })) } /// Returns true if *this* block has a value in the given slot. @@ -197,7 +203,10 @@ impl Block { let slot_offset = offset(slot_index); self.values[slot_offset].with_mut(|ptr| { - ptr::write(ptr, MaybeUninit::new(value)); + // Safety: the caller guarantees that there is no concurrent access to the slot + unsafe { + ptr::write(ptr, MaybeUninit::new(value)); + } }); // Release the value. After this point, the slot ref may no longer @@ -246,7 +255,11 @@ impl Block { // tail_position is guaranteed to not access this block. self.header .observed_tail_position - .with_mut(|ptr| *ptr = tail_position); + // Safety: + // + // 1. The caller guarantees unique access to the block. + // 2. The `UnsafeCell` always gives us a valid pointer. + .with_mut(|ptr| unsafe { *ptr = tail_position }); // Set the released bit, signalling to the receiver that it is safe to // free the block's memory as soon as all slots **prior** to @@ -316,7 +329,9 @@ impl Block { success: Ordering, failure: Ordering, ) -> Result<(), NonNull>> { - block.as_mut().header.start_index = self.header.start_index.wrapping_add(BLOCK_CAP); + // Safety: caller guarantees that `block` is valid. + unsafe { block.as_mut() }.header.start_index = + self.header.start_index.wrapping_add(BLOCK_CAP); let next_ptr = self .header @@ -428,8 +443,9 @@ impl Values { if_loom! 
{ let p = _value.as_ptr() as *mut UnsafeCell>; for i in 0..BLOCK_CAP { - p.add(i) - .write(UnsafeCell::new(MaybeUninit::uninit())); + unsafe { + p.add(i).write(UnsafeCell::new(MaybeUninit::uninit())); + } } } } diff --git a/tokio/src/sync/mpsc/list.rs b/tokio/src/sync/mpsc/list.rs index 118bac85633..289be0bcf0a 100644 --- a/tokio/src/sync/mpsc/list.rs +++ b/tokio/src/sync/mpsc/list.rs @@ -184,6 +184,13 @@ impl Tx { } } + /// # Safety + /// + /// Behavior is undefined if any of the following conditions are violated: + /// + /// - The `block` was created by [`Box::into_raw`]. + /// - The `block` is not currently part of any linked list. + /// - The `block` is a valid pointer to a [`Block`]. pub(crate) unsafe fn reclaim_block(&self, mut block: NonNull>) { // The block has been removed from the linked list and ownership // is reclaimed. @@ -192,24 +199,28 @@ impl Tx { // inserting it back at the end of the linked list. // // First, reset the data - block.as_mut().reclaim(); + // + // Safety: caller guarantees the block is valid and not in any list. + unsafe { + block.as_mut().reclaim(); + } let mut reused = false; // Attempt to insert the block at the end // // Walk at most three times - // let curr_ptr = self.block_tail.load(Acquire); // The pointer can never be null debug_assert!(!curr_ptr.is_null()); - let mut curr = NonNull::new_unchecked(curr_ptr); + // Safety: curr_ptr is never null. + let mut curr = unsafe { NonNull::new_unchecked(curr_ptr) }; // TODO: Unify this logic with Block::grow for _ in 0..3 { - match curr.as_ref().try_push(&mut block, AcqRel, Acquire) { + match unsafe { curr.as_ref().try_push(&mut block, AcqRel, Acquire) } { Ok(()) => { reused = true; break; @@ -221,7 +232,11 @@ impl Tx { } if !reused { - let _ = Box::from_raw(block.as_ptr()); + // Safety: + // + // 1. Caller guarantees the block is valid and not in any list. + // 2. The block was created by `Box::into_raw`. 
+ let _ = unsafe { Box::from_raw(block.as_ptr()) }; } } @@ -387,8 +402,8 @@ impl Rx { } while let Some(block) = cur { - cur = block.as_ref().load_next(Relaxed); - drop(Box::from_raw(block.as_ptr())); + cur = unsafe { block.as_ref() }.load_next(Relaxed); + drop(unsafe { Box::from_raw(block.as_ptr()) }); } } } diff --git a/tokio/src/sync/notify.rs b/tokio/src/sync/notify.rs index 4539b3cf2fe..33bce0e4f66 100644 --- a/tokio/src/sync/notify.rs +++ b/tokio/src/sync/notify.rs @@ -1385,7 +1385,7 @@ unsafe impl linked_list::Link for Waiter { } unsafe fn pointers(target: NonNull) -> NonNull> { - Waiter::addr_of_pointers(target) + unsafe { Waiter::addr_of_pointers(target) } } } diff --git a/tokio/src/sync/once_cell.rs b/tokio/src/sync/once_cell.rs index 92194a5404e..70ba2b3160b 100644 --- a/tokio/src/sync/once_cell.rs +++ b/tokio/src/sync/once_cell.rs @@ -244,12 +244,16 @@ impl OnceCell { // SAFETY: The OnceCell must not be empty. unsafe fn get_unchecked(&self) -> &T { - &*self.value.with(|ptr| (*ptr).as_ptr()) + unsafe { &*self.value.with(|ptr| (*ptr).as_ptr()) } } // SAFETY: The OnceCell must not be empty. unsafe fn get_unchecked_mut(&mut self) -> &mut T { - &mut *self.value.with_mut(|ptr| (*ptr).as_mut_ptr()) + // SAFETY: + // + // 1. The caller guarantees that the OnceCell is initialized. + // 2. The `&mut self` guarantees that there are no other references to the value. + unsafe { &mut *self.value.with_mut(|ptr| (*ptr).as_mut_ptr()) } } fn set_value(&self, value: T, permit: SemaphorePermit<'_>) -> &T { diff --git a/tokio/src/sync/oneshot.rs b/tokio/src/sync/oneshot.rs index a93cea2367d..c5857f65491 100644 --- a/tokio/src/sync/oneshot.rs +++ b/tokio/src/sync/oneshot.rs @@ -404,31 +404,51 @@ struct Inner { struct Task(UnsafeCell>); impl Task { + /// # Safety + /// + /// The caller must do the necessary synchronization to ensure that + /// the [`Self::0`] contains the valid [`Waker`] during the call. 
unsafe fn will_wake(&self, cx: &mut Context<'_>) -> bool { - self.with_task(|w| w.will_wake(cx.waker())) + unsafe { self.with_task(|w| w.will_wake(cx.waker())) } } + /// # Safety + /// + /// The caller must do the necessary synchronization to ensure that + /// the [`Self::0`] contains the valid [`Waker`] during the call. unsafe fn with_task(&self, f: F) -> R where F: FnOnce(&Waker) -> R, { self.0.with(|ptr| { - let waker: *const Waker = (*ptr).as_ptr(); - f(&*waker) + let waker: *const Waker = unsafe { (*ptr).as_ptr() }; + f(unsafe { &*waker }) }) } + /// # Safety + /// + /// The caller must do the necessary synchronization to ensure that + /// the [`Self::0`] contains the valid [`Waker`] during the call. unsafe fn drop_task(&self) { self.0.with_mut(|ptr| { - let ptr: *mut Waker = (*ptr).as_mut_ptr(); - ptr.drop_in_place(); + let ptr: *mut Waker = unsafe { (*ptr).as_mut_ptr() }; + unsafe { + ptr.drop_in_place(); + } }); } + /// # Safety + /// + /// The caller must do the necessary synchronization to ensure that + /// the [`Self::0`] contains the valid [`Waker`] during the call. unsafe fn set_task(&self, cx: &mut Context<'_>) { self.0.with_mut(|ptr| { - let ptr: *mut Waker = (*ptr).as_mut_ptr(); - ptr.write(cx.waker().clone()); + let ptr: *mut Waker = unsafe { (*ptr).as_mut_ptr() }; + unsafe { + ptr.write(cx.waker().clone()); + } }); } } @@ -1377,7 +1397,7 @@ impl Inner { /// If `VALUE_SENT` is not set, then only the sender may call this method; /// if it is set, then only the receiver may call this method. unsafe fn consume_value(&self) -> Option { - self.value.with_mut(|ptr| (*ptr).take()) + self.value.with_mut(|ptr| unsafe { (*ptr).take() }) } /// Returns true if there is a value. This function does not check `state`. @@ -1390,7 +1410,7 @@ impl Inner { /// If `VALUE_SENT` is not set, then only the sender may call this method; /// if it is set, then only the receiver may call this method. 
unsafe fn has_value(&self) -> bool { - self.value.with(|ptr| (*ptr).is_some()) + self.value.with(|ptr| unsafe { (*ptr).is_some() }) } } diff --git a/tokio/src/sync/set_once.rs b/tokio/src/sync/set_once.rs index 3c4228e6a34..1648ff1080a 100644 --- a/tokio/src/sync/set_once.rs +++ b/tokio/src/sync/set_once.rs @@ -255,7 +255,7 @@ impl SetOnce { // SAFETY: The SetOnce must not be empty. unsafe fn get_unchecked(&self) -> &T { - &*self.value.with(|ptr| (*ptr).as_ptr()) + unsafe { &*self.value.with(|ptr| (*ptr).as_ptr()) } } /// Returns a reference to the value currently stored in the `SetOnce`, or diff --git a/tokio/src/sync/tests/notify.rs b/tokio/src/sync/tests/notify.rs index 83182464c50..540aa22444a 100644 --- a/tokio/src/sync/tests/notify.rs +++ b/tokio/src/sync/tests/notify.rs @@ -12,15 +12,17 @@ fn notify_clones_waker_before_lock() { unsafe fn clone_w(data: *const ()) -> RawWaker { let ptr = data as *const Notify; - Arc::::increment_strong_count(ptr); + unsafe { + Arc::::increment_strong_count(ptr); + } // Or some other arbitrary code that shouldn't be executed while the // Notify wait list is locked. - (*ptr).notify_one(); + unsafe { (*ptr).notify_one() }; RawWaker::new(data, VTABLE) } unsafe fn drop_w(data: *const ()) { - drop(Arc::::from_raw(data as *const Notify)); + drop(unsafe { Arc::::from_raw(data as *const Notify) }); } unsafe fn wake(_data: *const ()) { diff --git a/tokio/src/task/local.rs b/tokio/src/task/local.rs index 021e6277534..eb83a18eadf 100644 --- a/tokio/src/task/local.rs +++ b/tokio/src/task/local.rs @@ -1192,28 +1192,43 @@ impl task::Schedule for Arc { } impl LocalState { + /// # Safety + /// + /// This method must only be called from the thread who + /// has the same [`ThreadId`] as [`Self::owner`]. unsafe fn task_pop_front(&self) -> Option>> { // The caller ensures it is called from the same thread that owns // the LocalSet. 
self.assert_called_from_owner_thread(); - self.local_queue.with_mut(|ptr| (*ptr).pop_front()) + self.local_queue + .with_mut(|ptr| unsafe { (*ptr).pop_front() }) } + /// # Safety + /// + /// This method must only be called from the thread who + /// has the same [`ThreadId`] as [`Self::owner`]. unsafe fn task_push_back(&self, task: task::Notified>) { // The caller ensures it is called from the same thread that owns // the LocalSet. self.assert_called_from_owner_thread(); - self.local_queue.with_mut(|ptr| (*ptr).push_back(task)); + self.local_queue + .with_mut(|ptr| unsafe { (*ptr).push_back(task) }); } + /// # Safety + /// + /// This method must only be called from the thread who + /// has the same [`ThreadId`] as [`Self::owner`]. unsafe fn take_local_queue(&self) -> VecDeque>> { // The caller ensures it is called from the same thread that owns // the LocalSet. self.assert_called_from_owner_thread(); - self.local_queue.with_mut(|ptr| std::mem::take(&mut (*ptr))) + self.local_queue + .with_mut(|ptr| std::mem::take(unsafe { &mut (*ptr) })) } unsafe fn task_remove(&self, task: &Task>) -> Option>> { diff --git a/tokio/src/util/idle_notified_set.rs b/tokio/src/util/idle_notified_set.rs index 8fe13095a2c..ad266087569 100644 --- a/tokio/src/util/idle_notified_set.rs +++ b/tokio/src/util/idle_notified_set.rs @@ -349,7 +349,10 @@ impl IdleNotifiedSet { unsafe fn move_to_new_list(from: &mut LinkedList, to: &mut LinkedList) { while let Some(entry) = from.pop_back() { entry.my_list.with_mut(|ptr| { - *ptr = List::Neither; + // Safety: pointer is accessed while holding the mutex. 
+ unsafe {
 + *ptr = List::Neither; + } }); to.push_front(entry); } @@ -479,13 +482,13 @@ unsafe impl linked_list::Link for ListEntry { } unsafe fn from_raw(ptr: NonNull>) -> Arc> { - Arc::from_raw(ptr.as_ptr()) + unsafe { Arc::from_raw(ptr.as_ptr()) } } unsafe fn pointers( target: NonNull>, ) -> NonNull>> { - ListEntry::addr_of_pointers(target) + unsafe { ListEntry::addr_of_pointers(target) } } } diff --git a/tokio/src/util/linked_list.rs b/tokio/src/util/linked_list.rs index 3650f87fbb0..eba767f16da 100644 --- a/tokio/src/util/linked_list.rs +++ b/tokio/src/util/linked_list.rs @@ -1,4 +1,11 @@ #![cfg_attr(not(feature = "full"), allow(dead_code))] +// It doesn't make sense to enforce `unsafe_op_in_unsafe_fn` for this module because +// +// * The intrusive linked list naturally relies on unsafe operations. +// * Excessive `unsafe {}` blocks hurt readability significantly. +// TODO: replace with `#[expect(unsafe_op_in_unsafe_fn)]` after bumping +// the MSRV to 1.81.0. +#![allow(unsafe_op_in_unsafe_fn)] //! An intrusive double linked list of data. //! @@ -64,6 +71,8 @@ pub(crate) unsafe trait Link { /// stack as the argument. In particular, the method may not create an /// intermediate reference in the process of creating the resulting raw /// pointer. + /// + /// The `target` pointer must be valid. unsafe fn pointers(target: NonNull) -> NonNull>; } diff --git a/tokio/src/util/rc_cell.rs b/tokio/src/util/rc_cell.rs index 44724926848..399017b5649 100644 --- a/tokio/src/util/rc_cell.rs +++ b/tokio/src/util/rc_cell.rs @@ -35,7 +35,7 @@ impl RcCell { // not called recursively. Finally, this is the only place that can // create mutable references to the inner Rc. This ensures that any // mutable references created here are exclusive. 
- self.inner.with_mut(|ptr| f(&mut *ptr)) + self.inner.with_mut(|ptr| f(unsafe { &mut *ptr })) } pub(crate) fn get(&self) -> Option> { diff --git a/tokio/src/util/sharded_list.rs b/tokio/src/util/sharded_list.rs index 525cec9ea77..9fce8fa0792 100644 --- a/tokio/src/util/sharded_list.rs +++ b/tokio/src/util/sharded_list.rs @@ -27,6 +27,7 @@ pub(crate) struct ShardedList { /// call to call. pub(crate) unsafe trait ShardedListItem: Link { /// # Safety + /// /// The provided pointer must point at a valid list item. unsafe fn get_shard_id(target: NonNull) -> usize; } @@ -79,7 +80,7 @@ impl ShardedList { /// - `node` is not contained by any list, /// - `node` is currently contained by some other `GuardedLinkedList`. pub(crate) unsafe fn remove(&self, node: NonNull) -> Option { - let id = L::get_shard_id(node); + let id = unsafe { L::get_shard_id(node) }; let mut lock = self.shard_inner(id); // SAFETY: Since the shard id cannot change, it's not possible for this node // to be in any other list of the same sharded list. diff --git a/tokio/src/util/typeid.rs b/tokio/src/util/typeid.rs index 1cd14b59c9a..990f58ac4fc 100644 --- a/tokio/src/util/typeid.rs +++ b/tokio/src/util/typeid.rs @@ -9,7 +9,8 @@ use std::{ pub(super) unsafe fn try_transmute(x: Src) -> Result { if nonstatic_typeid::() == TypeId::of::() { let x = ManuallyDrop::new(x); - Ok(mem::transmute_copy::(&x)) + // SAFETY: we have checked that the types are the same. + Ok(unsafe { mem::transmute_copy::(&x) }) } else { Err(x) } diff --git a/tokio/src/util/wake.rs b/tokio/src/util/wake.rs index 896ec73e7b1..e3cf093f0c5 100644 --- a/tokio/src/util/wake.rs +++ b/tokio/src/util/wake.rs @@ -51,22 +51,28 @@ fn waker_vtable() -> &'static RawWakerVTable { } unsafe fn clone_arc_raw(data: *const ()) -> RawWaker { - Arc::::increment_strong_count(data as *const T); + // Safety: `data` was created from an `Arc::as_ptr` in function `waker_ref`. 
+ unsafe { + Arc::::increment_strong_count(data as *const T); + } RawWaker::new(data, waker_vtable::()) } unsafe fn wake_arc_raw(data: *const ()) { - let arc: Arc = Arc::from_raw(data as *const T); + // Safety: `data` was created from an `Arc::as_ptr` in function `waker_ref`. + let arc: Arc = unsafe { Arc::from_raw(data as *const T) }; Wake::wake(arc); } // used by `waker_ref` unsafe fn wake_by_ref_arc_raw(data: *const ()) { // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop - let arc = ManuallyDrop::new(Arc::::from_raw(data.cast())); + // Safety: `data` was created from an `Arc::as_ptr` in function `waker_ref`. + let arc = ManuallyDrop::new(unsafe { Arc::::from_raw(data.cast()) }); Wake::wake_by_ref(&arc); } unsafe fn drop_arc_raw(data: *const ()) { - drop(Arc::::from_raw(data.cast())); + // Safety: `data` was created from an `Arc::as_ptr` in function `waker_ref`. + drop(unsafe { Arc::::from_raw(data.cast()) }); }