diff --git a/Cargo.toml b/Cargo.toml
index 7a3c471c..644edb04 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -26,14 +26,13 @@ checkpoint = ["serde", "serde_derive", "serde_json"]
 
 [dependencies]
 cfg-if = "0.1.6"
-# libc = "0.2.44"
 scoped-tls = "0.1.2"
 
 # Provides a generator based runtime
 generator = { version = "0.6.10", optional = true }
 
 # Provides a runtime based on libfringe. Requires nightly.
-# fringe = { git = "https://github.com/carllerche/libfringe", branch = "track-nightly", optional = true }
+fringe = { git = "https://github.com/carllerche/libfringe", branch = "track-nightly", optional = true }
 
 # Optional futures support
 futures = { version = "0.1.25", optional = true }
@@ -42,3 +41,9 @@ futures = { version = "0.1.25", optional = true }
 serde = { version = "1.0.80", optional = true }
 serde_derive = { version = "1.0.80", optional = true }
 serde_json = { version = "1.0.33", optional = true }
+
+[target.'cfg(unix)'.dependencies]
+libc = "0.2.44"
+
+[target.'cfg(windows)'.dependencies]
+winapi = { version = "0.3.7", features = ["basetsd", "memoryapi", "minwindef", "sysinfoapi", "winnt"] }
diff --git a/src/futures/atomic_task.rs b/src/futures/atomic_task.rs
index 296b8633..52c07a31 100644
--- a/src/futures/atomic_task.rs
+++ b/src/futures/atomic_task.rs
@@ -22,7 +22,7 @@ impl AtomicTask {
             task: RefCell::new(None),
             // TODO: Make a custom object?
             object: execution.objects.insert(Object::condvar()),
-            sync: RefCell::new(Synchronize::new(execution.threads.max())),
+            sync: RefCell::new(Synchronize::new(&mut execution.arena, execution.threads.max())),
         }
     })
 }
diff --git a/src/lib.rs b/src/lib.rs
index 16da9b03..8affb9d8 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -113,7 +113,13 @@
 #[macro_use]
 extern crate cfg_if;
-// extern crate libc;
+
+#[cfg(unix)]
+extern crate libc;
+
+#[cfg(windows)]
+extern crate winapi;
+
 #[macro_use]
 extern crate scoped_tls;
diff --git a/src/rt/arena.rs b/src/rt/arena.rs
index f8ddf112..ab816b7e 100644
--- a/src/rt/arena.rs
+++ b/src/rt/arena.rs
@@ -4,23 +4,32 @@ use std::alloc::Layout;
 use std::cell::Cell;
 use std::cmp;
 use std::fmt;
+use std::marker;
 use std::ops::{Deref, DerefMut};
 use std::ptr;
 use std::rc::Rc;
 use std::slice;
 
+#[cfg(feature = "checkpoint")]
+use serde::{Serialize, Serializer};
+
 #[derive(Debug)]
 pub struct Arena {
-    // inner: Rc<Inner>,
+    inner: Rc<Inner>,
 }
 
-/*
 pub struct Slice<T> {
     ptr: *mut T,
     len: usize,
     _inner: Rc<Inner>,
 }
 
+pub struct Iter<'a, T: 'a> {
+    ptr: *const T,
+    end: *const T,
+    _marker: marker::PhantomData<&'a T>,
+}
+
 #[derive(Debug)]
 struct Inner {
     /// Head of the arena space
@@ -32,92 +41,138 @@ struct Inner {
     /// Total capacity of the arena
    cap: usize,
 }
-*/
+
+#[cfg(unix)]
+fn create_mapping(capacity: usize) -> *mut u8 {
+    let ptr = unsafe {
+        libc::mmap(
+            ptr::null_mut(),
+            capacity,
+            libc::PROT_READ | libc::PROT_WRITE,
+            libc::MAP_ANON | libc::MAP_PRIVATE,
+            -1,
+            0,
+        )
+    };
+
+    ptr as *mut u8
+}
+
+#[cfg(windows)]
+fn get_page_size() -> usize {
+    use std::mem;
+    use winapi::um::sysinfoapi::GetSystemInfo;
+
+    unsafe {
+        let mut info = mem::zeroed();
+        GetSystemInfo(&mut info);
+
+        info.dwPageSize as usize
+    }
+}
+
+#[cfg(windows)]
+fn create_mapping(capacity: usize) -> *mut u8 {
+    use std::ptr;
+    use winapi::shared::basetsd::SIZE_T;
+    use winapi::shared::minwindef::LPVOID;
+    use winapi::um::memoryapi::VirtualAlloc;
+    use winapi::um::winnt::{PAGE_READWRITE, MEM_COMMIT, MEM_RESERVE};
+
+    let lpAddress: LPVOID = ptr::null_mut();
+    let page_size = get_page_size();
+    let len = if capacity % page_size == 0 {
+        capacity
+    } else {
+        capacity + page_size - (capacity % page_size)
+    };
+    let flAllocationType = MEM_COMMIT | MEM_RESERVE;
+    let flProtect = PAGE_READWRITE;
+
+    let r = unsafe {
+        VirtualAlloc(lpAddress, len as SIZE_T, flAllocationType, flProtect)
+    };
+
+    r as *mut u8
+}
 
 impl Arena {
     /// Create an `Arena` with specified capacity.
     ///
     /// Capacity must be a power of 2. The capacity cannot be grown after the fact.
     pub fn with_capacity(capacity: usize) -> Arena {
-        Arena {}
-        /*
-        let head = unsafe {
-            libc::mmap(
-                ptr::null_mut(),
-                capacity,
-                libc::PROT_READ | libc::PROT_WRITE,
-                libc::MAP_ANON | libc::MAP_PRIVATE,
-                -1,
-                0,
-            )
-        };
+        let head = create_mapping(capacity);
 
         Arena {
             inner: Rc::new(Inner {
-                head: head as *mut u8,
+                head,
                 pos: Cell::new(0),
                 cap: capacity,
-            }),
+            })
         }
-        */
     }
 
     pub fn clear(&mut self) {
-        /*
         assert!(1 == Rc::strong_count(&self.inner));
         self.inner.pos.set(0);
-        */
     }
-}
 
-/*
     pub fn slice<T>(&mut self, len: usize) -> Slice<T>
     where
         T: Default,
     {
-        let ptr: *mut T = self.allocate(len);
+        slice(&self.inner, len)
+    }
+}
 
-        for i in 0..len {
-            unsafe {
-                ptr::write(ptr.offset(i as isize), T::default());
-            }
-        }
+fn slice<T>(inner: &Rc<Inner>, len: usize) -> Slice<T>
+where
+    T: Default,
+{
+    let ptr: *mut T = allocate(inner, len);
 
-        Slice {
-            ptr,
-            len,
-            _inner: self.inner.clone(),
+    for i in 0..len {
+        unsafe {
+            ptr::write(ptr.offset(i as isize), T::default());
         }
     }
 
-    fn allocate<T>(&mut self, count: usize) -> *mut T {
-        let layout = Layout::new::<T>();
-        let mask = layout.align() - 1;
-        let pos = self.inner.pos.get();
+    Slice {
+        ptr,
+        len,
+        _inner: inner.clone(),
+    }
+}
 
-        debug_assert!(layout.align() >= (pos & mask));
+fn allocate<T>(inner: &Rc<Inner>, count: usize) -> *mut T {
+    let layout = Layout::new::<T>();
+    let mask = layout.align() - 1;
+    let pos = inner.pos.get();
 
-        let mut skip = layout.align() - (pos & mask);
+    debug_assert!(layout.align() >= (pos & mask));
 
-        if skip == layout.align() {
-            skip = 0;
-        }
+    let mut skip = layout.align() - (pos & mask);
 
-        let additional = skip + layout.size() * count;
+    if skip == layout.align() {
+        skip = 0;
+    }
 
-        assert!(pos + additional <= self.inner.cap, "arena overflow");
+    let additional = skip + layout.size() * count;
 
-        self.inner.pos.set(pos + additional);
+    assert!(pos + additional <= inner.cap, "arena overflow");
 
-        let ret = unsafe { self.inner.head.offset((pos + skip) as isize) as *mut T };
+    inner.pos.set(pos + additional);
 
-        debug_assert!((ret as usize) >= self.inner.head as usize);
-        debug_assert!((ret as usize) < (self.inner.head as usize + self.inner.cap));
+    let ret = unsafe { inner.head.offset((pos + skip) as isize) as *mut T };
 
-        ret
-    }
+    debug_assert!((ret as usize) >= inner.head as usize);
+    debug_assert!((ret as usize) < (inner.head as usize + inner.cap));
+
+    ret
 }
 
+#[cfg(unix)]
 impl Drop for Inner {
     fn drop(&mut self) {
         let res = unsafe { libc::munmap(self.head as *mut libc::c_void, self.cap) };
@@ -127,20 +182,50 @@ impl Drop for Inner {
     }
 }
 
-impl<T: Clone> Slice<T> {
-    pub fn clone_with(&self, arena: &mut Arena) -> Slice<T> {
-        let ptr: *mut T = arena.allocate(self.len);
+#[cfg(windows)]
+impl Drop for Inner {
+    fn drop(&mut self) {
+        use winapi::shared::minwindef::LPVOID;
+        use winapi::um::memoryapi::VirtualFree;
+        use winapi::um::winnt::MEM_RELEASE;
+
+        let res = unsafe { VirtualFree(self.head as LPVOID, 0, MEM_RELEASE) };
+
+        // TODO: Do something on error
+        debug_assert_ne!(res, 0);
+    }
+}
+
+impl<T> Slice<T> {
+    pub fn iter(&self) -> Iter<T> {
+        unsafe {
+            // no ZST support
+            let ptr = self.ptr;
+            let end = self.ptr.add(self.len);
+
+            Iter {
+                ptr,
+                end,
+                _marker: marker::PhantomData,
+            }
+        }
+    }
+}
+
+impl<T: Clone> Clone for Slice<T> {
+    fn clone(&self) -> Self {
+        let ptr: *mut T = allocate(&self._inner, self.len);
 
         for i in 0..self.len {
             unsafe {
-                ptr::write(ptr.offset(i as isize), self[i].clone());
+                ptr::write(ptr.offset(i as isize), (*self.ptr.offset(i as isize)).clone());
             }
         }
 
         Slice {
             ptr,
             len: self.len,
-            _inner: arena.inner.clone(),
+            _inner: self._inner.clone(),
         }
     }
 }
@@ -181,11 +266,41 @@ impl<T: PartialOrd> PartialOrd for Slice<T> {
 
 impl<T> Drop for Slice<T> {
     fn drop(&mut self) {
-        for i in 0..self.len {
+        unsafe {
+            ptr::drop_in_place(&mut self[..]);
+        }
+    }
+}
+
+#[cfg(feature = "checkpoint")]
+impl<T> Serialize for Slice<T>
+where
+    T: Serialize,
+{
+    #[inline]
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.collect_seq(self.iter())
+    }
+}
+
+impl<'a, T> Iterator for Iter<'a, T> {
+    type Item = &'a T;
+
+    #[inline]
+    fn next(&mut self) -> Option<&'a T> {
+        if self.ptr == self.end {
+            None
+        } else {
             unsafe {
-                ptr::read(self.ptr.offset(i as isize) as *const _);
+                // we do not handle ZSTs right now; the stdlib does some
+                // dancing for this, which we can safely avoid for now
+                let old = self.ptr;
+                self.ptr = self.ptr.offset(1);
+                Some(&*old)
             }
         }
     }
 }
-*/
diff --git a/src/rt/atomic.rs b/src/rt/atomic.rs
index 7adae1b3..8da6848e 100644
--- a/src/rt/atomic.rs
+++ b/src/rt/atomic.rs
@@ -1,3 +1,4 @@
+use rt::arena::Arena;
 use rt::{self, thread, Synchronize, VersionVec};
 
 use std::sync::atomic::Ordering;
@@ -23,9 +24,9 @@ struct Store {
 struct FirstSeen(Vec<Option<usize>>);
 
 impl History {
-    pub fn init(&mut self, threads: &mut thread::Set) {
+    pub fn init(&mut self, arena: &mut Arena, threads: &mut thread::Set) {
         self.stores.push(Store {
-            sync: Synchronize::new(threads.max()),
+            sync: Synchronize::new(arena, threads.max()),
             first_seen: FirstSeen::new(threads),
             seq_cst: false,
         });
@@ -51,9 +52,9 @@ impl History {
         index
     }
 
-    pub fn store(&mut self, threads: &mut thread::Set, order: Ordering) {
+    pub fn store(&mut self, arena: &mut Arena, threads: &mut thread::Set, order: Ordering) {
         let mut store = Store {
-            sync: Synchronize::new(threads.max()),
+            sync: Synchronize::new(arena, threads.max()),
             first_seen: FirstSeen::new(threads),
             seq_cst: is_seq_cst(order),
         };
diff --git a/src/rt/execution.rs b/src/rt/execution.rs
index 78a12c69..a0661768 100644
--- a/src/rt/execution.rs
+++ b/src/rt/execution.rs
@@ -34,17 +34,18 @@ impl Execution {
     /// This is only called at the start of a fuzz run. The same instance is
     /// reused across permutations.
     pub fn new(max_threads: usize, max_memory: usize, max_branches: usize) -> Execution {
+        let mut arena = Arena::with_capacity(max_memory);
         let mut threads = thread::Set::new(max_threads);
 
         // Create the root thread
-        threads.new_thread();
+        threads.new_thread(&mut arena);
 
         Execution {
             // id: Id::new(),
             path: Path::new(max_branches),
             threads,
             objects: object::Set::new(),
-            arena: Arena::with_capacity(max_memory),
+            arena,
             max_threads,
             max_history: 7,
             log: false,
@@ -53,7 +54,7 @@ impl Execution {
 
     /// Create state to track a new thread
     pub fn new_thread(&mut self) -> thread::Id {
-        let thread_id = self.threads.new_thread();
+        let thread_id = self.threads.new_thread(&mut self.arena);
 
         let (active, new) = self.threads.active2_mut(thread_id);
 
@@ -93,15 +94,14 @@ impl Execution {
         let mut threads = self.threads;
 
         objects.clear();
-
+        threads.clear();
         arena.clear();
 
         if !path.step() {
             return None;
         }
 
-        threads.clear();
-        threads.new_thread();
+        threads.init(&mut arena);
 
         Some(Execution {
             path,
diff --git a/src/rt/object.rs b/src/rt/object.rs
index 6e8539eb..38b13ac1 100644
--- a/src/rt/object.rs
+++ b/src/rt/object.rs
@@ -178,7 +178,7 @@ impl Id {
 
     pub fn atomic_init(self, execution: &mut Execution) {
         execution.objects[self].atomic_mut()
-            .history.init(&mut execution.threads);
+            .history.init(&mut execution.arena, &mut execution.threads);
     }
 
     pub fn atomic_load(self, order: Ordering) -> usize {
@@ -205,7 +205,7 @@ impl Id {
             execution.objects[self]
                 .atomic_mut()
                 .history
-                .store(&mut execution.threads, order)
+                .store(&mut execution.arena, &mut execution.threads, order)
         })
     }
diff --git a/src/rt/scheduler/fringe.rs b/src/rt/scheduler/fringe.rs
index dda7b583..1ebf077d 100644
--- a/src/rt/scheduler/fringe.rs
+++ b/src/rt/scheduler/fringe.rs
@@ -6,7 +6,7 @@ use fringe::{
     generator::Yielder,
 };
 
-use std::cell::Cell;
+use std::cell::{Cell, RefCell};
 use std::collections::VecDeque;
 use std::fmt;
 use std::ptr;
@@ -27,8 +27,8 @@ struct State<'a> {
     queued_spawn: &'a mut VecDeque<Box<FnBox>>,
 }
 
-scoped_mut_thread_local! {
-    static STATE: State
+scoped_thread_local! {
+    static STATE: RefCell<State<'static>>
 }
 
 thread_local!(static YIELDER: Cell<*const Yielder<Option<Box<FnBox>>, ()>> = Cell::new(ptr::null()));
 
@@ -52,7 +52,9 @@ impl Scheduler {
     where
         F: FnOnce(&mut Execution) -> R,
     {
-        STATE.with(|state| f(state.execution))
+        use std::ops::DerefMut;
+
+        STATE.with(|state| f(state.borrow_mut().deref_mut().execution))
     }
 
     /// Perform a context switch
@@ -61,8 +63,10 @@ impl Scheduler {
     }
 
     pub fn spawn(f: Box<FnBox>) {
+        use std::ops::DerefMut;
+
         STATE.with(|state| {
-            state.queued_spawn.push_back(f);
+            state.borrow_mut().deref_mut().queued_spawn.push_back(f);
         });
     }
 
@@ -96,14 +100,14 @@ impl Scheduler {
     }
 
     fn tick(&mut self, thread: thread::Id, execution: &mut Execution) {
-        let mut state = State {
+        let state = State {
             execution: execution,
             queued_spawn: &mut self.queued_spawn,
         };
 
         let threads = &mut self.threads;
 
-        STATE.set(unsafe { transmute_lt(&mut state) }, || {
+        STATE.set(&mut RefCell::new(unsafe { transmute_lt(state) }), || {
             threads[thread.as_usize()].resume(None);
         });
     }
@@ -160,6 +164,6 @@ fn spawn_threads(n: usize) -> Vec {
     }).collect()
 }
 
-unsafe fn transmute_lt<'a, 'b>(state: &'a mut State<'b>) -> &'a mut State<'static> {
+unsafe fn transmute_lt<'b>(state: State<'b>) -> State<'static> {
     ::std::mem::transmute(state)
 }
diff --git a/src/rt/synchronize.rs b/src/rt/synchronize.rs
index b34939dd..fed1ea78 100644
--- a/src/rt/synchronize.rs
+++ b/src/rt/synchronize.rs
@@ -1,3 +1,4 @@
+use rt::arena::Arena;
 use rt::{thread, VersionVec};
 
 use std::sync::atomic::Ordering::{self, *};
@@ -8,9 +9,8 @@ pub(crate) struct Synchronize {
 }
 
 impl Synchronize {
-    pub fn new(max_threads: usize) -> Self {
-        let happens_before =
-            VersionVec::new(max_threads);
+    pub fn new(arena: &mut Arena, max_threads: usize) -> Self {
+        let happens_before = VersionVec::new(arena, max_threads);
 
         Synchronize {
             happens_before,
diff --git a/src/rt/thread.rs b/src/rt/thread.rs
index 6a6af2d2..dd873c82 100644
--- a/src/rt/thread.rs
+++ b/src/rt/thread.rs
@@ -1,3 +1,4 @@
+use rt::arena::Arena;
 use rt::object::Operation;
 use rt::vv::VersionVec;
 
@@ -60,14 +61,14 @@ pub enum State {
 }
 
 impl Thread {
-    fn new(id: Id, max_threads: usize) -> Thread {
+    fn new(arena: &mut Arena, id: Id, max_threads: usize) -> Thread {
         Thread {
             id,
             state: State::Runnable,
             critical: false,
             operation: None,
-            causality: VersionVec::new(max_threads),
-            dpor_vv: VersionVec::new(max_threads),
+            causality: VersionVec::new(arena, max_threads),
+            dpor_vv: VersionVec::new(arena, max_threads),
             notified: false,
             last_yield: None,
         }
@@ -127,12 +128,12 @@ impl Set {
         Set {
             threads: Vec::with_capacity(max_threads),
             active: None,
-            seq_cst_causality: VersionVec::new(max_threads),
+            seq_cst_causality: VersionVec::new_perm(max_threads),
         }
     }
 
     /// Create a new thread
-    pub fn new_thread(&mut self) -> Id {
+    pub fn new_thread(&mut self, arena: &mut Arena) -> Id {
         assert!(self.threads.len() < self.max());
 
         // Get the identifier for the thread about to be created
@@ -141,7 +142,7 @@ impl Set {
 
         // Push the thread onto the stack
         self.threads.push(
-            Thread::new(Id::from_usize(id), max_threads));
+            Thread::new(arena, Id::from_usize(id), max_threads));
 
         if self.active.is_none() {
             self.active = Some(id);
@@ -211,7 +212,11 @@ impl Set {
     pub fn clear(&mut self) {
         self.threads.clear();
         self.active = None;
-        self.seq_cst_causality = VersionVec::new(self.max());
+        self.seq_cst_causality.clear();
+    }
+
+    pub fn init(&mut self, arena: &mut Arena) {
+        self.new_thread(arena);
     }
 
     pub fn iter<'a>(&'a self) -> impl Iterator<Item = (Id, &'a Thread)> + 'a {
diff --git a/src/rt/vv.rs b/src/rt/vv.rs
index d1d13e0d..a4f9477a 100644
--- a/src/rt/vv.rs
+++ b/src/rt/vv.rs
@@ -1,25 +1,70 @@
+use rt::arena::{Arena, Slice};
 use rt::thread;
 
 use std::cmp;
 use std::ops;
 
-#[derive(Debug, Clone, PartialOrd, Eq, PartialEq)]
-#[cfg_attr(feature = "checkpoint", derive(Serialize, Deserialize))]
-pub struct VersionVec {
-    versions: Box<[usize]>,
+#[cfg(feature = "checkpoint")]
+use serde::{Deserialize, Deserializer};
+
+#[derive(Clone, Debug, PartialOrd, Eq, PartialEq)]
+#[cfg_attr(feature = "checkpoint", derive(Serialize))]
+pub enum VersionVec {
+    Arena(Slice<usize>),
+    Perm(Box<[usize]>),
+}
+
+#[cfg(feature = "checkpoint")]
+impl<'de> Deserialize<'de> for VersionVec {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        Deserialize::deserialize(deserializer).map(VersionVec::Perm)
+    }
 }
 
 impl VersionVec {
-    pub fn new(max_threads: usize) -> VersionVec {
+    pub fn new(arena: &mut Arena, max_threads: usize) -> VersionVec {
+        assert!(max_threads > 0, "max_threads = {:?}", max_threads);
+
+        VersionVec::Arena(arena.slice(max_threads))
+    }
+
+    pub fn new_perm(max_threads: usize) -> VersionVec {
         assert!(max_threads > 0, "max_threads = {:?}", max_threads);
 
-        VersionVec {
-            versions: vec![0; max_threads].into_boxed_slice(),
+        VersionVec::Perm(vec![0; max_threads].into_boxed_slice())
+    }
+
+    pub fn clear(&mut self) {
+        match self {
+            VersionVec::Arena(_) => panic!("clearing arena allocated VersionVec"),
+            VersionVec::Perm(ref mut v) => {
+                for r in v.iter_mut() {
+                    *r = 0;
+                }
+            },
+        }
+    }
+
+    pub fn as_slice(&self) -> &[usize] {
+        match self {
+            VersionVec::Arena(v) => &v,
+            VersionVec::Perm(v) => &v,
+        }
+    }
+
+    pub fn as_slice_mut(&mut self) -> &mut [usize] {
+        match self {
+            VersionVec::Arena(ref mut v) => v,
+            VersionVec::Perm(ref mut v) => v,
         }
     }
 
     pub fn versions<'a>(&'a self) -> impl Iterator<Item = (thread::Id, usize)> + 'a {
-        self.versions.iter()
+        self.as_slice().iter()
             .enumerate()
             .map(|(thread_id, &version)| {
                 (thread::Id::from_usize(thread_id), version)
@@ -27,18 +72,21 @@ impl VersionVec {
     }
 
     pub fn len(&self) -> usize {
-        self.versions.len()
+        self.as_slice().len()
     }
 
     pub fn inc(&mut self, id: thread::Id) {
-        self.versions[id.as_usize()] += 1;
+        match self {
+            VersionVec::Arena(v) => v[id.as_usize()] += 1,
+            VersionVec::Perm(v) => v[id.as_usize()] += 1,
+        }
     }
 
     pub fn join(&mut self, other: &VersionVec) {
-        assert_eq!(self.versions.len(), other.versions.len());
+        assert_eq!(self.len(), other.len());
 
-        for (i, &version) in other.versions.iter().enumerate() {
-            self.versions[i] = cmp::max(self.versions[i], version);
+        for (i, &version) in other.as_slice().iter().enumerate() {
+            self.as_slice_mut()[i] = cmp::max(self.as_slice()[i], version);
         }
     }
 }
@@ -47,12 +95,12 @@ impl ops::Index<thread::Id> for VersionVec {
     type Output = usize;
 
     fn index(&self, index: thread::Id) -> &usize {
-        self.versions.index(index.as_usize())
+        self.as_slice().index(index.as_usize())
     }
 }
 
 impl ops::IndexMut<thread::Id> for VersionVec {
     fn index_mut(&mut self, index: thread::Id) -> &mut usize {
-        self.versions.index_mut(index.as_usize())
+        self.as_slice_mut().index_mut(index.as_usize())
     }
 }
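
Note on the allocation strategy above: every `VersionVec` that lives only for a single permutation (`causality`, `dpor_vv`, the per-store `Synchronize` vectors) is bump-allocated out of the mmap'd `Arena` and reclaimed in one shot by `Arena::clear`, while `seq_cst_causality`, which survives `thread::Set::clear` across permutations, uses the heap-backed `VersionVec::Perm` variant. Below is a minimal, self-contained sketch of that lifecycle; it is a toy `Vec`-backed stand-in, not loom's actual mmap arena, and all names and sizes in it are illustrative.

    use std::cell::Cell;
    use std::rc::Rc;

    // Stand-in for the mmap/VirtualAlloc-backed `Inner` above.
    struct Inner {
        storage: Vec<Cell<usize>>, // pre-reserved backing store
        pos: Cell<usize>,          // bump cursor
    }

    struct Arena {
        inner: Rc<Inner>,
    }

    impl Arena {
        fn with_capacity(cap: usize) -> Arena {
            Arena {
                inner: Rc::new(Inner {
                    storage: (0..cap).map(|_| Cell::new(0)).collect(),
                    pos: Cell::new(0),
                }),
            }
        }

        // Like `Arena::slice`: hand out `len` zeroed slots by bumping the cursor.
        fn slice(&mut self, len: usize) -> (usize, usize) {
            let start = self.inner.pos.get();
            assert!(start + len <= self.inner.storage.len(), "arena overflow");
            self.inner.pos.set(start + len);
            for slot in &self.inner.storage[start..start + len] {
                slot.set(0);
            }
            (start, len)
        }

        // Like `Arena::clear`: everything handed out so far is recycled at
        // once, without a free call per allocation.
        fn clear(&mut self) {
            assert_eq!(Rc::strong_count(&self.inner), 1);
            self.inner.pos.set(0);
        }
    }

    fn main() {
        let max_threads = 4;
        let mut arena = Arena::with_capacity(1024);

        // One fuzz run: the mapping is created once; each permutation's
        // version vectors are carved out of it and recycled in O(1).
        for permutation in 0..3 {
            let (start, len) = arena.slice(max_threads);
            println!("permutation {}: slots {}..{}", permutation, start, start + len);
            arena.clear();
        }
    }

The design point this illustrates: because each `Execution` permutation drops all of its arena-backed `Slice`s before `Execution::run` calls `arena.clear()`, resetting a single cursor replaces one heap allocation per version vector per permutation, which is what the commit is after.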