From 33ccac47cf59702c78f922ece8aed98db81f2546 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20F=C3=A4rnstrand?= Date: Sat, 1 Dec 2018 16:43:06 +0100 Subject: [PATCH 1/9] Use FIXME instead of TODO --- core/src/parking_lot.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/parking_lot.rs b/core/src/parking_lot.rs index 4c34df88..35c245da 100644 --- a/core/src/parking_lot.rs +++ b/core/src/parking_lot.rs @@ -136,7 +136,7 @@ struct ThreadData { parked_with_timeout: Cell, // Extra data for deadlock detection - // TODO: once supported in stable replace with #[cfg...] & remove dummy struct/impl + // FIXME: once supported in stable replace with #[cfg...] & remove dummy struct/impl #[allow(dead_code)] deadlock_data: deadlock::DeadlockData, } From 8bcfae3d1c8b8fff72e485b02e557f8a1e1cc7be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20F=C3=A4rnstrand?= Date: Sat, 1 Dec 2018 16:50:05 +0100 Subject: [PATCH 2/9] Create ThreadParker::IS_CHEAP_TO_CONSTRUCT Use instead of conditional compile directly in word_lock.rs --- core/src/thread_parker/generic.rs | 2 ++ core/src/thread_parker/linux.rs | 2 ++ core/src/thread_parker/unix.rs | 2 ++ core/src/thread_parker/windows/mod.rs | 2 ++ core/src/word_lock.rs | 2 +- 5 files changed, 9 insertions(+), 1 deletion(-) diff --git a/core/src/thread_parker/generic.rs b/core/src/thread_parker/generic.rs index c9205c66..d67efc83 100644 --- a/core/src/thread_parker/generic.rs +++ b/core/src/thread_parker/generic.rs @@ -18,6 +18,8 @@ pub struct ThreadParker { } impl ThreadParker { + pub const IS_CHEAP_TO_CONSTRUCT: bool = true; + pub fn new() -> ThreadParker { ThreadParker { parked: AtomicBool::new(false), diff --git a/core/src/thread_parker/linux.rs b/core/src/thread_parker/linux.rs index 3a495d16..0f01addd 100644 --- a/core/src/thread_parker/linux.rs +++ b/core/src/thread_parker/linux.rs @@ -29,6 +29,8 @@ pub struct ThreadParker { } impl ThreadParker { + pub const IS_CHEAP_TO_CONSTRUCT: bool = true; + pub fn new() -> 
ThreadParker { ThreadParker { futex: AtomicI32::new(0), diff --git a/core/src/thread_parker/unix.rs b/core/src/thread_parker/unix.rs index 6ec12ed5..76dd0adf 100644 --- a/core/src/thread_parker/unix.rs +++ b/core/src/thread_parker/unix.rs @@ -30,6 +30,8 @@ pub struct ThreadParker { } impl ThreadParker { + pub const IS_CHEAP_TO_CONSTRUCT: bool = false; + pub fn new() -> ThreadParker { ThreadParker { should_park: Cell::new(false), diff --git a/core/src/thread_parker/windows/mod.rs b/core/src/thread_parker/windows/mod.rs index 07e2294c..c7410432 100644 --- a/core/src/thread_parker/windows/mod.rs +++ b/core/src/thread_parker/windows/mod.rs @@ -67,6 +67,8 @@ pub struct ThreadParker { } impl ThreadParker { + pub const IS_CHEAP_TO_CONSTRUCT: bool = true; + pub fn new() -> ThreadParker { // Initialize the backend here to ensure we don't get any panics // later on, which could leave synchronization primitives in a broken diff --git a/core/src/word_lock.rs b/core/src/word_lock.rs index 3ebb57b5..7a6b9608 100644 --- a/core/src/word_lock.rs +++ b/core/src/word_lock.rs @@ -66,7 +66,7 @@ where let mut thread_data_ptr = ptr::null(); // If ThreadData is expensive to construct, then we want to use a cached // version in thread-local storage if possible. 
- if !cfg!(windows) && !cfg!(all(feature = "nightly", target_os = "linux")) { + if !ThreadParker::IS_CHEAP_TO_CONSTRUCT { thread_local!(static THREAD_DATA: ThreadData = ThreadData::new()); if let Some(tls_thread_data) = try_get_tls(&THREAD_DATA) { thread_data_ptr = tls_thread_data; From f5f6be35fbf3c9066964428448467112e0558b3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20F=C3=A4rnstrand?= Date: Sat, 1 Dec 2018 17:28:32 +0100 Subject: [PATCH 3/9] Move thread_yield into thread_parker module --- core/src/spinwait.rs | 49 ++------------------------- core/src/thread_parker/generic.rs | 9 +++-- core/src/thread_parker/linux.rs | 6 ++++ core/src/thread_parker/unix.rs | 6 ++++ core/src/thread_parker/windows/mod.rs | 28 +++++++++++++++ 5 files changed, 49 insertions(+), 49 deletions(-) diff --git a/core/src/spinwait.rs b/core/src/spinwait.rs index 41850268..8851888b 100644 --- a/core/src/spinwait.rs +++ b/core/src/spinwait.rs @@ -5,53 +5,8 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. -#[cfg(unix)] -use libc; use std::sync::atomic::spin_loop_hint; -#[cfg(not(any(windows, unix)))] -use std::thread; -#[cfg(windows)] -use winapi; - -// Yields the rest of the current timeslice to the OS -#[cfg(windows)] -#[inline] -fn thread_yield() { - // Note that this is manually defined here rather than using the definition - // through `winapi`. The `winapi` definition comes from the `synchapi` - // header which enables the "synchronization.lib" library. It turns out, - // however that `Sleep` comes from `kernel32.dll` so this activation isn't - // necessary. - // - // This was originally identified in rust-lang/rust where on MinGW the - // libsynchronization.a library pulls in a dependency on a newer DLL not - // present in older versions of Windows. 
(see rust-lang/rust#49438) - // - // This is a bit of a hack for now and ideally we'd fix MinGW's own import - // libraries, but that'll probably take a lot longer than patching this here - // and avoiding the `synchapi` feature entirely. - extern "system" { - fn Sleep(a: winapi::shared::minwindef::DWORD); - } - unsafe { - // We don't use SwitchToThread here because it doesn't consider all - // threads in the system and the thread we are waiting for may not get - // selected. - Sleep(0); - } -} -#[cfg(unix)] -#[inline] -fn thread_yield() { - unsafe { - libc::sched_yield(); - } -} -#[cfg(not(any(windows, unix)))] -#[inline] -fn thread_yield() { - thread::yield_now(); -} +use thread_parker; // Wastes some CPU time for the given number of iterations, // using a hint to indicate to the CPU that we are spinning. @@ -97,7 +52,7 @@ impl SpinWait { if self.counter <= 3 { cpu_relax(1 << self.counter); } else { - thread_yield(); + thread_parker::thread_yield(); } true } diff --git a/core/src/thread_parker/generic.rs b/core/src/thread_parker/generic.rs index d67efc83..68d56085 100644 --- a/core/src/thread_parker/generic.rs +++ b/core/src/thread_parker/generic.rs @@ -8,8 +8,8 @@ //! A simple spin lock based thread parker. Used on platforms without better //! parking facilities available. -use std::sync::atomic::spin_loop_hint; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::atomic::{AtomicBool, Ordering, spin_loop_hint}; +use std::thread; use std::time::Instant; // Helper type for putting a thread to sleep until some other thread wakes it up @@ -78,3 +78,8 @@ impl UnparkHandle { // released to avoid blocking the queue for too long. 
pub fn unpark(self) {} } + +#[inline] +pub fn thread_yield() { + thread::yield_now(); +} diff --git a/core/src/thread_parker/linux.rs b/core/src/thread_parker/linux.rs index 0f01addd..b4d39b95 100644 --- a/core/src/thread_parker/linux.rs +++ b/core/src/thread_parker/linux.rs @@ -8,6 +8,7 @@ use libc; use std::ptr; use std::sync::atomic::{AtomicI32, Ordering}; +use std::thread; use std::time::Instant; const FUTEX_WAIT: i32 = 0; @@ -139,3 +140,8 @@ impl UnparkHandle { } } } + +#[inline] +pub fn thread_yield() { + thread::yield_now(); +} diff --git a/core/src/thread_parker/unix.rs b/core/src/thread_parker/unix.rs index 76dd0adf..88995f83 100644 --- a/core/src/thread_parker/unix.rs +++ b/core/src/thread_parker/unix.rs @@ -10,6 +10,7 @@ use std::cell::{Cell, UnsafeCell}; use std::mem; #[cfg(any(target_os = "macos", target_os = "ios"))] use std::ptr; +use std::thread; use std::time::{Duration, Instant}; // x32 Linux uses a non-standard type for tv_nsec in timespec. @@ -242,3 +243,8 @@ fn timeout_to_timespec(timeout: Duration) -> Option { tv_sec: sec, }) } + +#[inline] +pub fn thread_yield() { + thread::yield_now(); +} diff --git a/core/src/thread_parker/windows/mod.rs b/core/src/thread_parker/windows/mod.rs index c7410432..0f2ef099 100644 --- a/core/src/thread_parker/windows/mod.rs +++ b/core/src/thread_parker/windows/mod.rs @@ -8,6 +8,7 @@ use std::ptr; use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use std::time::Instant; +use winapi; mod keyed_event; mod waitaddress; @@ -144,3 +145,30 @@ impl UnparkHandle { } } } + +// Yields the rest of the current timeslice to the OS +#[inline] +pub fn thread_yield() { + // Note that this is manually defined here rather than using the definition + // through `winapi`. The `winapi` definition comes from the `synchapi` + // header which enables the "synchronization.lib" library. It turns out, + // however that `Sleep` comes from `kernel32.dll` so this activation isn't + // necessary. 
+ // + // This was originally identified in rust-lang/rust where on MinGW the + // libsynchronization.a library pulls in a dependency on a newer DLL not + // present in older versions of Windows. (see rust-lang/rust#49438) + // + // This is a bit of a hack for now and ideally we'd fix MinGW's own import + // libraries, but that'll probably take a lot longer than patching this here + // and avoiding the `synchapi` feature entirely. + extern "system" { + fn Sleep(a: winapi::shared::minwindef::DWORD); + } + unsafe { + // We don't use SwitchToThread here because it doesn't consider all + // threads in the system and the thread we are waiting for may not get + // selected. + Sleep(0); + } +} From e3409004b6215d7d64d1aaa6fc124374f73073dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20F=C3=A4rnstrand?= Date: Mon, 3 Dec 2018 17:40:56 +0100 Subject: [PATCH 4/9] Use #[derive(Default)] on SpinWait --- core/src/spinwait.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/core/src/spinwait.rs b/core/src/spinwait.rs index 8851888b..6912d43b 100644 --- a/core/src/spinwait.rs +++ b/core/src/spinwait.rs @@ -18,6 +18,7 @@ fn cpu_relax(iterations: u32) { } /// A counter used to perform exponential backoff in spin loops. +#[derive(Default)] pub struct SpinWait { counter: u32, } @@ -25,8 +26,8 @@ pub struct SpinWait { impl SpinWait { /// Creates a new `SpinWait`. #[inline] - pub fn new() -> SpinWait { - SpinWait { counter: 0 } + pub fn new() -> Self { + Self::default() } /// Resets a `SpinWait` to its initial state. 
@@ -71,10 +72,3 @@ impl SpinWait { cpu_relax(1 << self.counter); } } - -impl Default for SpinWait { - #[inline] - fn default() -> SpinWait { - SpinWait::new() - } -} From 0aec0c9030eade1f45fa512d06e8e84f93951e5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20F=C3=A4rnstrand?= Date: Mon, 3 Dec 2018 18:26:13 +0100 Subject: [PATCH 5/9] Port with_thread_data from word_lock into parking_lot --- core/src/parking_lot.rs | 212 +++++++++++++++++++++------------------- 1 file changed, 110 insertions(+), 102 deletions(-) diff --git a/core/src/parking_lot.rs b/core/src/parking_lot.rs index 35c245da..4b524b80 100644 --- a/core/src/parking_lot.rs +++ b/core/src/parking_lot.rs @@ -162,8 +162,11 @@ impl ThreadData { } } -// Returns a ThreadData structure for the current thread -unsafe fn get_thread_data(local: &mut Option) -> &ThreadData { +// Invokes the given closure with a reference to the current thread `ThreadData`. +fn with_thread_data(f: F) -> T +where + F: FnOnce(&ThreadData) -> T, +{ // Try to read from thread-local storage, but return None if the TLS has // already been destroyed. #[cfg(has_localkey_try_with)] @@ -177,14 +180,19 @@ unsafe fn get_thread_data(local: &mut Option) -> &ThreadData { // Unlike word_lock::ThreadData, parking_lot::ThreadData is always expensive // to construct. Try to use a thread-local version if possible. 
+ let mut thread_data_ptr = ptr::null(); thread_local!(static THREAD_DATA: ThreadData = ThreadData::new()); - if let Some(tls) = try_get_tls(&THREAD_DATA) { - return &*tls; + if let Some(tls_thread_data) = try_get_tls(&THREAD_DATA) { + thread_data_ptr = tls_thread_data; } // Otherwise just create a ThreadData on the stack - *local = Some(ThreadData::new()); - local.as_ref().unwrap() + let mut thread_data_storage = None; + if thread_data_ptr.is_null() { + thread_data_ptr = thread_data_storage.get_or_insert_with(ThreadData::new); + } + + f(unsafe { &*thread_data_ptr }) } impl Drop for ThreadData { @@ -579,106 +587,106 @@ unsafe fn park_internal( timeout: Option, ) -> ParkResult { // Grab our thread data, this also ensures that the hash table exists - let mut thread_data = None; - let thread_data = get_thread_data(&mut thread_data); + with_thread_data(|thread_data| { - // Lock the bucket for the given key - let bucket = lock_bucket(key); + // Lock the bucket for the given key + let bucket = lock_bucket(key); - // If the validation function fails, just return - if !validate() { + // If the validation function fails, just return + if !validate() { + bucket.mutex.unlock(); + return ParkResult::Invalid; + } + + // Append our thread data to the queue and unlock the bucket + thread_data.parked_with_timeout.set(timeout.is_some()); + thread_data.next_in_queue.set(ptr::null()); + thread_data.key.store(key, Ordering::Relaxed); + thread_data.park_token.set(park_token); + thread_data.parker.prepare_park(); + if !bucket.queue_head.get().is_null() { + (*bucket.queue_tail.get()).next_in_queue.set(thread_data); + } else { + bucket.queue_head.set(thread_data); + } + bucket.queue_tail.set(thread_data); bucket.mutex.unlock(); - return ParkResult::Invalid; - } - // Append our thread data to the queue and unlock the bucket - thread_data.parked_with_timeout.set(timeout.is_some()); - thread_data.next_in_queue.set(ptr::null()); - thread_data.key.store(key, Ordering::Relaxed); - 
thread_data.park_token.set(park_token); - thread_data.parker.prepare_park(); - if !bucket.queue_head.get().is_null() { - (*bucket.queue_tail.get()).next_in_queue.set(thread_data); - } else { - bucket.queue_head.set(thread_data); - } - bucket.queue_tail.set(thread_data); - bucket.mutex.unlock(); + // Invoke the pre-sleep callback + before_sleep(); + + // Park our thread and determine whether we were woken up by an unpark or by + // our timeout. Note that this isn't precise: we can still be unparked since + // we are still in the queue. + let unparked = match timeout { + Some(timeout) => thread_data.parker.park_until(timeout), + None => { + thread_data.parker.park(); + // call deadlock detection on_unpark hook + deadlock::on_unpark(thread_data); + true + } + }; - // Invoke the pre-sleep callback - before_sleep(); - - // Park our thread and determine whether we were woken up by an unpark or by - // our timeout. Note that this isn't precise: we can still be unparked since - // we are still in the queue. - let unparked = match timeout { - Some(timeout) => thread_data.parker.park_until(timeout), - None => { - thread_data.parker.park(); - // call deadlock detection on_unpark hook - deadlock::on_unpark(thread_data); - true + // If we were unparked, return now + if unparked { + return ParkResult::Unparked(thread_data.unpark_token.get()); } - }; - // If we were unparked, return now - if unparked { - return ParkResult::Unparked(thread_data.unpark_token.get()); - } - - // Lock our bucket again. Note that the hashtable may have been rehashed in - // the meantime. Our key may also have changed if we were requeued. - let (key, bucket) = lock_bucket_checked(&thread_data.key); + // Lock our bucket again. Note that the hashtable may have been rehashed in + // the meantime. Our key may also have changed if we were requeued. + let (key, bucket) = lock_bucket_checked(&thread_data.key); - // Now we need to check again if we were unparked or timed out. 
Unlike the - // last check this is precise because we hold the bucket lock. - if !thread_data.parker.timed_out() { - bucket.mutex.unlock(); - return ParkResult::Unparked(thread_data.unpark_token.get()); - } + // Now we need to check again if we were unparked or timed out. Unlike the + // last check this is precise because we hold the bucket lock. + if !thread_data.parker.timed_out() { + bucket.mutex.unlock(); + return ParkResult::Unparked(thread_data.unpark_token.get()); + } - // We timed out, so we now need to remove our thread from the queue - let mut link = &bucket.queue_head; - let mut current = bucket.queue_head.get(); - let mut previous = ptr::null(); - while !current.is_null() { - if current == thread_data { - let next = (*current).next_in_queue.get(); - link.set(next); - let mut was_last_thread = true; - if bucket.queue_tail.get() == current { - bucket.queue_tail.set(previous); - } else { - // Scan the rest of the queue to see if there are any other - // entries with the given key. - let mut scan = next; - while !scan.is_null() { - if (*scan).key.load(Ordering::Relaxed) == key { - was_last_thread = false; - break; + // We timed out, so we now need to remove our thread from the queue + let mut link = &bucket.queue_head; + let mut current = bucket.queue_head.get(); + let mut previous = ptr::null(); + while !current.is_null() { + if current == thread_data { + let next = (*current).next_in_queue.get(); + link.set(next); + let mut was_last_thread = true; + if bucket.queue_tail.get() == current { + bucket.queue_tail.set(previous); + } else { + // Scan the rest of the queue to see if there are any other + // entries with the given key. + let mut scan = next; + while !scan.is_null() { + if (*scan).key.load(Ordering::Relaxed) == key { + was_last_thread = false; + break; + } + scan = (*scan).next_in_queue.get(); } - scan = (*scan).next_in_queue.get(); } - } - // Callback to indicate that we timed out, and whether we were the - // last thread on the queue. 
- timed_out(key, was_last_thread); - break; - } else { - link = &(*current).next_in_queue; - previous = current; - current = link.get(); + // Callback to indicate that we timed out, and whether we were the + // last thread on the queue. + timed_out(key, was_last_thread); + break; + } else { + link = &(*current).next_in_queue; + previous = current; + current = link.get(); + } } - } - // There should be no way for our thread to have been removed from the queue - // if we timed out. - debug_assert!(!current.is_null()); + // There should be no way for our thread to have been removed from the queue + // if we timed out. + debug_assert!(!current.is_null()); - // Unlock the bucket, we are done - bucket.mutex.unlock(); - ParkResult::TimedOut + // Unlock the bucket, we are done + bucket.mutex.unlock(); + ParkResult::TimedOut + }) } /// Unparks one thread from the queue associated with the given key. @@ -1149,7 +1157,7 @@ pub mod deadlock { #[cfg(feature = "deadlock_detection")] mod deadlock_impl { - use super::{get_hashtable, get_thread_data, lock_bucket, ThreadData, NUM_THREADS}; + use super::{get_hashtable, with_thread_data, lock_bucket, ThreadData, NUM_THREADS}; use backtrace::Backtrace; use petgraph; use petgraph::graphmap::DiGraphMap; @@ -1222,19 +1230,19 @@ mod deadlock_impl { } pub unsafe fn acquire_resource(key: usize) { - let mut thread_data = None; - let thread_data = get_thread_data(&mut thread_data); - (*thread_data.deadlock_data.resources.get()).push(key); + with_thread_data(|thread_data| { + (*thread_data.deadlock_data.resources.get()).push(key); + }); } pub unsafe fn release_resource(key: usize) { - let mut thread_data = None; - let thread_data = get_thread_data(&mut thread_data); - let resources = &mut (*thread_data.deadlock_data.resources.get()); - match resources.iter().rposition(|x| *x == key) { - Some(p) => resources.swap_remove(p), - None => panic!("key {} not found in thread resources", key), - }; + with_thread_data(|thread_data| { + let resources = 
&mut (*thread_data.deadlock_data.resources.get()); + match resources.iter().rposition(|x| *x == key) { + Some(p) => resources.swap_remove(p), + None => panic!("key {} not found in thread resources", key), + }; + }); } pub fn check_deadlock() -> Vec> { From c84b8de62f93dfda6a614e4008f488c31b60674f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20F=C3=A4rnstrand?= Date: Tue, 4 Dec 2018 01:10:09 +0100 Subject: [PATCH 6/9] Add documentation on when Condvar::wait_for can panic --- src/condvar.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/condvar.rs b/src/condvar.rs index 0e22beb1..19ca5868 100644 --- a/src/condvar.rs +++ b/src/condvar.rs @@ -387,12 +387,17 @@ impl Condvar { /// /// Like `wait`, the lock specified will be re-acquired when this function /// returns, regardless of whether the timeout elapsed or not. + /// + /// # Panics + /// + /// Panics if the given `timeout` is so large that it can't be added to the current time. #[inline] pub fn wait_for( &self, guard: &mut MutexGuard, timeout: Duration, ) -> WaitTimeoutResult { + // FIXME: Change to Intstant::now().checked_add(timeout) when stable. self.wait_until(guard, Instant::now() + timeout) } } From a1e7769943b9fb029c0652fb57cbc7f490c74e23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20F=C3=A4rnstrand?= Date: Fri, 7 Dec 2018 13:29:06 +0100 Subject: [PATCH 7/9] Add FIXME note about checked_add of Instant + Duration --- src/condvar.rs | 2 +- src/raw_mutex.rs | 1 + src/raw_rwlock.rs | 5 +++++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/condvar.rs b/src/condvar.rs index 19ca5868..eb757d52 100644 --- a/src/condvar.rs +++ b/src/condvar.rs @@ -397,7 +397,7 @@ impl Condvar { guard: &mut MutexGuard, timeout: Duration, ) -> WaitTimeoutResult { - // FIXME: Change to Intstant::now().checked_add(timeout) when stable. + // FIXME: Change to Instant::now().checked_add(timeout) when stable. 
self.wait_until(guard, Instant::now() + timeout) } } diff --git a/src/raw_mutex.rs b/src/raw_mutex.rs index 27ce64b9..36c2b93f 100644 --- a/src/raw_mutex.rs +++ b/src/raw_mutex.rs @@ -144,6 +144,7 @@ unsafe impl RawMutexTimed for RawMutex { { true } else { + // FIXME: Change to Instant::now().checked_add(timeout) when stable. self.lock_slow(Some(Instant::now() + timeout)) }; if result { diff --git a/src/raw_rwlock.rs b/src/raw_rwlock.rs index 96424d08..34b6148a 100644 --- a/src/raw_rwlock.rs +++ b/src/raw_rwlock.rs @@ -228,6 +228,7 @@ unsafe impl RawRwLockTimed for RawRwLock { let result = if self.try_lock_shared_fast(false) { true } else { + // FIXME: Change to Instant::now().checked_add(timeout) when stable. self.lock_shared_slow(false, Some(Instant::now() + timeout)) }; if result { @@ -258,6 +259,7 @@ unsafe impl RawRwLockTimed for RawRwLock { { true } else { + // FIXME: Change to Instant::now().checked_add(timeout) when stable. self.lock_exclusive_slow(Some(Instant::now() + timeout)) }; if result { @@ -314,6 +316,7 @@ unsafe impl RawRwLockRecursiveTimed for RawRwLock { let result = if self.try_lock_shared_fast(true) { true } else { + // FIXME: Change to Instant::now().checked_add(timeout) when stable. self.lock_shared_slow(true, Some(Instant::now() + timeout)) }; if result { @@ -472,6 +475,7 @@ unsafe impl RawRwLockUpgradeTimed for RawRwLock { let result = if self.try_lock_upgradable_fast() { true } else { + // FIXME: Change to Instant::now().checked_add(timeout) when stable. self.lock_upgradable_slow(Some(Instant::now() + timeout)) }; if result { @@ -510,6 +514,7 @@ unsafe impl RawRwLockUpgradeTimed for RawRwLock { { true } else { + // FIXME: Change to Instant::now().checked_add(timeout) when stable. 
self.upgrade_slow(Some(Instant::now() + timeout)) } } From 73504893aecb0b906da36b33ed49c430e750f518 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20F=C3=A4rnstrand?= Date: Sat, 8 Dec 2018 23:46:02 +0100 Subject: [PATCH 8/9] Format codebase with latest rustfmt --- core/src/parking_lot.rs | 3 +-- core/src/thread_parker/generic.rs | 2 +- core/src/thread_parker/unix.rs | 12 ++-------- lock_api/src/mutex.rs | 6 +++-- lock_api/src/remutex.rs | 28 +++++++++++++++------- lock_api/src/rwlock.rs | 21 ++++++++++------ src/elision.rs | 5 +--- src/mutex.rs | 3 ++- src/raw_rwlock.rs | 40 +++++++++++++++++++------------ src/remutex.rs | 3 ++- src/rwlock.rs | 15 ++++++++---- 11 files changed, 82 insertions(+), 56 deletions(-) diff --git a/core/src/parking_lot.rs b/core/src/parking_lot.rs index 4b524b80..69904cd1 100644 --- a/core/src/parking_lot.rs +++ b/core/src/parking_lot.rs @@ -588,7 +588,6 @@ unsafe fn park_internal( ) -> ParkResult { // Grab our thread data, this also ensures that the hash table exists with_thread_data(|thread_data| { - // Lock the bucket for the given key let bucket = lock_bucket(key); @@ -1157,7 +1156,7 @@ pub mod deadlock { #[cfg(feature = "deadlock_detection")] mod deadlock_impl { - use super::{get_hashtable, with_thread_data, lock_bucket, ThreadData, NUM_THREADS}; + use super::{get_hashtable, lock_bucket, with_thread_data, ThreadData, NUM_THREADS}; use backtrace::Backtrace; use petgraph; use petgraph::graphmap::DiGraphMap; diff --git a/core/src/thread_parker/generic.rs b/core/src/thread_parker/generic.rs index 68d56085..45fd1e2b 100644 --- a/core/src/thread_parker/generic.rs +++ b/core/src/thread_parker/generic.rs @@ -8,7 +8,7 @@ //! A simple spin lock based thread parker. Used on platforms without better //! parking facilities available. 
-use std::sync::atomic::{AtomicBool, Ordering, spin_loop_hint}; +use std::sync::atomic::{spin_loop_hint, AtomicBool, Ordering}; use std::thread; use std::time::Instant; diff --git a/core/src/thread_parker/unix.rs b/core/src/thread_parker/unix.rs index 88995f83..57d4e612 100644 --- a/core/src/thread_parker/unix.rs +++ b/core/src/thread_parker/unix.rs @@ -43,17 +43,9 @@ impl ThreadParker { } // Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME. - #[cfg(any( - target_os = "macos", - target_os = "ios", - target_os = "android" - ))] + #[cfg(any(target_os = "macos", target_os = "ios", target_os = "android"))] unsafe fn init(&self) {} - #[cfg(not(any( - target_os = "macos", - target_os = "ios", - target_os = "android" - )))] + #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android")))] unsafe fn init(&self) { let mut attr: libc::pthread_condattr_t = mem::uninitialized(); let r = libc::pthread_condattr_init(&mut attr); diff --git a/lock_api/src/mutex.rs b/lock_api/src/mutex.rs index b9cf49bb..435cf2be 100644 --- a/lock_api/src/mutex.rs +++ b/lock_api/src/mutex.rs @@ -447,10 +447,12 @@ pub struct MappedMutexGuard<'a, R: RawMutex + 'a, T: ?Sized + 'a> { unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MappedMutexGuard<'a, R, T> -{} +{ +} unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Send for MappedMutexGuard<'a, R, T> where R::GuardMarker: Send -{} +{ +} impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> { /// Makes a new `MappedMutexGuard` for a component of the locked data. diff --git a/lock_api/src/remutex.rs b/lock_api/src/remutex.rs index 3db9f516..bf37ed85 100644 --- a/lock_api/src/remutex.rs +++ b/lock_api/src/remutex.rs @@ -142,10 +142,12 @@ pub struct ReentrantMutex { unsafe impl Send for ReentrantMutex -{} +{ +} unsafe impl Sync for ReentrantMutex -{} +{ +} impl ReentrantMutex { /// Creates a new reentrant mutex in an unlocked state ready for use. 
@@ -354,7 +356,8 @@ pub struct ReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Si unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync for ReentrantMutexGuard<'a, R, G, T> -{} +{ +} impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> { /// Returns a reference to the original `ReentrantMutex` object. @@ -395,7 +398,10 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGu /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] - pub fn try_map(s: Self, f: F) -> Result, Self> + pub fn try_map( + s: Self, + f: F, + ) -> Result, Self> where F: FnOnce(&mut T) -> Option<&mut U>, { @@ -497,7 +503,8 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop #[cfg(feature = "owning_ref")] unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress for ReentrantMutexGuard<'a, R, G, T> -{} +{ +} /// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a /// subfield of the protected data. @@ -515,7 +522,8 @@ pub struct MappedReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MappedReentrantMutexGuard<'a, R, G, T> -{} +{ +} impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> MappedReentrantMutexGuard<'a, R, G, T> @@ -553,7 +561,10 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. 
#[inline] - pub fn try_map(s: Self, f: F) -> Result, Self> + pub fn try_map( + s: Self, + f: F, + ) -> Result, Self> where F: FnOnce(&T) -> Option<&U>, { @@ -615,4 +626,5 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop #[cfg(feature = "owning_ref")] unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress for MappedReentrantMutexGuard<'a, R, G, T> -{} +{ +} diff --git a/lock_api/src/rwlock.rs b/lock_api/src/rwlock.rs index 4edb9179..adbd69ae 100644 --- a/lock_api/src/rwlock.rs +++ b/lock_api/src/rwlock.rs @@ -1020,7 +1020,8 @@ pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + ' unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync for RwLockUpgradableReadGuard<'a, R, T> -{} +{ +} impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> { /// Returns a reference to the original reader-writer lock object. @@ -1199,7 +1200,8 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableRead #[cfg(feature = "owning_ref")] unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress for RwLockUpgradableReadGuard<'a, R, T> -{} +{ +} /// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a /// subfield of the protected data. @@ -1218,7 +1220,8 @@ pub struct MappedRwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> { unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {} unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockReadGuard<'a, R, T> where R::GuardMarker: Send -{} +{ +} impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> { /// Make a new `MappedRwLockReadGuard` for a component of the locked data. 
@@ -1310,7 +1313,8 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R #[cfg(feature = "owning_ref")] unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for MappedRwLockReadGuard<'a, R, T> -{} +{ +} /// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a /// subfield of the protected data. @@ -1328,10 +1332,12 @@ pub struct MappedRwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> { unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockWriteGuard<'a, R, T> -{} +{ +} unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where R::GuardMarker: Send -{} +{ +} impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> { /// Make a new `MappedRwLockWriteGuard` for a component of the locked data. @@ -1450,4 +1456,5 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, #[cfg(feature = "owning_ref")] unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for MappedRwLockWriteGuard<'a, R, T> -{} +{ +} diff --git a/src/elision.rs b/src/elision.rs index b7a29d9b..b1201ee9 100644 --- a/src/elision.rs +++ b/src/elision.rs @@ -36,10 +36,7 @@ pub fn have_elision() -> bool { // This implementation is never actually called because it is guarded by // have_elision(). 
-#[cfg(not(all( - feature = "nightly", - any(target_arch = "x86", target_arch = "x86_64") -)))] +#[cfg(not(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64"))))] impl AtomicElisionExt for AtomicUsize { type IntType = usize; diff --git a/src/mutex.rs b/src/mutex.rs index d530400e..0e26570a 100644 --- a/src/mutex.rs +++ b/src/mutex.rs @@ -253,7 +253,8 @@ mod tests { } let _u = Unwinder { i: arc2 }; panic!(); - }).join(); + }) + .join(); let lock = arc.lock(); assert_eq!(*lock, 2); } diff --git a/src/raw_rwlock.rs b/src/raw_rwlock.rs index 34b6148a..2c1d3056 100644 --- a/src/raw_rwlock.rs +++ b/src/raw_rwlock.rs @@ -134,7 +134,8 @@ unsafe impl RawRwLockTrait for RawRwLock { state - SHARED_GUARD, Ordering::Release, Ordering::Relaxed, - ).is_ok() + ) + .is_ok() { return; } @@ -168,7 +169,8 @@ unsafe impl RawRwLockFair for RawRwLock { state - SHARED_GUARD, Ordering::Release, Ordering::Relaxed, - ).is_ok() + ) + .is_ok() { return; } @@ -384,7 +386,8 @@ unsafe impl RawRwLockUpgrade for RawRwLock { EXCLUSIVE_GUARD, Ordering::Relaxed, Ordering::Relaxed, - ).is_err() + ) + .is_err() { let result = self.upgrade_slow(None); debug_assert!(result); @@ -399,7 +402,8 @@ unsafe impl RawRwLockUpgrade for RawRwLock { EXCLUSIVE_GUARD, Ordering::Relaxed, Ordering::Relaxed, - ).is_ok() + ) + .is_ok() { true } else { @@ -493,7 +497,8 @@ unsafe impl RawRwLockUpgradeTimed for RawRwLock { EXCLUSIVE_GUARD, Ordering::Relaxed, Ordering::Relaxed, - ).is_ok() + ) + .is_ok() { true } else { @@ -510,7 +515,8 @@ unsafe impl RawRwLockUpgradeTimed for RawRwLock { EXCLUSIVE_GUARD, Ordering::Relaxed, Ordering::Relaxed, - ).is_ok() + ) + .is_ok() { true } else { @@ -793,7 +799,8 @@ impl RawRwLock { new_state, Ordering::Acquire, Ordering::Relaxed, - ).is_ok() + ) + .is_ok() { return true; } @@ -964,13 +971,15 @@ impl RawRwLock { } None => FilterOp::Stop, }, - Some(false) => if token & UPGRADING_BIT != 0 { - additional_guards.set(token & !UPGRADING_BIT); - 
has_upgraded.set(Some(true)); - FilterOp::Unpark - } else { - FilterOp::Skip - }, + Some(false) => { + if token & UPGRADING_BIT != 0 { + additional_guards.set(token & !UPGRADING_BIT); + has_upgraded.set(Some(true)); + FilterOp::Unpark + } else { + FilterOp::Skip + } + } Some(true) => FilterOp::Stop, } }; @@ -1041,7 +1050,8 @@ impl RawRwLock { new_state, Ordering::Acquire, Ordering::Relaxed, - ).is_ok() + ) + .is_ok() { return true; } diff --git a/src/remutex.rs b/src/remutex.rs index a36a4a62..aa40e144 100644 --- a/src/remutex.rs +++ b/src/remutex.rs @@ -102,7 +102,8 @@ mod tests { thread::spawn(move || { let lock = m2.try_lock(); assert!(lock.is_none()); - }).join() + }) + .join() .unwrap(); let _lock3 = m.try_lock(); } diff --git a/src/rwlock.rs b/src/rwlock.rs index 8ee58322..f9709245 100644 --- a/src/rwlock.rs +++ b/src/rwlock.rs @@ -177,7 +177,8 @@ mod tests { let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.write(); panic!(); - }).join(); + }) + .join(); let lock = arc.read(); assert_eq!(*lock, 1); } @@ -189,7 +190,8 @@ mod tests { let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.write(); panic!(); - }).join(); + }) + .join(); let lock = arc.write(); assert_eq!(*lock, 1); } @@ -201,7 +203,8 @@ mod tests { let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.read(); panic!(); - }).join(); + }) + .join(); let lock = arc.read(); assert_eq!(*lock, 1); } @@ -213,7 +216,8 @@ mod tests { let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.read(); panic!() - }).join(); + }) + .join(); let lock = arc.write(); assert_eq!(*lock, 1); } @@ -328,7 +332,8 @@ mod tests { } let _u = Unwinder { i: arc2 }; panic!(); - }).join(); + }) + .join(); let lock = arc.read(); assert_eq!(*lock, 2); } From 01e0f8d018f016b3571f8ade46171efbd96295b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20F=C3=A4rnstrand?= Date: Sat, 8 Dec 2018 23:51:20 +0100 Subject: [PATCH 9/9] Only store DeadlockData if feature is activated --- 
core/src/parking_lot.rs | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/core/src/parking_lot.rs b/core/src/parking_lot.rs index 69904cd1..ff663a9b 100644 --- a/core/src/parking_lot.rs +++ b/core/src/parking_lot.rs @@ -136,8 +136,7 @@ struct ThreadData { parked_with_timeout: Cell, // Extra data for deadlock detection - // FIXME: once supported in stable replace with #[cfg...] & remove dummy struct/impl - #[allow(dead_code)] + #[cfg(feature = "deadlock_detection")] deadlock_data: deadlock::DeadlockData, } @@ -157,6 +156,7 @@ impl ThreadData { unpark_token: Cell::new(DEFAULT_UNPARK_TOKEN), park_token: Cell::new(DEFAULT_PARK_TOKEN), parked_with_timeout: Cell::new(false), + #[cfg(feature = "deadlock_detection")] deadlock_data: deadlock::DeadlockData::new(), } } @@ -1109,16 +1109,6 @@ pub mod deadlock { #[cfg(feature = "deadlock_detection")] pub(super) use super::deadlock_impl::DeadlockData; - #[cfg(not(feature = "deadlock_detection"))] - pub(super) struct DeadlockData {} - - #[cfg(not(feature = "deadlock_detection"))] - impl DeadlockData { - pub(super) fn new() -> Self { - DeadlockData {} - } - } - /// Acquire a resource identified by key in the deadlock detector /// Noop if deadlock_detection feature isn't enabled. /// Note: Call after the resource is acquired