feat(miri): activate doc tests
wvwwvwwv committed Aug 10, 2024
1 parent e5f2273 commit b30817b
Showing 12 changed files with 145 additions and 195 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/sdd.yml
@@ -77,7 +77,7 @@ jobs:
- name: Loom
run: cargo test --features loom --release --lib
- name: Miri
run: MIRIFLAGS="-Zmiri-disable-data-race-detector" cargo +nightly miri test --lib --bins --tests
run: MIRIFLAGS="-Zmiri-disable-data-race-detector" cargo +nightly miri test
benchmark:
runs-on: ubuntu-latest
timeout-minutes: 15
6 changes: 6 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,11 @@
# Changelog

3.0.0

* Make `Collectible` private since it is unsafe.
* Remove `Guard::defer` which depends on `Collectible`.
* Remove `prepare`.

2.1.0

* Minor performance optimization.
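
The 3.0.0 entries above remove `Guard::defer` together with the now-private `Collectible` trait. A minimal migration sketch, assuming the surviving `Guard::defer_execute` (used in the README below) accepts a plain `'static` closure: move the instance into the closure and let it drop there instead of handing over a `Box` of a `Collectible` type.

```rust
use sdd::Guard;

fn main() {
    let guard = Guard::new();

    // Before 3.0.0 this value would have been handed to `guard.defer(boxed)`.
    let instance = String::from("reclaim me later");

    // Defer destruction by moving the instance into the closure.
    guard.defer_execute(move || drop(instance));

    drop(guard);
}
```
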
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -2,7 +2,7 @@
name = "sdd"
description = "Scalable lock-free delayed memory reclaimer"
documentation = "https://docs.rs/sdd"
version = "2.1.0"
version = "3.0.0"
authors = ["wvwwvwwv <[email protected]>"]
edition = "2021"
rust-version = "1.65.0"
10 changes: 7 additions & 3 deletions README.md
@@ -18,7 +18,7 @@ Its delayed deallocation algorithm is based on a variant of epoch-based reclamat
This crate can be used _without an `unsafe` block_.

```rust
use sdd::{suspend, AtomicOwned, AtomicShared, Guard, Ptr, Shared, Tag};
use sdd::{suspend, AtomicOwned, AtomicShared, Guard, Owned, Ptr, Shared, Tag};
use std::sync::atomic::Ordering::Relaxed;

// `atomic_shared` holds a strong reference to `17`.
@@ -51,7 +51,7 @@ ptr.set_tag(Tag::First);
// The ownership of the contained instance is transferred to the return value of CAS.
let prev: Shared<usize> = atomic_shared.compare_exchange(
ptr,
(Some(Shared::new(18)), Tag::Second),
(Some(Shared::new(19)), Tag::Second),
Relaxed,
Relaxed,
&guard).unwrap().0.unwrap();
@@ -62,7 +62,7 @@ drop(prev);

// `sdd::AtomicShared` can be converted into `sdd::Shared`.
let shared: Shared<usize> = atomic_shared.into_shared(Relaxed).unwrap();
assert_eq!(*shared, 18);
assert_eq!(*shared, 19);

// `18` and `19` will be garbage-collected later.
drop(shared);
@@ -75,6 +75,10 @@ assert_eq!(*ptr.as_ref().unwrap(), 17);
guard.defer_execute(|| println!("deferred"));
drop(guard);

// `sdd::Owned` and `sdd::Shared` can be nested.
let shared_nested: Shared<Owned<Shared<usize>>> = Shared::new(Owned::new(Shared::new(20)));
assert_eq!(***shared_nested, 20);

// If the thread is expected to lie dormant for a while, call `suspend()` to allow
// others to reclaim the memory.
suspend();
10 changes: 5 additions & 5 deletions examples/src/ebr.rs
@@ -3,7 +3,7 @@ mod examples {
use sdd::{AtomicShared, Guard, Owned, Shared, Tag};
use std::sync::atomic::AtomicIsize;
use std::sync::atomic::Ordering::{Acquire, Relaxed};
use std::thread;
use std::thread::{self, yield_now};

struct R(&'static AtomicIsize);
impl Drop for R {
@@ -35,8 +35,8 @@ mod examples {
drop(guard);

while DROP_CNT.load(Relaxed) != 1 {
let guard = Guard::new();
drop(guard);
Guard::new().accelerate();
yield_now();
}
assert_eq!(DROP_CNT.load(Relaxed), 1);
}
@@ -81,8 +81,8 @@ mod examples {
});

while DROP_CNT.load(Relaxed) != 2 {
let guard = Guard::new();
drop(guard);
Guard::new().accelerate();
yield_now();
}
assert_eq!(DROP_CNT.load(Relaxed), 2);
}
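
The example now nudges reclamation forward with `Guard::accelerate` and yields, instead of spinning on freshly created and dropped guards. A self-contained sketch of the same wait pattern, assuming that dropping the sole `Shared` retires the instance for delayed reclamation as described in the README:

```rust
use sdd::{Guard, Shared};
use std::sync::atomic::AtomicIsize;
use std::sync::atomic::Ordering::Relaxed;
use std::thread::yield_now;

static DROP_CNT: AtomicIsize = AtomicIsize::new(0);

// Counts its own drops, mirroring the `R` helper in the example.
struct R(&'static AtomicIsize);
impl Drop for R {
    fn drop(&mut self) {
        self.0.fetch_add(1, Relaxed);
    }
}

fn main() {
    // Dropping the `Shared` only retires the instance; reclamation is delayed.
    drop(Shared::new(R(&DROP_CNT)));

    // Push the global epoch forward until the instance is actually dropped.
    while DROP_CNT.load(Relaxed) != 1 {
        Guard::new().accelerate();
        yield_now();
    }
    assert_eq!(DROP_CNT.load(Relaxed), 1);
}
```
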
33 changes: 1 addition & 32 deletions src/collectible.rs
@@ -4,38 +4,7 @@ use std::sync::atomic::{AtomicPtr, AtomicUsize};

/// [`Collectible`] defines the memory layout for the type in order to be passed to the garbage
/// collector.
///
/// # Examples
///
/// ```
/// use sdd::{Collectible, Guard, Link};
/// use std::ptr::NonNull;
///
/// struct LazyString(String, Link);
///
/// impl Collectible for LazyString {
/// fn next_ptr(&self) -> Option<NonNull<dyn Collectible>> {
/// self.1.next_ptr()
/// }
/// fn set_next_ptr(&self, next_ptr: Option<NonNull<dyn Collectible>>) {
/// self.1.set_next_ptr(next_ptr);
/// }
/// }
///
/// let boxed: Box<LazyString> = Box::new(LazyString(String::from("Lazy"), Link::default()));
///
/// let static_ref: &'static LazyString = unsafe { std::mem::transmute(&*boxed) };
/// let guard_for_ref = Guard::new();
///
/// let guard_to_drop = Guard::new();
/// guard_to_drop.defer(boxed);
/// drop(guard_to_drop);
///
/// // The reference is valid as long as a `Guard` that had been created before `boxed` was
/// // passed to a `Guard` survives.
/// assert_eq!(static_ref.0, "Lazy");
/// ```
pub trait Collectible {
pub(super) trait Collectible {
/// Returns the next [`Collectible`] pointer.
fn next_ptr(&self) -> Option<NonNull<dyn Collectible>>;

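
The trait that this commit turns `pub(super)` is an intrusive-list hook: every garbage instance carries a pointer to the next one through `next_ptr`/`set_next_ptr`. A minimal sketch of that linking pattern with a hypothetical `Node` type that is not part of the crate's API:

```rust
use std::cell::Cell;
use std::ptr::NonNull;

// Simplified stand-in for the trait shown above.
trait Collectible {
    fn next_ptr(&self) -> Option<NonNull<dyn Collectible>>;
    fn set_next_ptr(&self, next_ptr: Option<NonNull<dyn Collectible>>);
}

// Hypothetical garbage record carrying its own intrusive link.
struct Node {
    next: Cell<Option<NonNull<dyn Collectible>>>,
}

impl Collectible for Node {
    fn next_ptr(&self) -> Option<NonNull<dyn Collectible>> {
        self.next.get()
    }
    fn set_next_ptr(&self, next_ptr: Option<NonNull<dyn Collectible>>) {
        self.next.set(next_ptr);
    }
}

fn main() {
    let first = Node { next: Cell::new(None) };
    let second = Node { next: Cell::new(None) };

    // Thread `second` behind `first`, the way a reclamation queue would.
    let second_ref: &dyn Collectible = &second;
    first.set_next_ptr(Some(NonNull::from(second_ref)));

    assert!(first.next_ptr().is_some());
    assert!(second.next_ptr().is_none());
}
```
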
179 changes: 101 additions & 78 deletions src/collector.rs
@@ -1,13 +1,14 @@
use super::collectible::{Collectible, Link};
use super::exit_guard::ExitGuard;
use super::maybe_std::fence as maybe_std_fence;
use super::{Collectible, Epoch, Link, Tag};
use std::ptr::{self, NonNull};
use super::{Epoch, Tag};
use std::ptr::{self, addr_of_mut, NonNull};
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
use std::sync::atomic::{AtomicPtr, AtomicU8};

/// [`Collector`] is a garbage collector that reclaims thread-locally unreachable instances
/// when they are globally unreachable.
#[derive(Debug)]
#[derive(Debug, Default)]
#[repr(align(128))]
pub(super) struct Collector {
state: AtomicU8,
@@ -205,18 +206,9 @@ impl Collector {

/// Allocates a new [`Collector`].
fn alloc() -> *mut Collector {
let boxed = Box::new(Collector {
state: AtomicU8::new(Self::INACTIVE),
announcement: Epoch::default(),
next_epoch_update: Self::CADENCE,
has_garbage: false,
num_readers: 0,
previous_instance_link: Link::default().next_ptr(),
current_instance_link: Link::default().next_ptr(),
next_instance_link: Link::default().next_ptr(),
next_link: AtomicPtr::default(),
link: Link::default(),
});
let boxed = Box::new(Collector::default());
boxed.state.store(Self::INACTIVE, Relaxed);

let ptr = Box::into_raw(boxed);
let mut current = GLOBAL_ROOT.chain_head.load(Relaxed);
loop {
@@ -297,44 +289,13 @@ impl Collector {

/// Scans the [`Collector`] instances to update the global epoch.
unsafe fn scan(collector_ptr: *mut Collector) -> bool {
debug_assert_eq!((*collector_ptr).state.load(Relaxed) & Self::INACTIVE, 0);
debug_assert_eq!(
(*collector_ptr).state.load(Relaxed),
u8::from((*collector_ptr).announcement)
);
debug_assert_eq!((*collector_ptr).state.load(Relaxed) & Self::INVALID, 0);

// Only one thread that acquires the chain lock is allowed to scan the thread-local
// collectors.
let lock_result = GLOBAL_ROOT
.chain_head
.fetch_update(Acquire, Acquire, |p| {
let tag = Tag::into_tag(p);
if tag == Tag::First || tag == Tag::Both {
None
} else {
Some(Tag::update_tag(p, Tag::First).cast_mut())
}
})
.map(|p| Tag::unset_tag(p).cast_mut());
let lock_result = Self::lock_chain();
if let Ok(mut current_collector_ptr) = lock_result {
let _guard = ExitGuard::new((), |()| {
// Unlock the chain.
loop {
let result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| {
let tag = Tag::into_tag(p);
debug_assert!(tag == Tag::First || tag == Tag::Both);
let new_tag = if tag == Tag::First {
Tag::None
} else {
Tag::Second
};
Some(Tag::update_tag(p, new_tag).cast_mut())
});
if result.is_ok() {
break;
}
}
});
let _guard = ExitGuard::new((), |()| Self::unlock_chain());

let known_epoch = (*collector_ptr).state.load(Relaxed);
let mut update_global_epoch = true;
@@ -396,6 +357,80 @@ impl Collector {

false
}

/// Clears the [`Collector`] chain if all of the [`Collector`] instances in it are invalid.
unsafe fn clear_chain() -> bool {
let lock_result = Self::lock_chain();
if let Ok(collector_head) = lock_result {
let _guard = ExitGuard::new((), |()| Self::unlock_chain());

let mut current_collector_ptr = collector_head;
while !current_collector_ptr.is_null() {
if ((*current_collector_ptr).state.load(Relaxed) & Self::INVALID) == 0 {
return false;
}
current_collector_ptr = (*current_collector_ptr).next_link.load(Relaxed);
}

// Reaching here means that there is no `Ptr` that possibly sees any garbage instances
// in those `Collector` instances in the chain.
let result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| {
if Tag::unset_tag(p) == collector_head {
let tag = Tag::into_tag(p);
debug_assert!(tag == Tag::First || tag == Tag::Both);
Some(Tag::update_tag(ptr::null::<Collector>(), tag).cast_mut())
} else {
None
}
});

if result.is_ok() {
let mut current_collector_ptr = collector_head;
while !current_collector_ptr.is_null() {
let next_collector_ptr = (*current_collector_ptr).next_link.load(Relaxed);
drop(Box::from_raw(current_collector_ptr));
current_collector_ptr = next_collector_ptr;
}
return true;
}
}
false
}

/// Locks the chain.
fn lock_chain() -> Result<*mut Collector, *mut Collector> {
GLOBAL_ROOT
.chain_head
.fetch_update(Acquire, Acquire, |p| {
let tag = Tag::into_tag(p);
if tag == Tag::First || tag == Tag::Both {
None
} else {
Some(Tag::update_tag(p, Tag::First).cast_mut())
}
})
.map(|p| Tag::unset_tag(p).cast_mut())
}

/// Unlocks the chain.
fn unlock_chain() {
loop {
let result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| {
let tag = Tag::into_tag(p);
debug_assert!(tag == Tag::First || tag == Tag::Both);
let new_tag = if tag == Tag::First {
Tag::None
} else {
// Retain the mark.
Tag::Second
};
Some(Tag::update_tag(p, new_tag).cast_mut())
});
if result.is_ok() {
break;
}
}
}
}

impl Drop for Collector {
@@ -431,7 +466,7 @@ impl Drop for CollectorAnchor {
#[inline]
fn drop(&mut self) {
unsafe {
try_drop_local_collector();
clear_local_collector();
}
}
}
@@ -449,35 +484,23 @@ fn mark_scan_enforced() {
});
}

/// Tries to drop the local [`Collector`] if it is the sole survivor.
///
/// # Safety
///
/// The function is safe to call only when the thread is being joined.
unsafe fn try_drop_local_collector() {
let collector_ptr = LOCAL_COLLECTOR.with(|local_collector| local_collector.load(Relaxed));
if collector_ptr.is_null() {
return;
}
let chain_head_ptr = GLOBAL_ROOT.chain_head.load(Relaxed);
if (*collector_ptr).next_link.load(Relaxed).is_null()
&& ptr::eq(collector_ptr, chain_head_ptr)
&& GLOBAL_ROOT
.chain_head
.compare_exchange(chain_head_ptr, ptr::null_mut(), Relaxed, Relaxed)
.is_ok()
{
// If it is the head, and the only `Collector` in the chain, drop it here.
//
// The `Collector` needs to be cleared before being dropped since nested `Collectible`s may
// access the `Collector`, causing trouble with `MIRI`.
Collector::clear_for_drop(collector_ptr);
drop(Box::from_raw(collector_ptr));
return;
}
/// Tries to clear the local [`Collector`] and the chain.
unsafe fn clear_local_collector() {
LOCAL_COLLECTOR.with(|local_collector| {
let collector_ptr = local_collector.load(Relaxed);
if !collector_ptr.is_null() {
(*collector_ptr).state.fetch_or(Collector::INVALID, Release);
}

let mut temp_collector = Collector::default();
local_collector.store(addr_of_mut!(temp_collector), Relaxed);
if !Collector::clear_chain() {
mark_scan_enforced();
}

(*collector_ptr).state.fetch_or(Collector::INVALID, Release);
mark_scan_enforced();
Collector::clear_for_drop(addr_of_mut!(temp_collector));
local_collector.store(ptr::null_mut(), Relaxed);
});
}

thread_local! {
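
The extracted `lock_chain`/`unlock_chain` helpers implement a tiny spin lock by stashing a tag bit in the `chain_head` pointer via `fetch_update`. A standalone sketch of the same idea on a plain `AtomicPtr`, using bit 0 as the lock flag; the names and the single-bit tag are illustrative, not the crate's `Tag` API:

```rust
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};

const LOCK_BIT: usize = 1;

/// Tries to set the lock bit; returns the untagged head pointer on success.
fn lock_chain<T>(head: &AtomicPtr<T>) -> Result<*mut T, *mut T> {
    head.fetch_update(Acquire, Acquire, |p| {
        if (p as usize) & LOCK_BIT == 0 {
            // The chain is unlocked: set the lock bit.
            Some(((p as usize) | LOCK_BIT) as *mut T)
        } else {
            // Another thread holds the lock.
            None
        }
    })
    .map(|p| ((p as usize) & !LOCK_BIT) as *mut T)
    .map_err(|p| ((p as usize) & !LOCK_BIT) as *mut T)
}

/// Clears the lock bit while keeping the stored pointer intact.
fn unlock_chain<T>(head: &AtomicPtr<T>) {
    // The closure never returns `None`, so this update cannot fail.
    let _ = head.fetch_update(Release, Relaxed, |p| {
        Some(((p as usize) & !LOCK_BIT) as *mut T)
    });
}

fn main() {
    let node = Box::into_raw(Box::new(42_u32));
    let head = AtomicPtr::new(node);

    let locked = lock_chain(&head).expect("uncontended lock must succeed");
    assert_eq!(locked, node);
    unlock_chain(&head);

    // Reclaim the node allocated for this demonstration.
    unsafe { drop(Box::from_raw(head.load(Relaxed))) };
}
```
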
2 changes: 1 addition & 1 deletion src/exit_guard.rs
@@ -4,7 +4,7 @@
use std::ops::{Deref, DerefMut};

/// [`ExitGuard`] captures the environment and invokes the defined closure at the end of the scope.
pub(crate) struct ExitGuard<T, F: FnOnce(T)> {
pub(super) struct ExitGuard<T, F: FnOnce(T)> {
drop_callback: Option<(T, F)>,
}

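
`ExitGuard` is a small RAII helper: it captures a value and invokes a closure on it when the scope ends, which is how `scan` and `clear_chain` above guarantee that the chain is unlocked on every exit path. A minimal sketch of the pattern; the `new` constructor shown here is assumed for illustration:

```rust
struct ExitGuard<T, F: FnOnce(T)> {
    drop_callback: Option<(T, F)>,
}

impl<T, F: FnOnce(T)> ExitGuard<T, F> {
    /// Captures `value` and arranges for `callback(value)` to run at scope exit.
    fn new(value: T, callback: F) -> Self {
        Self { drop_callback: Some((value, callback)) }
    }
}

impl<T, F: FnOnce(T)> Drop for ExitGuard<T, F> {
    fn drop(&mut self) {
        if let Some((value, callback)) = self.drop_callback.take() {
            callback(value);
        }
    }
}

fn main() {
    let _unlock = ExitGuard::new((), |()| println!("cleanup runs even on early return"));
    println!("work happens first");
}
```
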
