From a47b8cea2c04d45c7e681b380e7e476290c4356a Mon Sep 17 00:00:00 2001
From: Артём Павлов [Artyom Pavlov]
Date: Tue, 21 May 2024 03:25:55 +0300
Subject: [PATCH 1/6] Move `Weak` into a separate module

---
 src/lib.rs       |  1 +
 src/netbsd.rs    | 15 +++++++-----
 src/util_libc.rs | 64 +-----------------------------------------------
 src/weak.rs      | 59 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 70 insertions(+), 69 deletions(-)
 create mode 100644 src/weak.rs

diff --git a/src/lib.rs b/src/lib.rs
index bc3695b6..1de548d2 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -306,6 +306,7 @@ cfg_if! {
         #[path = "solaris.rs"] mod imp;
     } else if #[cfg(target_os = "netbsd")] {
         mod util_libc;
+        mod weak;
         #[path = "netbsd.rs"] mod imp;
     } else if #[cfg(target_os = "fuchsia")] {
         #[path = "fuchsia.rs"] mod imp;
diff --git a/src/netbsd.rs b/src/netbsd.rs
index b8a770f5..d02400db 100644
--- a/src/netbsd.rs
+++ b/src/netbsd.rs
@@ -1,9 +1,6 @@
 //! Implementation for NetBSD
-use crate::{
-    util_libc::{sys_fill_exact, Weak},
-    Error,
-};
-use core::{mem::MaybeUninit, ptr};
+use crate::{util_libc::sys_fill_exact, weak::Weak, Error};
+use core::{ffi::c_void, mem::MaybeUninit, ptr};
 
 fn kern_arnd(buf: &mut [MaybeUninit<u8>]) -> libc::ssize_t {
     static MIB: [libc::c_int; 2] = [libc::CTL_KERN, libc::KERN_ARND];
@@ -28,8 +25,14 @@
 type GetRandomFn = unsafe extern "C" fn(*mut u8, libc::size_t, libc::c_uint) -> libc::ssize_t;
 
 pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    fn link_getrandom() -> *mut c_void {
+        static NAME: &[u8] = b"getrandom\0";
+        unsafe { libc::dlsym(libc::RTLD_DEFAULT, NAME.as_ptr() as *const _) }
+    }
+
     // getrandom(2) was introduced in NetBSD 10.0
-    static GETRANDOM: Weak = unsafe { Weak::new("getrandom\0") };
+    static GETRANDOM: Weak = Weak::new(link_getrandom);
+
     if let Some(fptr) = GETRANDOM.ptr() {
         let func: GetRandomFn = unsafe { core::mem::transmute(fptr) };
         return sys_fill_exact(dest, |buf| unsafe {
diff --git a/src/util_libc.rs b/src/util_libc.rs
index 129362d5..765d5fd4 100644
--- a/src/util_libc.rs
+++ b/src/util_libc.rs
@@ -1,12 +1,6 @@
 #![allow(dead_code)]
 use crate::Error;
-use core::{
-    mem::MaybeUninit,
-    num::NonZeroU32,
-    ptr::NonNull,
-    sync::atomic::{fence, AtomicPtr, Ordering},
-};
-use libc::c_void;
+use core::{mem::MaybeUninit, num::NonZeroU32};
 
 cfg_if! {
     if #[cfg(any(target_os = "netbsd", target_os = "openbsd", target_os = "android"))] {
@@ -76,62 +70,6 @@ pub fn sys_fill_exact(
     Ok(())
 }
 
-// A "weak" binding to a C function that may or may not be present at runtime.
-// Used for supporting newer OS features while still building on older systems.
-// Based off of the DlsymWeak struct in libstd:
-// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
-// except that the caller must manually cast self.ptr() to a function pointer.
-pub struct Weak {
-    name: &'static str,
-    addr: AtomicPtr<c_void>,
-}
-
-impl Weak {
-    // A non-null pointer value which indicates we are uninitialized. This
-    // constant should ideally not be a valid address of a function pointer.
-    // However, if by chance libc::dlsym does return UNINIT, there will not
-    // be undefined behavior. libc::dlsym will just be called each time ptr()
-    // is called. This would be inefficient, but correct.
-    // TODO: Replace with core::ptr::invalid_mut(1) when that is stable.
-    const UNINIT: *mut c_void = 1 as *mut c_void;
-
-    // Construct a binding to a C function with a given name. This function is
-    // unsafe because `name` _must_ be null terminated.
-    pub const unsafe fn new(name: &'static str) -> Self {
-        Self {
-            name,
-            addr: AtomicPtr::new(Self::UNINIT),
-        }
-    }
-
-    // Return the address of a function if present at runtime. Otherwise,
-    // return None. Multiple callers can call ptr() concurrently. It will
-    // always return _some_ value returned by libc::dlsym. However, the
-    // dlsym function may be called multiple times.
-    pub fn ptr(&self) -> Option<NonNull<c_void>> {
-        // Despite having only a single atomic variable (self.addr), we still
-        // cannot always use Ordering::Relaxed, as we need to make sure a
-        // successful call to dlsym() is "ordered before" any data read through
-        // the returned pointer (which occurs when the function is called).
-        // Our implementation mirrors that of the one in libstd, meaning that
-        // the use of non-Relaxed operations is probably unnecessary.
-        match self.addr.load(Ordering::Relaxed) {
-            Self::UNINIT => {
-                let symbol = self.name.as_ptr() as *const _;
-                let addr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, symbol) };
-                // Synchronizes with the Acquire fence below
-                self.addr.store(addr, Ordering::Release);
-                NonNull::new(addr)
-            }
-            addr => {
-                let func = NonNull::new(addr)?;
-                fence(Ordering::Acquire);
-                Some(func)
-            }
-        }
-    }
-}
-
 // SAFETY: path must be null terminated, FD must be manually closed.
 pub unsafe fn open_readonly(path: &str) -> Result<libc::c_int, Error> {
     debug_assert_eq!(path.as_bytes().last(), Some(&0));
diff --git a/src/weak.rs b/src/weak.rs
new file mode 100644
index 00000000..9690caee
--- /dev/null
+++ b/src/weak.rs
@@ -0,0 +1,59 @@
+use core::{
+    ffi::c_void,
+    ptr::NonNull,
+    sync::atomic::{fence, AtomicPtr, Ordering},
+};
+
+// A "weak" binding to a C function that may or may not be present at runtime.
+// Used for supporting newer OS features while still building on older systems.
+// Based off of the DlsymWeak struct in libstd:
+// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
+// except that the caller must manually cast self.ptr() to a function pointer.
+pub struct Weak {
+    addr: AtomicPtr<c_void>,
+    link_fn: fn() -> *mut c_void,
+}
+
+impl Weak {
+    // A non-null pointer value which indicates we are uninitialized. This
+    // constant should ideally not be a valid address of a function pointer.
+    // However, if by chance libc::dlsym does return UNINIT, there will not
+    // be undefined behavior. libc::dlsym will just be called each time ptr()
+    // is called. This would be inefficient, but correct.
+    // TODO: Replace with core::ptr::invalid_mut(1) when that is stable.
+    const UNINIT: *mut c_void = 1 as *mut c_void;
+
+    // Construct a weak binding to a C function.
+    pub const fn new(link_fn: fn() -> *mut c_void) -> Self {
+        Self {
+            addr: AtomicPtr::new(Self::UNINIT),
+            link_fn,
+        }
+    }
+
+    // Return the address of a function if present at runtime. Otherwise,
+    // return None. Multiple callers can call ptr() concurrently. It will
+    // always return _some_ value returned by libc::dlsym. However, the
+    // dlsym function may be called multiple times.
+    pub fn ptr(&self) -> Option<NonNull<c_void>> {
+        // Despite having only a single atomic variable (self.addr), we still
+        // cannot always use Ordering::Relaxed, as we need to make sure a
+        // successful call to dlsym() is "ordered before" any data read through
+        // the returned pointer (which occurs when the function is called).
+        // Our implementation mirrors that of the one in libstd, meaning that
+        // the use of non-Relaxed operations is probably unnecessary.
+        match self.addr.load(Ordering::Relaxed) {
+            Self::UNINIT => {
+                let addr = (self.link_fn)();
+                // Synchronizes with the Acquire fence below
+                self.addr.store(addr, Ordering::Release);
+                NonNull::new(addr)
+            }
+            addr => {
+                let func = NonNull::new(addr)?;
+                fence(Ordering::Acquire);
+                Some(func)
+            }
+        }
+    }
+}
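
The key refactor in patch 1 is that the symbol name moves out of `Weak` and into a caller-supplied `link_fn: fn() -> *mut c_void`, which is why `Weak::new` is no longer `unsafe`: the NUL-termination requirement now lives inside the resolver function instead of being a contract on the constructor. A minimal sketch of a resolver in the same style (`getentropy` is a hypothetical example symbol, not part of the patch; assumes a Unix target and the `libc` crate):

    use core::ffi::c_void;

    // Resolver in the style of `link_getrandom` above: performs the dlsym
    // lookup and returns a possibly-null pointer to the symbol.
    fn link_getentropy() -> *mut c_void {
        static NAME: &[u8] = b"getentropy\0";
        unsafe { libc::dlsym(libc::RTLD_DEFAULT, NAME.as_ptr() as *const _) }
    }

    fn main() {
        // With the patched API the resolver is supplied at construction time,
        // e.g.: static GETENTROPY: Weak = Weak::new(link_getentropy);
        // and GETENTROPY.ptr() caches whatever the resolver returns.
        println!("getentropy present: {}", !link_getentropy().is_null());
    }
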
{ #[path = "solaris.rs"] mod imp; } else if #[cfg(target_os = "netbsd")] { mod util_libc; - mod weak; + mod cached_ptr; #[path = "netbsd.rs"] mod imp; } else if #[cfg(target_os = "fuchsia")] { #[path = "fuchsia.rs"] mod imp; diff --git a/src/netbsd.rs b/src/netbsd.rs index d02400db..1095c1c3 100644 --- a/src/netbsd.rs +++ b/src/netbsd.rs @@ -1,5 +1,5 @@ //! Implementation for NetBSD -use crate::{util_libc::sys_fill_exact, weak::Weak, Error}; +use crate::{cached_ptr::CachedPtr, util_libc::sys_fill_exact, Error}; use core::{ffi::c_void, mem::MaybeUninit, ptr}; fn kern_arnd(buf: &mut [MaybeUninit]) -> libc::ssize_t { @@ -24,16 +24,17 @@ fn kern_arnd(buf: &mut [MaybeUninit]) -> libc::ssize_t { type GetRandomFn = unsafe extern "C" fn(*mut u8, libc::size_t, libc::c_uint) -> libc::ssize_t; -pub fn getrandom_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { - fn link_getrandom() -> *mut c_void { - static NAME: &[u8] = b"getrandom\0"; - unsafe { libc::dlsym(libc::RTLD_DEFAULT, NAME.as_ptr() as *const _) } - } +// getrandom(2) was introduced in NetBSD 10.0 +static GETRANDOM: CachedPtr = CachedPtr::new(); - // getrandom(2) was introduced in NetBSD 10.0 - static GETRANDOM: Weak = Weak::new(link_getrandom); +fn dlsym_getrandom() -> *mut c_void { + static NAME: &[u8] = b"getrandom\0"; + let name_ptr = NAME.as_ptr() as *const libc::c_char; + unsafe { libc::dlsym(libc::RTLD_DEFAULT, name_ptr) } +} - if let Some(fptr) = GETRANDOM.ptr() { +pub fn getrandom_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + if let Some(fptr) = GETRANDOM.get(dlsym_getrandom) { let func: GetRandomFn = unsafe { core::mem::transmute(fptr) }; return sys_fill_exact(dest, |buf| unsafe { func(buf.as_mut_ptr() as *mut u8, buf.len(), 0) diff --git a/src/weak.rs b/src/weak.rs deleted file mode 100644 index 9690caee..00000000 --- a/src/weak.rs +++ /dev/null @@ -1,59 +0,0 @@ -use core::{ - ffi::c_void, - ptr::NonNull, - sync::atomic::{fence, AtomicPtr, Ordering}, -}; - -// A "weak" binding to a C function that may or may not be present at runtime. -// Used for supporting newer OS features while still building on older systems. -// Based off of the DlsymWeak struct in libstd: -// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84 -// except that the caller must manually cast self.ptr() to a function pointer. -pub struct Weak { - addr: AtomicPtr, - link_fn: fn() -> *mut c_void, -} - -impl Weak { - // A non-null pointer value which indicates we are uninitialized. This - // constant should ideally not be a valid address of a function pointer. - // However, if by chance libc::dlsym does return UNINIT, there will not - // be undefined behavior. libc::dlsym will just be called each time ptr() - // is called. This would be inefficient, but correct. - // TODO: Replace with core::ptr::invalid_mut(1) when that is stable. - const UNINIT: *mut c_void = 1 as *mut c_void; - - // Construct a weak binding a C function. - pub const fn new(link_fn: fn() -> *mut c_void) -> Self { - Self { - addr: AtomicPtr::new(Self::UNINIT), - link_fn, - } - } - - // Return the address of a function if present at runtime. Otherwise, - // return None. Multiple callers can call ptr() concurrently. It will - // always return _some_ value returned by libc::dlsym. However, the - // dlsym function may be called multiple times. 
- pub fn ptr(&self) -> Option> { - // Despite having only a single atomic variable (self.addr), we still - // cannot always use Ordering::Relaxed, as we need to make sure a - // successful call to dlsym() is "ordered before" any data read through - // the returned pointer (which occurs when the function is called). - // Our implementation mirrors that of the one in libstd, meaning that - // the use of non-Relaxed operations is probably unnecessary. - match self.addr.load(Ordering::Relaxed) { - Self::UNINIT => { - let addr = (self.link_fn)(); - // Synchronizes with the Acquire fence below - self.addr.store(addr, Ordering::Release); - NonNull::new(addr) - } - addr => { - let func = NonNull::new(addr)?; - fence(Ordering::Acquire); - Some(func) - } - } - } -} From 105ce267d1778dfc8767c44d86a004738db1129e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Tue, 21 May 2024 14:33:15 +0300 Subject: [PATCH 3/6] Rename to `LazyPtr` and move to the `lazy` module --- src/cached_ptr.rs | 63 -------------------------------------------- src/lazy.rs | 66 ++++++++++++++++++++++++++++++++++++++++++++--- src/lib.rs | 2 +- src/netbsd.rs | 7 ++--- 4 files changed, 68 insertions(+), 70 deletions(-) delete mode 100644 src/cached_ptr.rs diff --git a/src/cached_ptr.rs b/src/cached_ptr.rs deleted file mode 100644 index 68b96e70..00000000 --- a/src/cached_ptr.rs +++ /dev/null @@ -1,63 +0,0 @@ -use core::{ - ffi::c_void, - ptr::NonNull, - sync::atomic::{fence, AtomicPtr, Ordering}, -}; - -/// Cached pointer. -/// -/// It's intended to be used for weak linking of a C function that may -/// or may not be present at runtime. -/// -/// Based off of the DlsymWeak struct in libstd: -/// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84 -/// except that the caller must manually cast self.ptr() to a function pointer. -pub struct CachedPtr { - addr: AtomicPtr, -} - -impl CachedPtr { - /// A non-null pointer value which indicates we are uninitialized. - /// - /// This constant should ideally not be a valid pointer. - /// However, if by chance initialization function passed to the `get` - /// method does return UNINIT, there will not be undefined behavior. - /// The initialization function will just be called each time `get()` - /// is called. This would be inefficient, but correct. - const UNINIT: *mut c_void = !0usize as *mut c_void; - - /// Construct new `CachedPtr` in uninitialized state. - pub const fn new() -> Self { - Self { - addr: AtomicPtr::new(Self::UNINIT), - } - } - - /// Return cached address initialized by `f`. - /// - /// Multiple callers can call `get` concurrently. It will always return - /// _some_ value returned by `f`. However, `f` may be called multiple times. - /// - /// If cached pointer is null, this method returns `None`. - pub fn get(&self, f: fn() -> *mut c_void) -> Option> { - // Despite having only a single atomic variable (self.addr), we still - // cannot always use Ordering::Relaxed, as we need to make sure a - // successful call to `f` is "ordered before" any data read through - // the returned pointer (which occurs when the function is called). - // Our implementation mirrors that of the one in libstd, meaning that - // the use of non-Relaxed operations is probably unnecessary. 
From 105ce267d1778dfc8767c44d86a004738db1129e Mon Sep 17 00:00:00 2001
From: Артём Павлов [Artyom Pavlov]
Date: Tue, 21 May 2024 14:33:15 +0300
Subject: [PATCH 3/6] Rename to `LazyPtr` and move to the `lazy` module

---
 src/cached_ptr.rs | 63 --------------------------------------------
 src/lazy.rs       | 66 ++++++++++++++++++++++++++++++++++++++++++++---
 src/lib.rs        |  2 +-
 src/netbsd.rs     |  7 ++---
 4 files changed, 68 insertions(+), 70 deletions(-)
 delete mode 100644 src/cached_ptr.rs

diff --git a/src/cached_ptr.rs b/src/cached_ptr.rs
deleted file mode 100644
index 68b96e70..00000000
--- a/src/cached_ptr.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-use core::{
-    ffi::c_void,
-    ptr::NonNull,
-    sync::atomic::{fence, AtomicPtr, Ordering},
-};
-
-/// Cached pointer.
-///
-/// It's intended to be used for weak linking of a C function that may
-/// or may not be present at runtime.
-///
-/// Based off of the DlsymWeak struct in libstd:
-/// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
-/// except that the caller must manually cast self.ptr() to a function pointer.
-pub struct CachedPtr {
-    addr: AtomicPtr<c_void>,
-}
-
-impl CachedPtr {
-    /// A non-null pointer value which indicates we are uninitialized.
-    ///
-    /// This constant should ideally not be a valid pointer.
-    /// However, if by chance initialization function passed to the `get`
-    /// method does return UNINIT, there will not be undefined behavior.
-    /// The initialization function will just be called each time `get()`
-    /// is called. This would be inefficient, but correct.
-    const UNINIT: *mut c_void = !0usize as *mut c_void;
-
-    /// Construct new `CachedPtr` in uninitialized state.
-    pub const fn new() -> Self {
-        Self {
-            addr: AtomicPtr::new(Self::UNINIT),
-        }
-    }
-
-    /// Return cached address initialized by `f`.
-    ///
-    /// Multiple callers can call `get` concurrently. It will always return
-    /// _some_ value returned by `f`. However, `f` may be called multiple times.
-    ///
-    /// If cached pointer is null, this method returns `None`.
-    pub fn get(&self, f: fn() -> *mut c_void) -> Option<NonNull<c_void>> {
-        // Despite having only a single atomic variable (self.addr), we still
-        // cannot always use Ordering::Relaxed, as we need to make sure a
-        // successful call to `f` is "ordered before" any data read through
-        // the returned pointer (which occurs when the function is called).
-        // Our implementation mirrors that of the one in libstd, meaning that
-        // the use of non-Relaxed operations is probably unnecessary.
-        match self.addr.load(Ordering::Relaxed) {
-            Self::UNINIT => {
-                let addr = f();
-                // Synchronizes with the Acquire fence below
-                self.addr.store(addr, Ordering::Release);
-                NonNull::new(addr)
-            }
-            addr => {
-                let func = NonNull::new(addr)?;
-                fence(Ordering::Acquire);
-                Some(func)
-            }
-        }
-    }
-}
diff --git a/src/lazy.rs b/src/lazy.rs
index 100ce1ea..7823d92a 100644
--- a/src/lazy.rs
+++ b/src/lazy.rs
@@ -1,3 +1,4 @@
+#![allow(dead_code)]
 use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
 // This structure represents a lazily initialized static usize value. Useful
 // when it is preferable to just rerun initialization instead of locking.
@@ -21,13 +22,13 @@
 pub(crate) struct LazyUsize(AtomicUsize);
 
 impl LazyUsize {
+    // The initialization is not completed.
+    const UNINIT: usize = usize::max_value();
+
     pub const fn new() -> Self {
         Self(AtomicUsize::new(Self::UNINIT))
     }
 
-    // The initialization is not completed.
-    pub const UNINIT: usize = usize::max_value();
-
     // Runs the init() function at most once, returning the value of some run of
     // init(). Multiple callers can run their init() functions in parallel.
     // init() should always return the same value, if it succeeds.
@@ -54,3 +55,62 @@ impl LazyBool {
         self.0.unsync_init(|| init() as usize) != 0
     }
 }
+
+use core::{
+    ffi::c_void,
+    sync::atomic::{fence, AtomicPtr, Ordering},
+};
+
+// This structure represents a lazily initialized static pointer value.
+///
+/// It's intended to be used for weak linking of a C function that may
+/// or may not be present at runtime.
+///
+/// Based off of the DlsymWeak struct in libstd:
+/// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
+/// except that the caller must manually cast self.ptr() to a function pointer.
+pub struct LazyPtr {
+    addr: AtomicPtr<c_void>,
+}
+
+impl LazyPtr {
+    /// A non-null pointer value which indicates we are uninitialized.
+    ///
+    /// This constant should ideally not be a valid pointer.
+    /// However, if by chance initialization function passed to the `get`
+    /// method does return UNINIT, there will not be undefined behavior.
+    /// The initialization function will just be called each time `get()`
+    /// is called. This would be inefficient, but correct.
+    const UNINIT: *mut c_void = !0usize as *mut c_void;
+
+    /// Construct new `LazyPtr` in uninitialized state.
+    pub const fn new() -> Self {
+        Self {
+            addr: AtomicPtr::new(Self::UNINIT),
+        }
+    }
+
+    // Runs the init() function at most once, returning the value of some run of
+    // init(). Multiple callers can run their init() functions in parallel.
+    // init() should always return the same value, if it succeeds.
+    pub fn unsync_init(&self, f: impl Fn() -> *mut c_void) -> *mut c_void {
+        // Despite having only a single atomic variable (self.addr), we still
+        // cannot always use Ordering::Relaxed, as we need to make sure a
+        // successful call to `f` is "ordered before" any data read through
+        // the returned pointer (which occurs when the function is called).
+        // Our implementation mirrors that of the one in libstd, meaning that
+        // the use of non-Relaxed operations is probably unnecessary.
+        match self.addr.load(Ordering::Relaxed) {
+            Self::UNINIT => {
+                let addr = f();
+                // Synchronizes with the Acquire fence below
+                self.addr.store(addr, Ordering::Release);
+                addr
+            }
+            addr => {
+                fence(Ordering::Acquire);
+                addr
+            }
+        }
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index 19c9b298..eb8c7fc5 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -306,7 +306,7 @@ cfg_if! {
         #[path = "solaris.rs"] mod imp;
     } else if #[cfg(target_os = "netbsd")] {
         mod util_libc;
-        mod cached_ptr;
+        mod lazy;
         #[path = "netbsd.rs"] mod imp;
     } else if #[cfg(target_os = "fuchsia")] {
         #[path = "fuchsia.rs"] mod imp;
diff --git a/src/netbsd.rs b/src/netbsd.rs
index 1095c1c3..4211a865 100644
--- a/src/netbsd.rs
+++ b/src/netbsd.rs
@@ -1,5 +1,5 @@
 //! Implementation for NetBSD
-use crate::{cached_ptr::CachedPtr, util_libc::sys_fill_exact, Error};
+use crate::{lazy::LazyPtr, util_libc::sys_fill_exact, Error};
 use core::{ffi::c_void, mem::MaybeUninit, ptr};
 
 fn kern_arnd(buf: &mut [MaybeUninit<u8>]) -> libc::ssize_t {
@@ -25,7 +25,7 @@ fn kern_arnd(buf: &mut [MaybeUninit<u8>]) -> libc::ssize_t {
 type GetRandomFn = unsafe extern "C" fn(*mut u8, libc::size_t, libc::c_uint) -> libc::ssize_t;
 
 // getrandom(2) was introduced in NetBSD 10.0
-static GETRANDOM: CachedPtr = CachedPtr::new();
+static GETRANDOM: LazyPtr = LazyPtr::new();
 
 fn dlsym_getrandom() -> *mut c_void {
     static NAME: &[u8] = b"getrandom\0";
@@ -34,7 +34,8 @@ fn dlsym_getrandom() -> *mut c_void {
 }
 
 pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    if let Some(fptr) = GETRANDOM.get(dlsym_getrandom) {
+    let fptr = GETRANDOM.unsync_init(dlsym_getrandom);
+    if !fptr.is_null() {
         let func: GetRandomFn = unsafe { core::mem::transmute(fptr) };
         return sys_fill_exact(dest, |buf| unsafe {
             func(buf.as_mut_ptr() as *mut u8, buf.len(), 0)
From c98c1149abf8877f50d5771dc6fc97d0981f75c2 Mon Sep 17 00:00:00 2001
From: Артём Павлов [Artyom Pavlov]
Date: Tue, 21 May 2024 14:39:44 +0300
Subject: [PATCH 4/6] Tweak imports

---
 src/lazy.rs | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/src/lazy.rs b/src/lazy.rs
index 7823d92a..3e761953 100644
--- a/src/lazy.rs
+++ b/src/lazy.rs
@@ -1,5 +1,8 @@
 #![allow(dead_code)]
-use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+use core::{
+    ffi::c_void,
+    sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering},
+};
 
 // This structure represents a lazily initialized static usize value. Useful
 // when it is preferable to just rerun initialization instead of locking.
@@ -34,10 +37,10 @@ impl LazyUsize {
     // init() should always return the same value, if it succeeds.
     pub fn unsync_init(&self, init: impl FnOnce() -> usize) -> usize {
         // Relaxed ordering is fine, as we only have a single atomic variable.
-        let mut val = self.0.load(Relaxed);
+        let mut val = self.0.load(Ordering::Relaxed);
         if val == Self::UNINIT {
             val = init();
-            self.0.store(val, Relaxed);
+            self.0.store(val, Ordering::Relaxed);
         }
         val
     }
@@ -56,11 +59,6 @@ impl LazyBool {
         self.0.unsync_init(|| init() as usize) != 0
     }
 }
-use core::{
-    ffi::c_void,
-    sync::atomic::{fence, AtomicPtr, Ordering},
-};
-
 // This structure represents a lazily initialized static pointer value.
 ///
 /// It's intended to be used for weak linking of a C function that may
From 9ef7aaadf1549ce224e813d163d82110bbb04208 Mon Sep 17 00:00:00 2001
From: Артём Павлов [Artyom Pavlov]
Date: Tue, 21 May 2024 14:53:16 +0300
Subject: [PATCH 5/6] Use Acquire load

---
 src/lazy.rs | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/src/lazy.rs b/src/lazy.rs
index 3e761953..9228d06b 100644
--- a/src/lazy.rs
+++ b/src/lazy.rs
@@ -1,7 +1,7 @@
 #![allow(dead_code)]
 use core::{
     ffi::c_void,
-    sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering},
+    sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
 };
 
 // This structure represents a lazily initialized static usize value. Useful
@@ -98,17 +98,13 @@ impl LazyPtr {
         // the returned pointer (which occurs when the function is called).
         // Our implementation mirrors that of the one in libstd, meaning that
         // the use of non-Relaxed operations is probably unnecessary.
-        match self.addr.load(Ordering::Relaxed) {
+        match self.addr.load(Ordering::Acquire) {
             Self::UNINIT => {
                 let addr = f();
-                // Synchronizes with the Acquire fence below
                 self.addr.store(addr, Ordering::Release);
                 addr
             }
-            addr => {
-                fence(Ordering::Acquire);
-                addr
-            }
+            addr => addr,
         }
     }
 }
From 6e0b5dd79c9757301379b42d40ee101db7d5cadc Mon Sep 17 00:00:00 2001
From: Artyom Pavlov
Date: Wed, 22 May 2024 05:11:44 +0300
Subject: [PATCH 6/6] review fixes

---
 src/lazy.rs | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/lazy.rs b/src/lazy.rs
index 9228d06b..693bb7ff 100644
--- a/src/lazy.rs
+++ b/src/lazy.rs
@@ -74,8 +74,8 @@ pub struct LazyPtr {
 impl LazyPtr {
     /// A non-null pointer value which indicates we are uninitialized.
     ///
-    /// This constant should ideally not be a valid pointer.
-    /// However, if by chance initialization function passed to the `get`
+    /// This constant should ideally not be a valid pointer. However,
+    /// if by chance initialization function passed to the `unsync_init`
     /// method does return UNINIT, there will not be undefined behavior.
     /// The initialization function will just be called each time `get()`
     /// is called. This would be inefficient, but correct.
@@ -91,16 +91,16 @@ impl LazyPtr {
     // Runs the init() function at most once, returning the value of some run of
     // init(). Multiple callers can run their init() functions in parallel.
     // init() should always return the same value, if it succeeds.
-    pub fn unsync_init(&self, f: impl Fn() -> *mut c_void) -> *mut c_void {
+    pub fn unsync_init(&self, init: impl Fn() -> *mut c_void) -> *mut c_void {
         // Despite having only a single atomic variable (self.addr), we still
         // cannot always use Ordering::Relaxed, as we need to make sure a
-        // successful call to `f` is "ordered before" any data read through
+        // successful call to `init` is "ordered before" any data read through
         // the returned pointer (which occurs when the function is called).
         // Our implementation mirrors that of the one in libstd, meaning that
         // the use of non-Relaxed operations is probably unnecessary.
         match self.addr.load(Ordering::Acquire) {
             Self::UNINIT => {
-                let addr = f();
+                let addr = init();
                 self.addr.store(addr, Ordering::Release);
                 addr
             }
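
Patches 5 and 6 fold the standalone fence into the load itself: `load(Acquire)` on the hit path establishes the same happens-before edge as `load(Relaxed)` followed by a conditional `fence(Acquire)`, at the cost of an Acquire load even on the first (miss) call. Condensed for reference, the shape of the helper at the end of the series (a sketch; the real type lives in src/lazy.rs and is crate-private):

    use std::{
        ffi::c_void,
        sync::atomic::{AtomicPtr, Ordering},
    };

    pub struct LazyPtr {
        addr: AtomicPtr<c_void>,
    }

    impl LazyPtr {
        // Sentinel meaning "not initialized yet"; if init() ever returned this
        // exact value, init() would simply be re-run on later calls.
        const UNINIT: *mut c_void = !0usize as *mut c_void;

        pub const fn new() -> Self {
            Self {
                addr: AtomicPtr::new(Self::UNINIT),
            }
        }

        pub fn unsync_init(&self, init: impl Fn() -> *mut c_void) -> *mut c_void {
            match self.addr.load(Ordering::Acquire) {
                Self::UNINIT => {
                    let addr = init();
                    // The Release store pairs with the Acquire load above as
                    // observed by other threads.
                    self.addr.store(addr, Ordering::Release);
                    addr
                }
                addr => addr,
            }
        }
    }

    fn main() {
        static PTR: LazyPtr = LazyPtr::new();
        // First call runs init(); subsequent calls take the Acquire-load path.
        let p = PTR.unsync_init(|| std::ptr::null_mut());
        assert!(p.is_null());
    }
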