Merge pull request #30 from bertptrs/remove-lazy-static

committed by GitHub on 2023-08-27 11:26:58 +02:00
7 changed files with 160 additions and 63 deletions

.github/workflows/… (CI workflow)

@@ -15,7 +15,7 @@ jobs:
strategy:
matrix:
rust:
- "1.63" # minimum stable rust version
- "1.70" # minimum stable rust version
- stable
- beta
- nightly

CHANGELOG.md

@@ -8,8 +8,9 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
### Added
-- The minimum supported Rust version is now defined as 1.63. Previously it was undefined.
+- The minimum supported Rust version is now defined as 1.70. Previously it was undefined.
- Wrappers for `std::sync` primitives can now be `const` constructed.
+- Add support for `std::sync::OnceLock`
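Taken together, the two `### Added` entries mean statics no longer need `lazy_static`. A small sketch of both (illustrative only; `COUNTER` and `CONFIG` are made-up names, not part of this diff):

```rust
use tracing_mutex::stdsync::{Mutex, OnceLock};

// `const` construction lets the wrappers back `static` items directly.
static COUNTER: Mutex<u32> = Mutex::new(0);
// The new OnceLock wrapper mirrors std::sync::OnceLock.
static CONFIG: OnceLock<String> = OnceLock::new();

fn main() {
    *COUNTER.lock().unwrap() += 1;
    let config = CONFIG.get_or_init(|| "default".to_owned());
    assert_eq!(*COUNTER.lock().unwrap(), 1);
    assert_eq!(config, "default");
}
```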
### Breaking

Cargo.toml

@@ -10,7 +10,7 @@ keywords = ["mutex", "rwlock", "once", "thread"]
description = "Ensure deadlock-free mutexes by allocating in order, or else."
readme = "README.md"
repository = "https://github.com/bertptrs/tracing-mutex"
-rust-version = "1.63"
+rust-version = "1.70"
[package.metadata.docs.rs]
# Build docs for all features so the documentation is more complete
@@ -19,7 +19,6 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
-lazy_static = "1"
lock_api = { version = "0.4", optional = true }
parking_lot = { version = "0.12", optional = true }

README.md

@@ -61,7 +61,7 @@ performance penalty in your production environment, this library also offers debug
when debug assertions are enabled, and to `Mutex` when they are not. Similar helper types are
available for other synchronization primitives.
-The minimum supported Rust version is 1.63. Increasing this is not considered a breaking change, but
+The minimum supported Rust version is 1.70. Increasing this is not considered a breaking change, but
will be avoided within semver-compatible releases if possible.
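To make the debug-only behaviour concrete, here is a sketch (not from the diff) of how the `stdsync` shims behave; with debug assertions enabled the final lock panics on the detected cycle, while a release build compiles to plain `std::sync` types:

```rust
use tracing_mutex::stdsync::Mutex;

fn main() {
    let a = Mutex::new(());
    let b = Mutex::new(());

    // Lock a before b: the tracing wrapper records the edge a -> b.
    let guard_a = a.lock().unwrap();
    drop(b.lock().unwrap());
    drop(guard_a);

    // Locking in the opposite order closes a cycle in the dependency graph.
    // With debug assertions this panics; in release builds these are plain
    // std mutexes and the mistake would go unnoticed until it deadlocks.
    let _guard_b = b.lock().unwrap();
    let _guard_a = a.lock().unwrap(); // panics in debug builds
}
```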
### Features

bors.toml

@@ -1,5 +1,5 @@
status = [
-'Rust project (1.63)',
+'Rust project (1.70)',
'Rust project (stable)',
'Rust project (beta)',
'Documentation build',

src/lib.rs

@@ -47,20 +47,17 @@
//! [paper]: https://whileydave.com/publications/pk07_jea/
#![cfg_attr(docsrs, feature(doc_cfg))]
use std::cell::RefCell;
-use std::cell::UnsafeCell;
use std::fmt;
-use std::marker::PhantomData;
-use std::mem::MaybeUninit;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Mutex;
use std::sync::MutexGuard;
-use std::sync::Once;
+use std::sync::OnceLock;
use std::sync::PoisonError;
-use lazy_static::lazy_static;
#[cfg(feature = "lockapi")]
#[cfg_attr(docsrs, doc(cfg(feature = "lockapi")))]
pub use lock_api;
@@ -79,11 +76,6 @@ pub mod lockapi;
pub mod parkinglot;
pub mod stdsync;
-/// Counter for Mutex IDs. Atomic avoids the need for locking.
-///
-/// Should be part of the `MutexID` impl but static items are not yet a thing.
-static ID_SEQUENCE: AtomicUsize = AtomicUsize::new(0);
thread_local! {
/// Stack to track which locks are held
///
@@ -92,10 +84,6 @@ thread_local! {
static HELD_LOCKS: RefCell<Vec<usize>> = RefCell::new(Vec::new());
}
-lazy_static! {
-static ref DEPENDENCY_GRAPH: Mutex<DiGraph<usize>> = Default::default();
-}
/// Dedicated ID type for Mutexes
///
/// # Unstable
@@ -114,6 +102,9 @@ impl MutexId {
/// This function may panic when there are no more mutex IDs available. The number of mutex IDs
/// is `usize::MAX - 1`, which should be plenty for most practical applications.
pub fn new() -> Self {
+// Counter for Mutex IDs. Atomic avoids the need for locking.
+static ID_SEQUENCE: AtomicUsize = AtomicUsize::new(0);
ID_SEQUENCE
.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |id| id.checked_add(1))
.map(Self)
@@ -207,17 +198,13 @@ impl Drop for MutexId {
///
/// This type can be largely replaced once std::lazy gets stabilized.
struct LazyMutexId {
-inner: UnsafeCell<MaybeUninit<MutexId>>,
-setter: Once,
-_marker: PhantomData<MutexId>,
+inner: OnceLock<MutexId>,
}
impl LazyMutexId {
pub const fn new() -> Self {
Self {
-inner: UnsafeCell::new(MaybeUninit::uninit()),
-setter: Once::new(),
-_marker: PhantomData,
+inner: OnceLock::new(),
}
}
}
@@ -234,44 +221,11 @@ impl Default for LazyMutexId {
}
}
-/// Safety: the UnsafeCell is guaranteed to only be accessed mutably from a `Once`.
-unsafe impl Sync for LazyMutexId {}
impl Deref for LazyMutexId {
type Target = MutexId;
fn deref(&self) -> &Self::Target {
-self.setter.call_once(|| {
-// Safety: this function is only called once, so only one mutable reference should exist
-// at a time.
-unsafe {
-*self.inner.get() = MaybeUninit::new(MutexId::new());
-}
-});
-// Safety: after the above Once runs, there are no longer any mutable references, so we can
-// hand this out safely.
-//
-// Explanation of this monstrosity:
-//
-// - Get a pointer to the data from the UnsafeCell
-// - Dereference that to get a reference to the underlying MaybeUninit
-// - Use as_ptr on MaybeUninit to get a pointer to the initialized MutexID
-// - Dereference the pointer to turn in into a reference as intended.
-//
-// This should get slightly nicer once `maybe_uninit_extra` is stabilized.
-unsafe { &*((*self.inner.get()).as_ptr()) }
-}
-}
-impl Drop for LazyMutexId {
-fn drop(&mut self) {
-if self.setter.is_completed() {
-// We have a valid mutex ID and need to drop it
-// Safety: we know that this pointer is valid because the initializer has successfully run.
-unsafe { (*self.inner.get()).assume_init_drop() };
-}
+self.inner.get_or_init(MutexId::new)
}
}
@@ -307,7 +261,10 @@ impl<'a> Drop for BorrowedMutex<'a> {
/// Get a reference to the current dependency graph
fn get_dependency_graph() -> impl DerefMut<Target = DiGraph<usize>> {
+static DEPENDENCY_GRAPH: OnceLock<Mutex<DiGraph<usize>>> = OnceLock::new();
DEPENDENCY_GRAPH
+.get_or_init(Default::default)
.lock()
.unwrap_or_else(PoisonError::into_inner)
}
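This hunk is the crate's whole `lazy_static!` replacement recipe: a function-local `static OnceLock` plus `get_or_init`, available since Rust 1.70 and hence the MSRV bump. The same pattern in isolation (a sketch; `global_map` is a made-up example, not crate API):

```rust
use std::collections::HashMap;
use std::sync::{Mutex, MutexGuard, OnceLock, PoisonError};

// A lazily initialized global, with lock poisoning ignored just like
// get_dependency_graph above.
fn global_map() -> MutexGuard<'static, HashMap<String, u32>> {
    static MAP: OnceLock<Mutex<HashMap<String, u32>>> = OnceLock::new();
    MAP.get_or_init(Default::default)
        .lock()
        .unwrap_or_else(PoisonError::into_inner)
}

fn main() {
    global_map().insert("answer".into(), 42);
    assert_eq!(global_map().get("answer"), Some(&42));
}
```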

src/stdsync.rs

@@ -21,10 +21,14 @@
pub use std::sync as raw;
#[cfg(not(debug_assertions))]
-pub use std::sync::{Condvar, Mutex, MutexGuard, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub use std::sync::{
+Condvar, Mutex, MutexGuard, Once, OnceLock, RwLock, RwLockReadGuard, RwLockWriteGuard,
+};
#[cfg(debug_assertions)]
-pub use tracing::{Condvar, Mutex, MutexGuard, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub use tracing::{
+Condvar, Mutex, MutexGuard, Once, OnceLock, RwLock, RwLockReadGuard, RwLockWriteGuard,
+};
/// Dependency tracing versions of [`std::sync`].
pub mod tracing {
@@ -427,8 +431,8 @@ pub mod tracing {
/// Wrapper around [`std::sync::Once`].
///
-/// Refer to the [crate-level][`crate`] documentation for the differences between this struct and
-/// the one it wraps.
+/// Refer to the [crate-level][`crate`] documentation for the differences between this struct
+/// and the one it wraps.
#[derive(Debug)]
pub struct Once {
inner: sync::Once,
@@ -479,6 +483,142 @@ pub mod tracing {
}
}
/// Wrapper for [`std::sync::OnceLock`]
///
/// The exact locking behaviour of [`std::sync::OnceLock`] is currently undefined, but may
/// deadlock in the event of reentrant initialization attempts. This wrapper participates in
/// cycle detection as normal and will therefore panic in the event of reentrancy.
///
/// Most of this primitive's methods do not involve locking and as such are simply passed
/// through to the inner implementation.
///
/// # Examples
///
/// ```
/// use tracing_mutex::stdsync::tracing::OnceLock;
///
/// static LOCK: OnceLock<i32> = OnceLock::new();
/// assert!(LOCK.get().is_none());
///
/// std::thread::spawn(|| {
/// let value: &i32 = LOCK.get_or_init(|| 42);
/// assert_eq!(value, &42);
/// }).join().unwrap();
///
/// let value: Option<&i32> = LOCK.get();
/// assert_eq!(value, Some(&42));
/// ```
#[derive(Debug)]
pub struct OnceLock<T> {
id: LazyMutexId,
inner: sync::OnceLock<T>,
}
// N.B. this impl inlines everything that directly calls the inner implementation as there
// should be 0 overhead to doing so.
impl<T> OnceLock<T> {
/// Creates a new empty cell
pub const fn new() -> Self {
Self {
id: LazyMutexId::new(),
inner: sync::OnceLock::new(),
}
}
/// Gets a reference to the underlying value.
///
/// This method does not attempt to lock and therefore does not participate in cycle
/// detection.
#[inline]
pub fn get(&self) -> Option<&T> {
self.inner.get()
}
/// Gets a mutable reference to the underlying value.
///
/// This method does not attempt to lock and therefore does not participate in cycle
/// detection.
#[inline]
pub fn get_mut(&mut self) -> Option<&mut T> {
self.inner.get_mut()
}
/// Sets the contents of this cell to `value`
///
/// As this method may block until initialization is complete, it participates in cycle
/// detection.
pub fn set(&self, value: T) -> Result<(), T> {
let _guard = self.id.get_borrowed();
self.inner.set(value)
}
/// Gets the contents of the cell, initializing it with `f` if the cell was empty.
///
/// This method participates in cycle detection. Reentrancy is considered a cycle.
pub fn get_or_init<F>(&self, f: F) -> &T
where
F: FnOnce() -> T,
{
let _guard = self.id.get_borrowed();
self.inner.get_or_init(f)
}
/// Takes the value out of this `OnceLock`, moving it back to an uninitialized state.
///
/// This method does not attempt to lock and therefore does not participate in cycle
/// detection.
#[inline]
pub fn take(&mut self) -> Option<T> {
self.inner.take()
}
/// Consumes the `OnceLock`, returning the wrapped value. Returns `None` if the cell was
/// empty.
///
/// This method does not attempt to lock and therefore does not participate in cycle
/// detection.
#[inline]
pub fn into_inner(mut self) -> Option<T> {
self.take()
}
}
impl<T> Default for OnceLock<T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T: PartialEq> PartialEq for OnceLock<T> {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.inner == other.inner
}
}
impl<T: Eq> Eq for OnceLock<T> {}
impl<T: Clone> Clone for OnceLock<T> {
fn clone(&self) -> Self {
Self {
id: LazyMutexId::new(),
inner: self.inner.clone(),
}
}
}
impl<T> From<T> for OnceLock<T> {
#[inline]
fn from(value: T) -> Self {
Self {
id: LazyMutexId::new(),
inner: sync::OnceLock::from(value),
}
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;