From 08cfb17234d82c9920b08d98798a99ada05a1899 Mon Sep 17 00:00:00 2001 From: Bert Peters Date: Mon, 24 May 2021 21:28:25 +0200 Subject: [PATCH 1/9] Build all features on CI --- .github/workflows/ci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f9f817b..6753492 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,10 +30,12 @@ jobs: - uses: actions-rs/cargo@v1 with: command: build + args: --all-features - uses: actions-rs/cargo@v1 with: command: test + args: --all-features - uses: actions-rs/cargo@v1 with: @@ -43,4 +45,4 @@ jobs: - uses: actions-rs/cargo@v1 with: command: clippy - args: -- -D warnings + args: --all-features -- -D warnings From 6a3cb83d013d9888f40a544870d84ff25e6f9913 Mon Sep 17 00:00:00 2001 From: Bert Peters Date: Mon, 24 May 2021 22:03:03 +0200 Subject: [PATCH 2/9] Implement Mutex behaviour for lock_api --- Cargo.toml | 5 +++ src/lib.rs | 54 +++++++++++++++------- src/lockapi.rs | 118 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 161 insertions(+), 16 deletions(-) create mode 100644 src/lockapi.rs diff --git a/Cargo.toml b/Cargo.toml index 9be107a..1182071 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,6 +13,11 @@ repository = "https://github.com/bertptrs/tracing-mutex" [dependencies] lazy_static = "1" +lock_api = { version = "0.4", optional = true } [dev-dependencies] rand = "0.8" + +[features] +# Feature names do not match crate names pending namespaced features. +lockapi = ["lock_api"] diff --git a/src/lib.rs b/src/lib.rs index a578753..64ded52 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -60,10 +60,14 @@ use std::sync::Once; use std::sync::PoisonError; use lazy_static::lazy_static; +#[cfg(feature = "lockapi")] +pub use lock_api; use crate::graph::DiGraph; mod graph; +#[cfg(feature = "lockapi")] +pub mod lockapi; pub mod stdsync; /// Counter for Mutex IDs. Atomic avoids the need for locking. @@ -120,6 +124,16 @@ impl MutexId { /// /// This method panics if the new dependency would introduce a cycle. pub fn get_borrowed(&self) -> BorrowedMutex { + self.mark_held(); + BorrowedMutex(self) + } + + /// Mark this lock as held for the purposes of dependency tracking. + /// + /// # Panics + /// + /// This method panics if the new dependency would introduce a cycle. + pub fn mark_held(&self) { let creates_cycle = HELD_LOCKS.with(|locks| { if let Some(&previous) = locks.borrow().last() { let mut graph = get_dependency_graph(); @@ -136,7 +150,22 @@ impl MutexId { } HELD_LOCKS.with(|locks| locks.borrow_mut().push(self.value())); - BorrowedMutex(self) + } + + pub unsafe fn mark_released(&self) { + HELD_LOCKS.with(|locks| { + let mut locks = locks.borrow_mut(); + + for (i, &lock) in locks.iter().enumerate().rev() { + if lock == self.value() { + locks.remove(i); + return; + } + } + + // Drop impls shouldn't panic but if this happens something is seriously broken. + unreachable!("Tried to drop lock for mutex {:?} but it wasn't held", self) + }); } } @@ -187,6 +216,12 @@ impl fmt::Debug for LazyMutexId { } } +impl Default for LazyMutexId { + fn default() -> Self { + Self::new() + } +} + /// Safety: the UnsafeCell is guaranteed to only be accessed mutably from a `Once`. unsafe impl Sync for LazyMutexId {} @@ -241,21 +276,8 @@ struct BorrowedMutex<'a>(&'a MutexId); /// that is an indication of a serious design flaw in this library. 
impl<'a> Drop for BorrowedMutex<'a> { fn drop(&mut self) { - let id = self.0; - - HELD_LOCKS.with(|locks| { - let mut locks = locks.borrow_mut(); - - for (i, &lock) in locks.iter().enumerate().rev() { - if lock == id.value() { - locks.remove(i); - return; - } - } - - // Drop impls shouldn't panic but if this happens something is seriously broken. - unreachable!("Tried to drop lock for mutex {:?} but it wasn't held", id) - }); + // Safety: the only way to get a BorrowedMutex is by locking the mutex. + unsafe { self.0.mark_released() }; } } diff --git a/src/lockapi.rs b/src/lockapi.rs new file mode 100644 index 0000000..b957d6d --- /dev/null +++ b/src/lockapi.rs @@ -0,0 +1,118 @@ +//! Wrapper implementations for [`lock_api`]. +use lock_api::GuardNoSend; +use lock_api::RawMutex; +use lock_api::RawMutexFair; +use lock_api::RawMutexTimed; + +use crate::LazyMutexId; + +/// Tracing wrapper for all [`lock_api`] traits. +/// +/// This wrapper implements any of the locking traits available, given that the wrapped type +/// implements them. As such, this wrapper can be used both for normal mutexes and rwlocks. +#[derive(Debug, Default)] +pub struct TracingWrapper { + inner: T, + id: LazyMutexId, +} + +impl TracingWrapper { + /// Mark this lock as held in the dependency graph. + fn mark_held(&self) { + self.id.mark_held(); + } + + /// Mark this lock as released in the dependency graph. + /// + /// # Safety + /// + /// This function should only be called when the lock has been previously acquired by this + /// thread. + unsafe fn mark_released(&self) { + self.id.mark_released(); + } + + /// Conditionally lock the mutex. + /// + /// First acquires the lock, then runs the provided function. If that function returns true, + /// then the lock is kept, otherwise the mutex is immediately marked as relased. + fn conditionally_lock(&self, f: impl FnOnce() -> bool) -> bool { + // Mark as locked while we try to do the thing + self.mark_held(); + + if f() { + true + } else { + // Safety: we just locked it above. + unsafe { self.mark_released() } + false + } + } +} + +unsafe impl RawMutex for TracingWrapper +where + T: RawMutex, +{ + const INIT: Self = Self { + inner: T::INIT, + id: LazyMutexId::new(), + }; + + /// Always equal to [`GuardNoSend`], as an implementation detail in the tracking system requires + /// this behaviour. May change in the future to reflect the actual guard type from the wrapped + /// primitive. + type GuardMarker = GuardNoSend; + + fn lock(&self) { + self.mark_held(); + self.inner.lock(); + } + + fn try_lock(&self) -> bool { + self.conditionally_lock(|| self.inner.try_lock()) + } + + unsafe fn unlock(&self) { + self.inner.unlock(); + self.mark_released(); + } + + fn is_locked(&self) -> bool { + // Can't use the default implementation as the inner type might've overwritten it. + self.inner.is_locked() + } +} + +unsafe impl RawMutexFair for TracingWrapper +where + T: RawMutexFair, +{ + unsafe fn unlock_fair(&self) { + self.inner.unlock_fair(); + self.mark_released(); + } + + unsafe fn bump(&self) { + // Bumping effectively doesn't change which locks are held, so we don't need to manage the + // lock state. 
+ self.inner.bump(); + } +} + +unsafe impl RawMutexTimed for TracingWrapper +where + T: RawMutexTimed, +{ + type Duration = T::Duration; + + type Instant = T::Instant; + + fn try_lock_for(&self, timeout: Self::Duration) -> bool { + self.conditionally_lock(|| self.inner.try_lock_for(timeout)) + } + + fn try_lock_until(&self, timeout: Self::Instant) -> bool { + self.conditionally_lock(|| self.inner.try_lock_until(timeout)) + } +} From b21a63e74b792d173b048e0da3a4ce239aabca85 Mon Sep 17 00:00:00 2001 From: Bert Peters Date: Thu, 27 May 2021 19:20:45 +0200 Subject: [PATCH 3/9] Implement RwLock-based traits for lockapi worker. --- src/lockapi.rs | 233 +++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 227 insertions(+), 6 deletions(-) diff --git a/src/lockapi.rs b/src/lockapi.rs index b957d6d..c9f82f4 100644 --- a/src/lockapi.rs +++ b/src/lockapi.rs @@ -3,6 +3,16 @@ use lock_api::GuardNoSend; use lock_api::RawMutex; use lock_api::RawMutexFair; use lock_api::RawMutexTimed; +use lock_api::RawRwLock; +use lock_api::RawRwLockDowngrade; +use lock_api::RawRwLockFair; +use lock_api::RawRwLockRecursive; +use lock_api::RawRwLockRecursiveTimed; +use lock_api::RawRwLockTimed; +use lock_api::RawRwLockUpgrade; +use lock_api::RawRwLockUpgradeDowngrade; +use lock_api::RawRwLockUpgradeFair; +use lock_api::RawRwLockUpgradeTimed; use crate::LazyMutexId; @@ -13,6 +23,7 @@ use crate::LazyMutexId; #[derive(Debug, Default)] pub struct TracingWrapper { inner: T, + // Need to use a lazy mutex ID to intialize statically. id: LazyMutexId, } @@ -32,10 +43,26 @@ impl TracingWrapper { self.id.mark_released(); } + /// First mark ourselves as held, then call the locking function. + fn lock(&self, f: impl FnOnce()) { + self.mark_held(); + f(); + } + + /// First call the unlocking function, then mark ourselves as realeased. + unsafe fn unlock(&self, f: impl FnOnce()) { + f(); + self.mark_released(); + } + /// Conditionally lock the mutex. /// /// First acquires the lock, then runs the provided function. If that function returns true, /// then the lock is kept, otherwise the mutex is immediately marked as relased. + /// + /// # Returns + /// + /// The value returned from the callback. fn conditionally_lock(&self, f: impl FnOnce() -> bool) -> bool { // Mark as locked while we try to do the thing self.mark_held(); @@ -65,8 +92,7 @@ where type GuardMarker = GuardNoSend; fn lock(&self) { - self.mark_held(); - self.inner.lock(); + self.lock(|| self.inner.lock()); } fn try_lock(&self) -> bool { @@ -74,8 +100,7 @@ where } unsafe fn unlock(&self) { - self.inner.unlock(); - self.mark_released(); + self.unlock(|| self.inner.unlock()); } fn is_locked(&self) -> bool { @@ -89,8 +114,7 @@ where T: RawMutexFair, { unsafe fn unlock_fair(&self) { - self.inner.unlock_fair(); - self.mark_released(); + self.unlock(|| self.inner.unlock_fair()) } unsafe fn bump(&self) { @@ -116,3 +140,200 @@ where self.conditionally_lock(|| self.inner.try_lock_until(timeout)) } } + +unsafe impl RawRwLock for TracingWrapper +where + T: RawRwLock, +{ + const INIT: Self = Self { + inner: T::INIT, + id: LazyMutexId::new(), + }; + + /// Always equal to [`GuardNoSend`], as an implementation detail in the tracking system requires + /// this behaviour. May change in the future to reflect the actual guard type from the wrapped + /// primitive. 
+ type GuardMarker = GuardNoSend; + + fn lock_shared(&self) { + self.lock(|| self.inner.lock_shared()); + } + + fn try_lock_shared(&self) -> bool { + self.conditionally_lock(|| self.inner.try_lock_shared()) + } + + unsafe fn unlock_shared(&self) { + self.unlock(|| self.inner.unlock_shared()); + } + + fn lock_exclusive(&self) { + self.lock(|| self.inner.lock_exclusive()); + } + + fn try_lock_exclusive(&self) -> bool { + self.conditionally_lock(|| self.inner.try_lock_exclusive()) + } + + unsafe fn unlock_exclusive(&self) { + self.lock(|| self.inner.unlock_exclusive()); + } + + fn is_locked(&self) -> bool { + self.inner.is_locked() + } +} + +unsafe impl RawRwLockDowngrade for TracingWrapper +where + T: RawRwLockDowngrade, +{ + unsafe fn downgrade(&self) { + // Downgrading does not require tracking + self.inner.downgrade() + } +} + +unsafe impl RawRwLockUpgrade for TracingWrapper +where + T: RawRwLockUpgrade, +{ + fn lock_upgradable(&self) { + self.lock(|| self.inner.lock_upgradable()); + } + + fn try_lock_upgradable(&self) -> bool { + self.conditionally_lock(|| self.inner.try_lock_upgradable()) + } + + unsafe fn unlock_upgradable(&self) { + self.unlock(|| self.inner.unlock_upgradable()); + } + + unsafe fn upgrade(&self) { + self.inner.upgrade(); + } + + unsafe fn try_upgrade(&self) -> bool { + self.inner.try_upgrade() + } +} + +unsafe impl RawRwLockFair for TracingWrapper +where + T: RawRwLockFair, +{ + unsafe fn unlock_shared_fair(&self) { + self.unlock(|| self.inner.unlock_shared_fair()); + } + + unsafe fn unlock_exclusive_fair(&self) { + self.unlock(|| self.inner.unlock_exclusive_fair()); + } + + unsafe fn bump_shared(&self) { + self.inner.bump_shared(); + } + + unsafe fn bump_exclusive(&self) { + self.inner.bump_exclusive(); + } +} + +unsafe impl RawRwLockRecursive for TracingWrapper +where + T: RawRwLockRecursive, +{ + fn lock_shared_recursive(&self) { + self.lock(|| self.inner.lock_shared_recursive()); + } + + fn try_lock_shared_recursive(&self) -> bool { + self.conditionally_lock(|| self.inner.try_lock_shared_recursive()) + } +} + +unsafe impl RawRwLockRecursiveTimed for TracingWrapper +where + T: RawRwLockRecursiveTimed, +{ + fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool { + self.conditionally_lock(|| self.inner.try_lock_shared_recursive_for(timeout)) + } + + fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool { + self.conditionally_lock(|| self.inner.try_lock_shared_recursive_until(timeout)) + } +} + +unsafe impl RawRwLockTimed for TracingWrapper +where + T: RawRwLockTimed, +{ + type Duration = T::Duration; + + type Instant = T::Instant; + + fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool { + self.conditionally_lock(|| self.inner.try_lock_shared_for(timeout)) + } + + fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool { + self.conditionally_lock(|| self.inner.try_lock_shared_until(timeout)) + } + + fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool { + self.conditionally_lock(|| self.inner.try_lock_exclusive_for(timeout)) + } + + fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool { + self.conditionally_lock(|| self.inner.try_lock_exclusive_until(timeout)) + } +} + +unsafe impl RawRwLockUpgradeDowngrade for TracingWrapper +where + T: RawRwLockUpgradeDowngrade, +{ + unsafe fn downgrade_upgradable(&self) { + self.inner.downgrade_upgradable() + } + + unsafe fn downgrade_to_upgradable(&self) { + self.inner.downgrade_to_upgradable() + } +} + +unsafe impl RawRwLockUpgradeFair for 
TracingWrapper +where + T: RawRwLockUpgradeFair, +{ + unsafe fn unlock_upgradable_fair(&self) { + self.lock(|| self.inner.unlock_upgradable_fair()) + } + + unsafe fn bump_upgradable(&self) { + self.inner.bump_upgradable() + } +} + +unsafe impl RawRwLockUpgradeTimed for TracingWrapper +where + T: RawRwLockUpgradeTimed, +{ + fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool { + self.conditionally_lock(|| self.inner.try_lock_upgradable_for(timeout)) + } + + fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool { + self.conditionally_lock(|| self.inner.try_lock_upgradable_until(timeout)) + } + + unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool { + self.conditionally_lock(|| self.inner.try_upgrade_for(timeout)) + } + + unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool { + self.conditionally_lock(|| self.inner.try_upgrade_until(timeout)) + } +} From 73b4c8b1af4c7da79048a50290f39571e05acd71 Mon Sep 17 00:00:00 2001 From: Bert Peters Date: Thu, 27 May 2021 21:04:49 +0200 Subject: [PATCH 4/9] Minimal parking_lot support --- Cargo.toml | 2 ++ src/lib.rs | 4 ++++ src/parkinglot.rs | 58 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+) create mode 100644 src/parkinglot.rs diff --git a/Cargo.toml b/Cargo.toml index 1182071..d48a6b4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,6 +14,7 @@ repository = "https://github.com/bertptrs/tracing-mutex" [dependencies] lazy_static = "1" lock_api = { version = "0.4", optional = true } +parking_lot = { version = "0.11", optional = true } [dev-dependencies] rand = "0.8" @@ -21,3 +22,4 @@ rand = "0.8" [features] # Feature names do not match crate names pending namespaced features. lockapi = ["lock_api"] +parkinglot = ["parking_lot", "lockapi"] diff --git a/src/lib.rs b/src/lib.rs index 64ded52..71c049e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -62,12 +62,16 @@ use std::sync::PoisonError; use lazy_static::lazy_static; #[cfg(feature = "lockapi")] pub use lock_api; +#[cfg(feature = "parkinglot")] +pub use parking_lot; use crate::graph::DiGraph; mod graph; #[cfg(feature = "lockapi")] pub mod lockapi; +#[cfg(feature = "lockapi")] +pub mod parkinglot; pub mod stdsync; /// Counter for Mutex IDs. Atomic avoids the need for locking. diff --git a/src/parkinglot.rs b/src/parkinglot.rs new file mode 100644 index 0000000..c02e0ea --- /dev/null +++ b/src/parkinglot.rs @@ -0,0 +1,58 @@ +use crate::lockapi::TracingWrapper; + +macro_rules! 
create_mutex_wrapper { + ($wrapped:ty, $tracing_name:ident, $debug_name:ident) => { + pub type $tracing_name = lock_api::Mutex, T>; + + #[cfg(debug_assertions)] + pub type $debug_name = $tracing_name; + #[cfg(not(debug_assertions))] + pub type $debug_name = lock_api::Mutex<$wrapped, T>; + }; +} + +create_mutex_wrapper!(parking_lot::RawFairMutex, TracingFairMutex, DebugFairMutex); +create_mutex_wrapper!(parking_lot::RawMutex, TracingMutex, DebugMutex); + +pub type TracingReentrantMutex = + lock_api::ReentrantMutex, parking_lot::RawThreadId, T>; +#[cfg(debug_assertions)] +pub type DebugReentrantMutex = TracingReentrantMutex; +#[cfg(not(debug_assertions))] +pub type DebugReentrantMutex = parking_lot::ReentrantMutex; + +#[cfg(test)] +mod tests { + use std::sync::Arc; + use std::thread; + + use super::*; + + #[test] + fn test_mutex_usage() { + let mutex = Arc::new(TracingMutex::new(())); + let local_lock = mutex.lock(); + drop(local_lock); + + thread::spawn(move || { + let _remote_lock = mutex.lock(); + }) + .join() + .unwrap(); + } + + #[test] + #[should_panic] + fn test_mutex_conflict() { + let mutexes = [ + TracingMutex::new(()), + TracingMutex::new(()), + TracingMutex::new(()), + ]; + + for i in 0..3 { + let _first_lock = mutexes[i].lock(); + let _second_lock = mutexes[(i + 1) % 3].lock(); + } + } +} From 77cd603363f3a1da358307f11e342ac076b7575f Mon Sep 17 00:00:00 2001 From: Bert Peters Date: Thu, 27 May 2021 22:00:37 +0200 Subject: [PATCH 5/9] Implement minimal mutexes for parking_lot. --- src/parkinglot.rs | 64 +++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 53 insertions(+), 11 deletions(-) diff --git a/src/parkinglot.rs b/src/parkinglot.rs index c02e0ea..25b258c 100644 --- a/src/parkinglot.rs +++ b/src/parkinglot.rs @@ -1,25 +1,67 @@ use crate::lockapi::TracingWrapper; -macro_rules! create_mutex_wrapper { - ($wrapped:ty, $tracing_name:ident, $debug_name:ident) => { - pub type $tracing_name = lock_api::Mutex, T>; +macro_rules! debug_variant { + ($debug_name:ident, $tracing_name:ident, $normal_name:ty) => { + type $tracing_name = TracingWrapper<$normal_name>; #[cfg(debug_assertions)] - pub type $debug_name = $tracing_name; + type $debug_name = TracingWrapper<$normal_name>; #[cfg(not(debug_assertions))] - pub type $debug_name = lock_api::Mutex<$wrapped, T>; + type $debug_name = $normal_name; }; } -create_mutex_wrapper!(parking_lot::RawFairMutex, TracingFairMutex, DebugFairMutex); -create_mutex_wrapper!(parking_lot::RawMutex, TracingMutex, DebugMutex); +debug_variant!( + DebugRawFairMutex, + TracingRawFairMutex, + parking_lot::RawFairMutex +); +debug_variant!(DebugRawMutex, TracingRawMutex, parking_lot::RawMutex); +/// Dependency tracking fair mutex. See: [`parking_lot::FairMutex`]. +pub type TracingFairMutex = lock_api::Mutex; +/// Mutex guard for [`TracingFairMutex`]. +pub type TracingFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawFairMutex, T>; +/// Debug-only dependency tracking fair mutex. +/// +/// If debug assertions are enabled this resolves to [`TracingFairMutex`] and to +/// [`parking_lot::FairMutex`] if it's not. +pub type DebugFairMutex = lock_api::Mutex; +/// Mutex guard for [`DebugFairMutex`]. +pub type DebugFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawFairMutex, T>; + +/// Dependency tracking mutex. See: [`parking_lot::Mutex`]. +pub type TracingMutex = lock_api::Mutex; +/// Mutex guard for [`TracingMutex`]. +pub type TracingMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawMutex, T>; +/// Debug-only dependency tracking mutex. 
+/// +/// If debug assertions are enabled this resolves to [`TracingMutex`] and to [`parking_lot::Mutex`] +/// if it's not. +pub type DebugMutex = lock_api::Mutex; +/// Mutex guard for [`DebugMutex`]. +pub type DebugMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawMutex, T>; + +/// Dependency tracking reentrant mutex. See: [`parking_lot::ReentrantMutex`]. pub type TracingReentrantMutex = lock_api::ReentrantMutex, parking_lot::RawThreadId, T>; -#[cfg(debug_assertions)] -pub type DebugReentrantMutex = TracingReentrantMutex; -#[cfg(not(debug_assertions))] -pub type DebugReentrantMutex = parking_lot::ReentrantMutex; +/// Mutex guard for [`TracingReentrantMutex`]. +pub type TracingReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard< + 'a, + TracingWrapper, + parking_lot::RawThreadId, + T, +>; + +/// Debug-only dependency tracking reentrant mutex. +/// +/// If debug assertions are enabled this resolves to [`TracingReentrantMutex`] and to +/// [`parking_lot::ReentrantMutex`] if it's not. +pub type DebugReentrantMutex = + lock_api::ReentrantMutex; +/// Mutex guard for [`DebugReentrantMutex`]. +pub type DebugReentrantMutexGuard<'a, T> = + lock_api::ReentrantMutexGuard<'a, DebugRawMutex, parking_lot::RawThreadId, T>; #[cfg(test)] mod tests { From 618a11f94047dcbc87aece721aca1cf49d725c3d Mon Sep 17 00:00:00 2001 From: Bert Peters Date: Thu, 27 May 2021 22:19:57 +0200 Subject: [PATCH 6/9] Implement a wrapper for parking_lot::Once --- src/parkinglot.rs | 82 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 79 insertions(+), 3 deletions(-) diff --git a/src/parkinglot.rs b/src/parkinglot.rs index 25b258c..2524aa3 100644 --- a/src/parkinglot.rs +++ b/src/parkinglot.rs @@ -1,4 +1,8 @@ +use parking_lot::Once; +use parking_lot::OnceState; + use crate::lockapi::TracingWrapper; +use crate::LazyMutexId; macro_rules! debug_variant { ($debug_name:ident, $tracing_name:ident, $normal_name:ty) => { @@ -25,7 +29,7 @@ pub type TracingFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawFairM /// Debug-only dependency tracking fair mutex. /// /// If debug assertions are enabled this resolves to [`TracingFairMutex`] and to -/// [`parking_lot::FairMutex`] if it's not. +/// [`parking_lot::FairMutex`] otherwise. pub type DebugFairMutex = lock_api::Mutex; /// Mutex guard for [`DebugFairMutex`]. pub type DebugFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawFairMutex, T>; @@ -37,7 +41,7 @@ pub type TracingMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawMutex, T> /// Debug-only dependency tracking mutex. /// /// If debug assertions are enabled this resolves to [`TracingMutex`] and to [`parking_lot::Mutex`] -/// if it's not. +/// otherwise. pub type DebugMutex = lock_api::Mutex; /// Mutex guard for [`DebugMutex`]. pub type DebugMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawMutex, T>; @@ -56,13 +60,65 @@ pub type TracingReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard< /// Debug-only dependency tracking reentrant mutex. /// /// If debug assertions are enabled this resolves to [`TracingReentrantMutex`] and to -/// [`parking_lot::ReentrantMutex`] if it's not. +/// [`parking_lot::ReentrantMutex`] otherwise. pub type DebugReentrantMutex = lock_api::ReentrantMutex; /// Mutex guard for [`DebugReentrantMutex`]. pub type DebugReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<'a, DebugRawMutex, parking_lot::RawThreadId, T>; +/// A dependency-tracking wrapper for [`parking_lot::Once`]. 
+#[derive(Debug, Default)] +pub struct TracingOnce { + inner: Once, + id: LazyMutexId, +} + +impl TracingOnce { + /// Create a new `TracingOnce` value. + pub const fn new() -> Self { + Self { + inner: Once::new(), + id: LazyMutexId::new(), + } + } + + /// Returns the current state of this `Once`. + pub fn state(&self) -> OnceState { + self.inner.state() + } + + /// + /// This call is considered as "locking this `TracingOnce`" and it participates in dependency + /// tracking as such. + /// + /// # Panics + /// + /// This method will panic if `f` panics, poisoning this `Once`. In addition, this function + /// panics when the lock acquisition order is determined to be inconsistent. + pub fn call_once(&self, f: impl FnOnce()) { + let _borrow = self.id.get_borrowed(); + self.inner.call_once(f); + } + + /// Performs the given initialization routeine once and only once. + /// + /// This method is identical to [`TracingOnce::call_once`] except it ignores poisining. + pub fn call_once_force(&self, f: impl FnOnce(OnceState)) { + let _borrow = self.id.get_borrowed(); + self.inner.call_once_force(f); + } +} + +/// Debug-only `Once`. +/// +/// If debug assertions are enabled this resolves to [`TracingOnce`] and to [`parking_lot::Once`] +/// otherwise. +#[cfg(debug_assertions)] +pub type DebugOnce = TracingOnce; +#[cfg(not(debug_assertions))] +pub type DebugOnce = Once; + #[cfg(test)] mod tests { use std::sync::Arc; @@ -97,4 +153,24 @@ mod tests { let _second_lock = mutexes[(i + 1) % 3].lock(); } } + + #[test] + fn test_once_usage() { + let once = Arc::new(TracingOnce::new()); + let once_clone = once.clone(); + + assert!(!once_clone.state().done()); + + let handle = thread::spawn(move || { + assert!(!once_clone.state().done()); + + once_clone.call_once(|| {}); + + assert!(once_clone.state().done()); + }); + + handle.join().unwrap(); + + assert!(once.state().done()); + } } From 4c70d999d6ecbf634dfb84de2d1a3195ad63f9bb Mon Sep 17 00:00:00 2001 From: Bert Peters Date: Sat, 10 Jul 2021 12:07:40 +0200 Subject: [PATCH 7/9] Create type aliases for parking_lot::RwLock --- src/parkinglot.rs | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/src/parkinglot.rs b/src/parkinglot.rs index 2524aa3..7158623 100644 --- a/src/parkinglot.rs +++ b/src/parkinglot.rs @@ -21,6 +21,7 @@ debug_variant!( parking_lot::RawFairMutex ); debug_variant!(DebugRawMutex, TracingRawMutex, parking_lot::RawMutex); +debug_variant!(DebugRawRwLock, TracingRawRwLock, parking_lot::RawRwLock); /// Dependency tracking fair mutex. See: [`parking_lot::FairMutex`]. pub type TracingFairMutex = lock_api::Mutex; @@ -47,6 +48,10 @@ pub type DebugMutex = lock_api::Mutex; pub type DebugMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawMutex, T>; /// Dependency tracking reentrant mutex. See: [`parking_lot::ReentrantMutex`]. +/// +/// **Note:** due to the way dependencies are tracked, this mutex can only be acquired directly +/// after itself. Acquiring any other mutex in between introduces a dependency cycle, and will +/// therefore be rejected. pub type TracingReentrantMutex = lock_api::ReentrantMutex, parking_lot::RawThreadId, T>; /// Mutex guard for [`TracingReentrantMutex`]. @@ -67,6 +72,23 @@ pub type DebugReentrantMutex = pub type DebugReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<'a, DebugRawMutex, parking_lot::RawThreadId, T>; +/// Dependency tracking RwLock. See: [`parking_lot::RwLock`]. +pub type TracingRwLock = lock_api::RwLock; +/// Read guard for [`TracingRwLock`]. 
+pub type TracingRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, TracingRawRwLock, T>; +/// Write guard for [`TracingRwLock`]. +pub type TracingRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, TracingRawRwLock, T>; + +/// Debug-only dependency tracking RwLock. +/// +/// If debug assertions are enabled this resolved to [`TracingRwLock`] and to +/// [`parking_lot::RwLock`] otherwise. +pub type DebugRwLock = lock_api::RwLock; +/// Read guard for [`TracingRwLock`]. +pub type DebugRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, DebugRawRwLock, T>; +/// Write guard for [`TracingRwLock`]. +pub type DebugRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, DebugRawRwLock, T>; + /// A dependency-tracking wrapper for [`parking_lot::Once`]. #[derive(Debug, Default)] pub struct TracingOnce { @@ -154,6 +176,21 @@ mod tests { } } + #[test] + fn test_rwlock_usage() { + let lock = Arc::new(TracingRwLock::new(())); + let lock2 = Arc::clone(&lock); + + let _read_lock = lock.read(); + + // Should be able to acquire lock in the background + thread::spawn(move || { + let _read_lock = lock2.read(); + }) + .join() + .unwrap(); + } + #[test] fn test_once_usage() { let once = Arc::new(TracingOnce::new()); From 17761af5a856ed758ce420e36aaa18382415ef13 Mon Sep 17 00:00:00 2001 From: Bert Peters Date: Sat, 10 Jul 2021 13:05:41 +0200 Subject: [PATCH 8/9] Add type aliases for mapped mutex guards --- src/parkinglot.rs | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/src/parkinglot.rs b/src/parkinglot.rs index 7158623..5d44ab1 100644 --- a/src/parkinglot.rs +++ b/src/parkinglot.rs @@ -27,6 +27,9 @@ debug_variant!(DebugRawRwLock, TracingRawRwLock, parking_lot::RawRwLock); pub type TracingFairMutex = lock_api::Mutex; /// Mutex guard for [`TracingFairMutex`]. pub type TracingFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawFairMutex, T>; +/// RAII guard for `TracingFairMutexGuard::map`. +pub type TracingMappedFairMutexGuard<'a, T> = + lock_api::MappedMutexGuard<'a, TracingRawFairMutex, T>; /// Debug-only dependency tracking fair mutex. /// /// If debug assertions are enabled this resolves to [`TracingFairMutex`] and to @@ -34,11 +37,15 @@ pub type TracingFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawFairM pub type DebugFairMutex = lock_api::Mutex; /// Mutex guard for [`DebugFairMutex`]. pub type DebugFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawFairMutex, T>; +/// RAII guard for `DebugFairMutexGuard::map`. +pub type DebugMappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, DebugRawFairMutex, T>; /// Dependency tracking mutex. See: [`parking_lot::Mutex`]. pub type TracingMutex = lock_api::Mutex; /// Mutex guard for [`TracingMutex`]. pub type TracingMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawMutex, T>; +/// RAII guard for `TracingMutexGuard::map`. +pub type TracingMappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, TracingRawMutex, T>; /// Debug-only dependency tracking mutex. /// /// If debug assertions are enabled this resolves to [`TracingMutex`] and to [`parking_lot::Mutex`] @@ -46,6 +53,8 @@ pub type TracingMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawMutex, T> pub type DebugMutex = lock_api::Mutex; /// Mutex guard for [`DebugMutex`]. pub type DebugMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawMutex, T>; +/// RAII guard for `TracingMutexGuard::map`. +pub type DebugMappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, DebugRawMutex, T>; /// Dependency tracking reentrant mutex. 
See: [`parking_lot::ReentrantMutex`]. /// @@ -61,6 +70,9 @@ pub type TracingReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard< parking_lot::RawThreadId, T, >; +/// RAII guard for `TracingReentrantMutexGuard::map`. +pub type TracingMappedReentrantMutexGuard<'a, T> = + lock_api::MappedReentrantMutexGuard<'a, TracingRawMutex, parking_lot::RawThreadId, T>; /// Debug-only dependency tracking reentrant mutex. /// @@ -71,6 +83,9 @@ pub type DebugReentrantMutex = /// Mutex guard for [`DebugReentrantMutex`]. pub type DebugReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<'a, DebugRawMutex, parking_lot::RawThreadId, T>; +/// RAII guard for `DebugReentrantMutexGuard::map`. +pub type DebugMappedReentrantMutexGuard<'a, T> = + lock_api::MappedReentrantMutexGuard<'a, DebugRawMutex, parking_lot::RawThreadId, T>; /// Dependency tracking RwLock. See: [`parking_lot::RwLock`]. pub type TracingRwLock = lock_api::RwLock; @@ -78,6 +93,12 @@ pub type TracingRwLock = lock_api::RwLock; pub type TracingRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, TracingRawRwLock, T>; /// Write guard for [`TracingRwLock`]. pub type TracingRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, TracingRawRwLock, T>; +/// RAII guard for `TracingRwLockReadGuard::map`. +pub type TracingMappedRwLockReadGuard<'a, T> = + lock_api::MappedRwLockReadGuard<'a, TracingRawRwLock, T>; +/// RAII guard for `TracingRwLockWriteGuard::map`. +pub type TracingMappedRwLockWriteGuard<'a, T> = + lock_api::MappedRwLockWriteGuard<'a, TracingRawRwLock, T>; /// Debug-only dependency tracking RwLock. /// @@ -88,6 +109,11 @@ pub type DebugRwLock = lock_api::RwLock; pub type DebugRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, DebugRawRwLock, T>; /// Write guard for [`TracingRwLock`]. pub type DebugRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, DebugRawRwLock, T>; +/// RAII guard for `DebugRwLockReadGuard::map`. +pub type DebugMappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, DebugRawRwLock, T>; +/// RAII guard for `DebugRwLockWriteGuard::map`. +pub type DebugMappedRwLockWriteGuard<'a, T> = + lock_api::MappedRwLockWriteGuard<'a, DebugRawRwLock, T>; /// A dependency-tracking wrapper for [`parking_lot::Once`]. #[derive(Debug, Default)] From 680e335ccfb6f7472ce439504af2ae99942e9466 Mon Sep 17 00:00:00 2001 From: Bert Peters Date: Sat, 10 Jul 2021 17:19:34 +0200 Subject: [PATCH 9/9] Document new modules --- CHANGELOG.md | 11 +++++++++++ src/lockapi.rs | 9 +++++++++ src/parkinglot.rs | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 59 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e45d5f6..d3264fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,14 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [Unreleased] +### Added +- Added generic support for wrapping mutexes that implement the traits provided by the + [`lock_api`][lock_api] crate. This can be used for creating support for other mutex providers that + implement it. + +- Added support for [`parking_lot`][parking_lot] mutexes. Support includes type aliases for all + provided mutex types as well as a dedicated `Once` wrapper. + ## [0.1.2] - 2021-05-27 ### Added @@ -33,3 +41,6 @@ Initial release. 
[0.1.2]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.2...v0.1.2
[0.1.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.0...v0.1.1
[0.1.0]: https://github.com/bertptrs/tracing-mutex/releases/tag/v0.1.0
+
+[lock_api]: https://docs.rs/lock_api/
+[parking_lot]: https://docs.rs/parking_lot/
diff --git a/src/lockapi.rs b/src/lockapi.rs
index c9f82f4..b76cb8c 100644
--- a/src/lockapi.rs
+++ b/src/lockapi.rs
@@ -1,4 +1,13 @@
 //! Wrapper implementations for [`lock_api`].
+//!
+//! This module does not provide any particular mutex implementation by itself, but rather can be
+//! used to add dependency tracking to mutexes that already exist. It implements all of the traits
+//! in `lock_api` based on the one it wraps. Crates such as `spin` and `parking_lot` provide base
+//! primitives that can be wrapped.
+//!
+//! Wrapped mutexes are at least one `usize` larger than the types they wrap, and must be aligned
+//! to `usize` boundaries. As such, libraries with many mutexes may want to consider the additional
+//! required memory.
 use lock_api::GuardNoSend;
 use lock_api::RawMutex;
 use lock_api::RawMutexFair;
 use lock_api::RawMutexTimed;
diff --git a/src/parkinglot.rs b/src/parkinglot.rs
index 5d44ab1..d046f1a 100644
--- a/src/parkinglot.rs
+++ b/src/parkinglot.rs
@@ -1,3 +1,42 @@
+//! Wrapper types and type aliases for tracing [`parking_lot`] mutexes.
+//!
+//! This module provides type aliases that use the [`lockapi`][crate::lockapi] module to provide
+//! tracing variants of the `parking_lot` primitives. Each of the `TracingX` type aliases wraps an
+//! `X` in the `parking_lot` API with dependency tracking, and a `DebugX` will refer to a `TracingX`
+//! when `debug_assertions` are enabled and to `X` when they're not. This can be used to aid
+//! debugging in development while enjoying maximum performance in production.
+//!
+//! # Usage
+//!
+//! ```
+//! # use std::sync::Arc;
+//! # use std::thread;
+//! # use lock_api::Mutex;
+//! # use tracing_mutex::parkinglot::TracingMutex;
+//! let mutex = Arc::new(TracingMutex::new(0));
+//!
+//! let handles: Vec<_> = (0..10).map(|_| {
+//!     let mutex = Arc::clone(&mutex);
+//!     thread::spawn(move || *mutex.lock() += 1)
+//! }).collect();
+//!
+//! handles.into_iter().for_each(|handle| handle.join().unwrap());
+//!
+//! // All threads completed so the value should be 10.
+//! assert_eq!(10, *mutex.lock());
+//! ```
+//!
+//! # Limitations
+//!
+//! The main lock for the global state is still provided by `std::sync` and the tracing primitives
+//! are larger than the `parking_lot` primitives they wrap, so there can be a performance
+//! degradation between using this and using `parking_lot` directly. If this is of concern to you,
+//! try using the `DebugX`-structs, which provide cycle detection only when `debug_assertions` are
+//! enabled and have no overhead when they're not.
+//!
+//! In addition, the mutex guards returned by the tracing wrappers are `!Send`, regardless of
+//! whether `parking_lot` is configured to have `Send` mutex guards. This is a limitation of the
+//! current bookkeeping system.
 use parking_lot::Once;
 use parking_lot::OnceState;
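
For reviewers who want to try the series out: the `test_mutex_conflict` test added in patch 4 demonstrates the core guarantee the wrappers provide, namely that an inconsistent lock acquisition order panics instead of deadlocking. Below is a minimal standalone sketch of the same behaviour, assuming a build of this branch with the `parkinglot` feature enabled; the two-mutex `main` function is illustrative and not part of the patches themselves.

```rust
use tracing_mutex::parkinglot::TracingMutex;

fn main() {
    let a = TracingMutex::new(());
    let b = TracingMutex::new(());

    {
        // First acquisition order: a, then b. This records the edge a -> b
        // in the global dependency graph maintained by the crate.
        let _a = a.lock();
        let _b = b.lock();
    }

    // Opposite order: b, then a. Acquiring a while b is held would close a
    // cycle in the dependency graph, so the tracing wrapper panics on the
    // second lock() instead of leaving a latent deadlock in the program.
    let _b = b.lock();
    let _a = a.lock(); // panics
}
```

The same code could use the `DebugMutex` alias instead, which resolves to the tracing variant only when `debug_assertions` are enabled and to a plain `parking_lot` mutex otherwise, as described in the new module documentation.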