From 6a3cb83d013d9888f40a544870d84ff25e6f9913 Mon Sep 17 00:00:00 2001
From: Bert Peters
Date: Mon, 24 May 2021 22:03:03 +0200
Subject: [PATCH] Implement Mutex behaviour for lock_api

---
 Cargo.toml     |   5 +++
 src/lib.rs     |  54 +++++++++++++++-------
 src/lockapi.rs | 118 +++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 161 insertions(+), 16 deletions(-)
 create mode 100644 src/lockapi.rs

diff --git a/Cargo.toml b/Cargo.toml
index 9be107a..1182071 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,6 +13,11 @@ repository = "https://github.com/bertptrs/tracing-mutex"
 
 [dependencies]
 lazy_static = "1"
+lock_api = { version = "0.4", optional = true }
 
 [dev-dependencies]
 rand = "0.8"
+
+[features]
+# Feature names do not match crate names pending namespaced features.
+lockapi = ["lock_api"]
diff --git a/src/lib.rs b/src/lib.rs
index a578753..64ded52 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -60,10 +60,14 @@ use std::sync::Once;
 use std::sync::PoisonError;
 
 use lazy_static::lazy_static;
+#[cfg(feature = "lockapi")]
+pub use lock_api;
 
 use crate::graph::DiGraph;
 
 mod graph;
+#[cfg(feature = "lockapi")]
+pub mod lockapi;
 pub mod stdsync;
 
 /// Counter for Mutex IDs. Atomic avoids the need for locking.
@@ -120,6 +124,16 @@ impl MutexId {
     ///
     /// This method panics if the new dependency would introduce a cycle.
     pub fn get_borrowed(&self) -> BorrowedMutex {
+        self.mark_held();
+        BorrowedMutex(self)
+    }
+
+    /// Mark this lock as held for the purposes of dependency tracking.
+    ///
+    /// # Panics
+    ///
+    /// This method panics if the new dependency would introduce a cycle.
+    pub fn mark_held(&self) {
         let creates_cycle = HELD_LOCKS.with(|locks| {
             if let Some(&previous) = locks.borrow().last() {
                 let mut graph = get_dependency_graph();
@@ -136,7 +150,22 @@
         }
 
         HELD_LOCKS.with(|locks| locks.borrow_mut().push(self.value()));
-        BorrowedMutex(self)
+    }
+
+    pub unsafe fn mark_released(&self) {
+        HELD_LOCKS.with(|locks| {
+            let mut locks = locks.borrow_mut();
+
+            for (i, &lock) in locks.iter().enumerate().rev() {
+                if lock == self.value() {
+                    locks.remove(i);
+                    return;
+                }
+            }
+
+            // Drop impls shouldn't panic but if this happens something is seriously broken.
+            unreachable!("Tried to drop lock for mutex {:?} but it wasn't held", self)
+        });
     }
 }
 
@@ -187,6 +216,12 @@ impl fmt::Debug for LazyMutexId {
     }
 }
 
+impl Default for LazyMutexId {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 /// Safety: the UnsafeCell is guaranteed to only be accessed mutably from a `Once`.
 unsafe impl Sync for LazyMutexId {}
 
@@ -241,21 +276,8 @@ struct BorrowedMutex<'a>(&'a MutexId);
 /// that is an indication of a serious design flaw in this library.
 impl<'a> Drop for BorrowedMutex<'a> {
     fn drop(&mut self) {
-        let id = self.0;
-
-        HELD_LOCKS.with(|locks| {
-            let mut locks = locks.borrow_mut();
-
-            for (i, &lock) in locks.iter().enumerate().rev() {
-                if lock == id.value() {
-                    locks.remove(i);
-                    return;
-                }
-            }
-
-            // Drop impls shouldn't panic but if this happens something is seriously broken.
-            unreachable!("Tried to drop lock for mutex {:?} but it wasn't held", id)
-        });
+        // Safety: the only way to get a BorrowedMutex is by locking the mutex.
+        unsafe { self.0.mark_released() };
     }
 }
 
diff --git a/src/lockapi.rs b/src/lockapi.rs
new file mode 100644
index 0000000..b957d6d
--- /dev/null
+++ b/src/lockapi.rs
@@ -0,0 +1,118 @@
+//! Wrapper implementations for [`lock_api`].
+use lock_api::GuardNoSend;
+use lock_api::RawMutex;
+use lock_api::RawMutexFair;
+use lock_api::RawMutexTimed;
+
+use crate::LazyMutexId;
+
+/// Tracing wrapper for all [`lock_api`] traits.
+///
+/// This wrapper implements any of the locking traits available, given that the wrapped type
+/// implements them. As such, this wrapper can be used both for normal mutexes and rwlocks.
+#[derive(Debug, Default)]
+pub struct TracingWrapper<T> {
+    inner: T,
+    id: LazyMutexId,
+}
+
+impl<T> TracingWrapper<T> {
+    /// Mark this lock as held in the dependency graph.
+    fn mark_held(&self) {
+        self.id.mark_held();
+    }
+
+    /// Mark this lock as released in the dependency graph.
+    ///
+    /// # Safety
+    ///
+    /// This function should only be called when the lock has been previously acquired by this
+    /// thread.
+    unsafe fn mark_released(&self) {
+        self.id.mark_released();
+    }
+
+    /// Conditionally lock the mutex.
+    ///
+    /// First acquires the lock, then runs the provided function. If that function returns true,
+    /// then the lock is kept, otherwise the mutex is immediately marked as released.
+    fn conditionally_lock(&self, f: impl FnOnce() -> bool) -> bool {
+        // Mark as locked while we try to do the thing
+        self.mark_held();
+
+        if f() {
+            true
+        } else {
+            // Safety: we just locked it above.
+            unsafe { self.mark_released() }
+            false
+        }
+    }
+}
+
+unsafe impl<T> RawMutex for TracingWrapper<T>
+where
+    T: RawMutex,
+{
+    const INIT: Self = Self {
+        inner: T::INIT,
+        id: LazyMutexId::new(),
+    };
+
+    /// Always equal to [`GuardNoSend`], as an implementation detail of the tracking system
+    /// requires this behaviour. May change in the future to reflect the actual guard type from
+    /// the wrapped primitive.
+    type GuardMarker = GuardNoSend;
+
+    fn lock(&self) {
+        self.mark_held();
+        self.inner.lock();
+    }
+
+    fn try_lock(&self) -> bool {
+        self.conditionally_lock(|| self.inner.try_lock())
+    }
+
+    unsafe fn unlock(&self) {
+        self.inner.unlock();
+        self.mark_released();
+    }
+
+    fn is_locked(&self) -> bool {
+        // Can't use the default implementation as the inner type might've overridden it.
+        self.inner.is_locked()
+    }
+}
+
+unsafe impl<T> RawMutexFair for TracingWrapper<T>
+where
+    T: RawMutexFair,
+{
+    unsafe fn unlock_fair(&self) {
+        self.inner.unlock_fair();
+        self.mark_released();
+    }
+
+    unsafe fn bump(&self) {
+        // Bumping effectively doesn't change which locks are held, so we don't need to manage the
+        // lock state.
+        self.inner.bump();
+    }
+}
+
+unsafe impl<T> RawMutexTimed for TracingWrapper<T>
+where
+    T: RawMutexTimed,
+{
+    type Duration = T::Duration;
+
+    type Instant = T::Instant;
+
+    fn try_lock_for(&self, timeout: Self::Duration) -> bool {
+        self.conditionally_lock(|| self.inner.try_lock_for(timeout))
+    }
+
+    fn try_lock_until(&self, timeout: Self::Instant) -> bool {
+        self.conditionally_lock(|| self.inner.try_lock_until(timeout))
+    }
+}
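
Usage note (not part of the patch): the wrapper is intended to slot into lock_api's generic Mutex<R, T>. Below is a minimal sketch of how that might look, assuming the crate is built with the new `lockapi` feature and that some `lock_api::RawMutex` implementation is available; `parking_lot::RawMutex` and the type aliases `TracingRawMutex`/`TracingMutex` are only illustrative, not names introduced by this patch.

use lock_api::Mutex;
use tracing_mutex::lockapi::TracingWrapper;

// Hypothetical aliases for the example; any type implementing `lock_api::RawMutex`
// can serve as the wrapped primitive.
type TracingRawMutex = TracingWrapper<parking_lot::RawMutex>;
type TracingMutex<T> = Mutex<TracingRawMutex, T>;

fn main() {
    let mutex: TracingMutex<i32> = TracingMutex::new(42);

    {
        // Locking goes through TracingWrapper::lock, which records the dependency
        // in the tracking graph before delegating to the wrapped raw mutex.
        let mut guard = mutex.lock();
        *guard += 1;
    } // Guard dropped here; RawMutex::unlock marks the lock as released again.

    assert_eq!(*mutex.lock(), 43);
}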
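
For completeness, a sketch of the behaviour the dependency tracking is meant to enforce through this wrapper: acquiring wrapped mutexes in an order that would close a cycle in the dependency graph panics, mirroring the documented `mark_held` semantics. This reuses the hypothetical `TracingMutex` alias from the example above and is an illustration, not a test shipped with the patch.

#[test]
#[should_panic]
fn lock_order_inversion_panics() {
    let a = TracingMutex::new(());
    let b = TracingMutex::new(());

    {
        // Establish the dependency: b is acquired while a is held.
        let _a = a.lock();
        let _b = b.lock();
    }

    {
        // Acquiring in the opposite order adds the reverse edge, which would
        // create a cycle in the dependency graph and should therefore panic.
        let _b = b.lock();
        let _a = a.lock();
    }
}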