Merge pull request #3 from bertptrs/locking-api-support

Committed on 2021-07-10 17:28:20 +02:00 by GitHub
6 changed files with 689 additions and 17 deletions


@@ -30,10 +30,12 @@ jobs:
- uses: actions-rs/cargo@v1
with:
command: build
args: --all-features
- uses: actions-rs/cargo@v1
with:
command: test
args: --all-features
- uses: actions-rs/cargo@v1
with:
@@ -43,4 +45,4 @@ jobs:
- uses: actions-rs/cargo@v1
with:
command: clippy
args: --all-features -- -D warnings

CHANGELOG.md

@@ -6,6 +6,14 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
- Added generic support for wrapping mutexes that implement the traits provided by the
[`lock_api`][lock_api] crate. This can be used to add tracing support for any other mutex
provider that implements those traits.
- Added support for [`parking_lot`][parking_lot] mutexes. Support includes type aliases for all
provided mutex types as well as a dedicated `Once` wrapper.
## [0.1.2] - 2021-05-27
### Added
@@ -33,3 +41,6 @@ Initial release.
[0.1.2]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.1...v0.1.2
[0.1.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.0...v0.1.1
[0.1.0]: https://github.com/bertptrs/tracing-mutex/releases/tag/v0.1.0
[lock_api]: https://docs.rs/lock_api/
[parking_lot]: https://docs.rs/parking_lot/

Cargo.toml

@@ -13,6 +13,13 @@ repository = "https://github.com/bertptrs/tracing-mutex"
[dependencies]
lazy_static = "1"
lock_api = { version = "0.4", optional = true }
parking_lot = { version = "0.11", optional = true }
[dev-dependencies]
rand = "0.8"
[features]
# Feature names do not match crate names pending namespaced features.
lockapi = ["lock_api"]
parkinglot = ["parking_lot", "lockapi"]

src/lib.rs

@@ -60,10 +60,18 @@ use std::sync::Once;
use std::sync::PoisonError;
use lazy_static::lazy_static;
#[cfg(feature = "lockapi")]
pub use lock_api;
#[cfg(feature = "parkinglot")]
pub use parking_lot;
use crate::graph::DiGraph;
mod graph;
#[cfg(feature = "lockapi")]
pub mod lockapi;
#[cfg(feature = "parkinglot")]
pub mod parkinglot;
pub mod stdsync;
/// Counter for Mutex IDs. Atomic avoids the need for locking.
@@ -120,6 +128,16 @@ impl MutexId {
///
/// This method panics if the new dependency would introduce a cycle.
pub fn get_borrowed(&self) -> BorrowedMutex {
self.mark_held();
BorrowedMutex(self)
}
/// Mark this lock as held for the purposes of dependency tracking.
///
/// # Panics
///
/// This method panics if the new dependency would introduce a cycle.
pub fn mark_held(&self) {
let creates_cycle = HELD_LOCKS.with(|locks| {
if let Some(&previous) = locks.borrow().last() {
let mut graph = get_dependency_graph();
@@ -136,7 +154,22 @@ impl MutexId {
}
HELD_LOCKS.with(|locks| locks.borrow_mut().push(self.value()));
}
/// Mark this lock as released for the purposes of dependency tracking.
///
/// # Safety
///
/// This method may only be called if the lock was previously marked as held by this thread.
pub unsafe fn mark_released(&self) {
HELD_LOCKS.with(|locks| {
let mut locks = locks.borrow_mut();
for (i, &lock) in locks.iter().enumerate().rev() {
if lock == self.value() {
locks.remove(i);
return;
}
}
// Drop impls shouldn't panic but if this happens something is seriously broken.
unreachable!("Tried to drop lock for mutex {:?} but it wasn't held", self)
});
}
}
@@ -187,6 +220,12 @@ impl fmt::Debug for LazyMutexId {
}
}
impl Default for LazyMutexId {
fn default() -> Self {
Self::new()
}
}
/// Safety: the UnsafeCell is guaranteed to only be accessed mutably from a `Once`.
unsafe impl Sync for LazyMutexId {}
@@ -241,21 +280,8 @@ struct BorrowedMutex<'a>(&'a MutexId);
/// that is an indication of a serious design flaw in this library.
impl<'a> Drop for BorrowedMutex<'a> {
fn drop(&mut self) {
// Safety: the only way to get a BorrowedMutex is by locking the mutex.
unsafe { self.0.mark_released() };
}
}

src/lockapi.rs (new file, 348 lines added)

@@ -0,0 +1,348 @@
//! Wrapper implementations for [`lock_api`].
//!
//! This module does not provide any particular mutex implementation by itself, but rather can be
//! used to add dependency tracking to mutexes that already exist. It implements all of the traits
//! in `lock_api` based on the one it wraps. Crates such as `spin` and `parking_lot` provide base
//! primitives that can be wrapped.
//!
//! Wrapped mutexes are at least one `usize` larger than the types they wrap, and must be aligned
//! to `usize` boundaries. As such, libraries with many mutexes may want to consider the
//! additional memory required.
use lock_api::GuardNoSend;
use lock_api::RawMutex;
use lock_api::RawMutexFair;
use lock_api::RawMutexTimed;
use lock_api::RawRwLock;
use lock_api::RawRwLockDowngrade;
use lock_api::RawRwLockFair;
use lock_api::RawRwLockRecursive;
use lock_api::RawRwLockRecursiveTimed;
use lock_api::RawRwLockTimed;
use lock_api::RawRwLockUpgrade;
use lock_api::RawRwLockUpgradeDowngrade;
use lock_api::RawRwLockUpgradeFair;
use lock_api::RawRwLockUpgradeTimed;
use crate::LazyMutexId;
/// Tracing wrapper for all [`lock_api`] traits.
///
/// This wrapper implements any of the locking traits available, given that the wrapped type
/// implements them. As such, this wrapper can be used both for normal mutexes and rwlocks.
#[derive(Debug, Default)]
pub struct TracingWrapper<T> {
inner: T,
// Need to use a lazy mutex ID to initialize statically.
id: LazyMutexId,
}
impl<T> TracingWrapper<T> {
/// Mark this lock as held in the dependency graph.
fn mark_held(&self) {
self.id.mark_held();
}
/// Mark this lock as released in the dependency graph.
///
/// # Safety
///
/// This function should only be called when the lock has been previously acquired by this
/// thread.
unsafe fn mark_released(&self) {
self.id.mark_released();
}
/// First mark ourselves as held, then call the locking function.
fn lock(&self, f: impl FnOnce()) {
self.mark_held();
f();
}
/// First call the unlocking function, then mark ourselves as released.
unsafe fn unlock(&self, f: impl FnOnce()) {
f();
self.mark_released();
}
/// Conditionally lock the mutex.
///
/// First acquires the lock, then runs the provided function. If that function returns true,
/// then the lock is kept, otherwise the mutex is immediately marked as released.
///
/// # Returns
///
/// The value returned from the callback.
fn conditionally_lock(&self, f: impl FnOnce() -> bool) -> bool {
// Mark as locked while we try to do the thing
self.mark_held();
if f() {
true
} else {
// Safety: we just locked it above.
unsafe { self.mark_released() }
false
}
}
}
unsafe impl<T> RawMutex for TracingWrapper<T>
where
T: RawMutex,
{
const INIT: Self = Self {
inner: T::INIT,
id: LazyMutexId::new(),
};
/// Always equal to [`GuardNoSend`], as an implementation detail in the tracking system requires
/// this behaviour. May change in the future to reflect the actual guard type from the wrapped
/// primitive.
type GuardMarker = GuardNoSend;
fn lock(&self) {
self.lock(|| self.inner.lock());
}
fn try_lock(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock())
}
unsafe fn unlock(&self) {
self.unlock(|| self.inner.unlock());
}
fn is_locked(&self) -> bool {
// Can't use the default implementation as the inner type might have overridden it.
self.inner.is_locked()
}
}
unsafe impl<T> RawMutexFair for TracingWrapper<T>
where
T: RawMutexFair,
{
unsafe fn unlock_fair(&self) {
self.unlock(|| self.inner.unlock_fair())
}
unsafe fn bump(&self) {
// Bumping effectively doesn't change which locks are held, so we don't need to manage the
// lock state.
self.inner.bump();
}
}
unsafe impl<T> RawMutexTimed for TracingWrapper<T>
where
T: RawMutexTimed,
{
type Duration = T::Duration;
type Instant = T::Instant;
fn try_lock_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_for(timeout))
}
fn try_lock_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_until(timeout))
}
}
unsafe impl<T> RawRwLock for TracingWrapper<T>
where
T: RawRwLock,
{
const INIT: Self = Self {
inner: T::INIT,
id: LazyMutexId::new(),
};
/// Always equal to [`GuardNoSend`], as an implementation detail in the tracking system requires
/// this behaviour. May change in the future to reflect the actual guard type from the wrapped
/// primitive.
type GuardMarker = GuardNoSend;
fn lock_shared(&self) {
self.lock(|| self.inner.lock_shared());
}
fn try_lock_shared(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared())
}
unsafe fn unlock_shared(&self) {
self.unlock(|| self.inner.unlock_shared());
}
fn lock_exclusive(&self) {
self.lock(|| self.inner.lock_exclusive());
}
fn try_lock_exclusive(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive())
}
unsafe fn unlock_exclusive(&self) {
self.unlock(|| self.inner.unlock_exclusive());
}
fn is_locked(&self) -> bool {
self.inner.is_locked()
}
}
unsafe impl<T> RawRwLockDowngrade for TracingWrapper<T>
where
T: RawRwLockDowngrade,
{
unsafe fn downgrade(&self) {
// Downgrading does not require tracking
self.inner.downgrade()
}
}
unsafe impl<T> RawRwLockUpgrade for TracingWrapper<T>
where
T: RawRwLockUpgrade,
{
fn lock_upgradable(&self) {
self.lock(|| self.inner.lock_upgradable());
}
fn try_lock_upgradable(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable())
}
unsafe fn unlock_upgradable(&self) {
self.unlock(|| self.inner.unlock_upgradable());
}
unsafe fn upgrade(&self) {
// Upgrading does not change which locks are held, so no extra tracking is needed.
self.inner.upgrade();
}
unsafe fn try_upgrade(&self) -> bool {
self.inner.try_upgrade()
}
}
unsafe impl<T> RawRwLockFair for TracingWrapper<T>
where
T: RawRwLockFair,
{
unsafe fn unlock_shared_fair(&self) {
self.unlock(|| self.inner.unlock_shared_fair());
}
unsafe fn unlock_exclusive_fair(&self) {
self.unlock(|| self.inner.unlock_exclusive_fair());
}
unsafe fn bump_shared(&self) {
self.inner.bump_shared();
}
unsafe fn bump_exclusive(&self) {
self.inner.bump_exclusive();
}
}
unsafe impl<T> RawRwLockRecursive for TracingWrapper<T>
where
T: RawRwLockRecursive,
{
fn lock_shared_recursive(&self) {
self.lock(|| self.inner.lock_shared_recursive());
}
fn try_lock_shared_recursive(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive())
}
}
unsafe impl<T> RawRwLockRecursiveTimed for TracingWrapper<T>
where
T: RawRwLockRecursiveTimed,
{
fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive_for(timeout))
}
fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive_until(timeout))
}
}
unsafe impl<T> RawRwLockTimed for TracingWrapper<T>
where
T: RawRwLockTimed,
{
type Duration = T::Duration;
type Instant = T::Instant;
fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_for(timeout))
}
fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_until(timeout))
}
fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive_for(timeout))
}
fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive_until(timeout))
}
}
unsafe impl<T> RawRwLockUpgradeDowngrade for TracingWrapper<T>
where
T: RawRwLockUpgradeDowngrade,
{
unsafe fn downgrade_upgradable(&self) {
self.inner.downgrade_upgradable()
}
unsafe fn downgrade_to_upgradable(&self) {
self.inner.downgrade_to_upgradable()
}
}
unsafe impl<T> RawRwLockUpgradeFair for TracingWrapper<T>
where
T: RawRwLockUpgradeFair,
{
unsafe fn unlock_upgradable_fair(&self) {
self.unlock(|| self.inner.unlock_upgradable_fair())
}
unsafe fn bump_upgradable(&self) {
self.inner.bump_upgradable()
}
}
unsafe impl<T> RawRwLockUpgradeTimed for TracingWrapper<T>
where
T: RawRwLockUpgradeTimed,
{
fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable_for(timeout))
}
fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable_until(timeout))
}
unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool {
// Like `try_upgrade`, upgrading an already-held lock needs no extra tracking.
self.inner.try_upgrade_for(timeout)
}
unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool {
self.inner.try_upgrade_until(timeout)
}
}

src/parkinglot.rs (new file, 278 lines added)

@@ -0,0 +1,278 @@
//! Wrapper types and type aliases for tracing [`parking_lot`] mutexes.
//!
//! This module provides type aliases that use the [`lockapi`][crate::lockapi] module to provide
//! tracing variants of the `parking_lot` primitives. Each of the `TracingX` type aliases wraps an
//! `X` from the `parking_lot` API with dependency tracking, and a `DebugX` will refer to a `TracingX`
//! when `debug_assertions` are enabled and to `X` when they're not. This can be used to aid
//! debugging in development while enjoying maximum performance in production.
//!
//! # Usage
//!
//! ```
//! # use std::sync::Arc;
//! # use std::thread;
//! # use lock_api::Mutex;
//! # use tracing_mutex::parkinglot::TracingMutex;
//! let mutex = Arc::new(TracingMutex::new(0));
//!
//! let handles: Vec<_> = (0..10).map(|_| {
//! let mutex = Arc::clone(&mutex);
//! thread::spawn(move || *mutex.lock() += 1)
//! }).collect();
//!
//! handles.into_iter().for_each(|handle| handle.join().unwrap());
//!
//! // All threads completed so the value should be 10.
//! assert_eq!(10, *mutex.lock());
//! ```
//!
//! # Limitations
//!
//! The main lock for the global state is still provided by `std::sync` and the tracing primitives
//! are larger than the `parking_lot` primitives they wrap, so there can be a performance
//! degradation between using this and using `parking_lot` directly. If this is of concern to you,
//! try using the `DebugX`-structs, which provide cycle detection only when `debug_assertions` are
//! enabled and have no overhead when they're not.
//!
//! In addition, the mutex guards returned by the tracing wrappers are `!Send`, regardless of
//! whether `parking_lot` is configured to have `Send` mutex guards. This is a limitation of the
//! current bookkeeping system.
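//!
//! A minimal sketch of the debug-only variants; `DebugMutex` is traced when debug assertions are
//! enabled and is a plain `parking_lot` mutex otherwise:
//!
//! ```
//! use tracing_mutex::parkinglot::DebugMutex;
//!
//! let mutex = DebugMutex::new(0);
//! *mutex.lock() += 1;
//! assert_eq!(1, *mutex.lock());
//! ```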
use parking_lot::Once;
use parking_lot::OnceState;
use crate::lockapi::TracingWrapper;
use crate::LazyMutexId;
macro_rules! debug_variant {
($debug_name:ident, $tracing_name:ident, $normal_name:ty) => {
type $tracing_name = TracingWrapper<$normal_name>;
#[cfg(debug_assertions)]
type $debug_name = TracingWrapper<$normal_name>;
#[cfg(not(debug_assertions))]
type $debug_name = $normal_name;
};
}
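// As an illustration, `debug_variant!(DebugRawMutex, TracingRawMutex, parking_lot::RawMutex)`
// defines `TracingRawMutex` as a tracing wrapper around the raw mutex, and `DebugRawMutex` as
// that same wrapper when debug assertions are enabled or the bare raw mutex when they are not.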
debug_variant!(
DebugRawFairMutex,
TracingRawFairMutex,
parking_lot::RawFairMutex
);
debug_variant!(DebugRawMutex, TracingRawMutex, parking_lot::RawMutex);
debug_variant!(DebugRawRwLock, TracingRawRwLock, parking_lot::RawRwLock);
/// Dependency tracking fair mutex. See: [`parking_lot::FairMutex`].
pub type TracingFairMutex<T> = lock_api::Mutex<TracingRawFairMutex, T>;
/// Mutex guard for [`TracingFairMutex`].
pub type TracingFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawFairMutex, T>;
/// RAII guard for `TracingFairMutexGuard::map`.
pub type TracingMappedFairMutexGuard<'a, T> =
lock_api::MappedMutexGuard<'a, TracingRawFairMutex, T>;
/// Debug-only dependency tracking fair mutex.
///
/// If debug assertions are enabled this resolves to [`TracingFairMutex`] and to
/// [`parking_lot::FairMutex`] otherwise.
pub type DebugFairMutex<T> = lock_api::Mutex<DebugRawFairMutex, T>;
/// Mutex guard for [`DebugFairMutex`].
pub type DebugFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawFairMutex, T>;
/// RAII guard for `DebugFairMutexGuard::map`.
pub type DebugMappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, DebugRawFairMutex, T>;
/// Dependency tracking mutex. See: [`parking_lot::Mutex`].
pub type TracingMutex<T> = lock_api::Mutex<TracingRawMutex, T>;
/// Mutex guard for [`TracingMutex`].
pub type TracingMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawMutex, T>;
/// RAII guard for `TracingMutexGuard::map`.
pub type TracingMappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, TracingRawMutex, T>;
/// Debug-only dependency tracking mutex.
///
/// If debug assertions are enabled this resolves to [`TracingMutex`] and to [`parking_lot::Mutex`]
/// otherwise.
pub type DebugMutex<T> = lock_api::Mutex<DebugRawMutex, T>;
/// Mutex guard for [`DebugMutex`].
pub type DebugMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawMutex, T>;
/// RAII guard for `DebugMutexGuard::map`.
pub type DebugMappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, DebugRawMutex, T>;
/// Dependency tracking reentrant mutex. See: [`parking_lot::ReentrantMutex`].
///
/// **Note:** due to the way dependencies are tracked, this mutex can only be acquired directly
/// after itself. Acquiring any other mutex in between introduces a dependency cycle, and will
/// therefore be rejected.
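///
/// A minimal sketch of the allowed reentrant pattern:
///
/// ```
/// use tracing_mutex::parkinglot::TracingReentrantMutex;
///
/// let mutex = TracingReentrantMutex::new(0);
/// let _first = mutex.lock();
/// // Re-acquiring the same mutex is allowed; locking a different mutex in
/// // between and then re-acquiring this one would be rejected as a cycle.
/// let _second = mutex.lock();
/// ```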
pub type TracingReentrantMutex<T> =
lock_api::ReentrantMutex<TracingWrapper<parking_lot::RawMutex>, parking_lot::RawThreadId, T>;
/// Mutex guard for [`TracingReentrantMutex`].
pub type TracingReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<
'a,
TracingWrapper<parking_lot::RawMutex>,
parking_lot::RawThreadId,
T,
>;
/// RAII guard for `TracingReentrantMutexGuard::map`.
pub type TracingMappedReentrantMutexGuard<'a, T> =
lock_api::MappedReentrantMutexGuard<'a, TracingRawMutex, parking_lot::RawThreadId, T>;
/// Debug-only dependency tracking reentrant mutex.
///
/// If debug assertions are enabled this resolves to [`TracingReentrantMutex`] and to
/// [`parking_lot::ReentrantMutex`] otherwise.
pub type DebugReentrantMutex<T> =
lock_api::ReentrantMutex<DebugRawMutex, parking_lot::RawThreadId, T>;
/// Mutex guard for [`DebugReentrantMutex`].
pub type DebugReentrantMutexGuard<'a, T> =
lock_api::ReentrantMutexGuard<'a, DebugRawMutex, parking_lot::RawThreadId, T>;
/// RAII guard for `DebugReentrantMutexGuard::map`.
pub type DebugMappedReentrantMutexGuard<'a, T> =
lock_api::MappedReentrantMutexGuard<'a, DebugRawMutex, parking_lot::RawThreadId, T>;
/// Dependency tracking RwLock. See: [`parking_lot::RwLock`].
pub type TracingRwLock<T> = lock_api::RwLock<TracingRawRwLock, T>;
/// Read guard for [`TracingRwLock`].
pub type TracingRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, TracingRawRwLock, T>;
/// Write guard for [`TracingRwLock`].
pub type TracingRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, TracingRawRwLock, T>;
/// RAII guard for `TracingRwLockReadGuard::map`.
pub type TracingMappedRwLockReadGuard<'a, T> =
lock_api::MappedRwLockReadGuard<'a, TracingRawRwLock, T>;
/// RAII guard for `TracingRwLockWriteGuard::map`.
pub type TracingMappedRwLockWriteGuard<'a, T> =
lock_api::MappedRwLockWriteGuard<'a, TracingRawRwLock, T>;
/// Debug-only dependency tracking RwLock.
///
/// If debug assertions are enabled this resolves to [`TracingRwLock`] and to
/// [`parking_lot::RwLock`] otherwise.
pub type DebugRwLock<T> = lock_api::RwLock<DebugRawRwLock, T>;
/// Read guard for [`DebugRwLock`].
pub type DebugRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, DebugRawRwLock, T>;
/// Write guard for [`DebugRwLock`].
pub type DebugRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, DebugRawRwLock, T>;
/// RAII guard for `DebugRwLockReadGuard::map`.
pub type DebugMappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, DebugRawRwLock, T>;
/// RAII guard for `DebugRwLockWriteGuard::map`.
pub type DebugMappedRwLockWriteGuard<'a, T> =
lock_api::MappedRwLockWriteGuard<'a, DebugRawRwLock, T>;
/// A dependency-tracking wrapper for [`parking_lot::Once`].
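///
/// A minimal usage sketch:
///
/// ```
/// use tracing_mutex::parkinglot::TracingOnce;
///
/// static INIT: TracingOnce = TracingOnce::new();
///
/// INIT.call_once(|| {
///     // One-time initialization goes here.
/// });
/// assert!(INIT.state().done());
/// ```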
#[derive(Debug, Default)]
pub struct TracingOnce {
inner: Once,
id: LazyMutexId,
}
impl TracingOnce {
/// Create a new `TracingOnce` value.
pub const fn new() -> Self {
Self {
inner: Once::new(),
id: LazyMutexId::new(),
}
}
/// Returns the current state of this `Once`.
pub fn state(&self) -> OnceState {
self.inner.state()
}
/// Performs an initialization routine once and only once.
///
/// This call is considered as "locking this `TracingOnce`" and it participates in dependency
/// tracking as such.
///
/// # Panics
///
/// This method will panic if `f` panics, poisoning this `Once`. In addition, this function
/// panics when the lock acquisition order is determined to be inconsistent.
pub fn call_once(&self, f: impl FnOnce()) {
let _borrow = self.id.get_borrowed();
self.inner.call_once(f);
}
/// Performs the given initialization routine once and only once.
///
/// This method is identical to [`TracingOnce::call_once`] except it ignores poisoning.
pub fn call_once_force(&self, f: impl FnOnce(OnceState)) {
let _borrow = self.id.get_borrowed();
self.inner.call_once_force(f);
}
}
/// Debug-only `Once`.
///
/// If debug assertions are enabled this resolves to [`TracingOnce`] and to [`parking_lot::Once`]
/// otherwise.
#[cfg(debug_assertions)]
pub type DebugOnce = TracingOnce;
#[cfg(not(debug_assertions))]
pub type DebugOnce = Once;
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::thread;
use super::*;
#[test]
fn test_mutex_usage() {
let mutex = Arc::new(TracingMutex::new(()));
let local_lock = mutex.lock();
drop(local_lock);
thread::spawn(move || {
let _remote_lock = mutex.lock();
})
.join()
.unwrap();
}
#[test]
#[should_panic]
fn test_mutex_conflict() {
let mutexes = [
TracingMutex::new(()),
TracingMutex::new(()),
TracingMutex::new(()),
];
for i in 0..3 {
let _first_lock = mutexes[i].lock();
let _second_lock = mutexes[(i + 1) % 3].lock();
}
}
#[test]
fn test_rwlock_usage() {
let lock = Arc::new(TracingRwLock::new(()));
let lock2 = Arc::clone(&lock);
let _read_lock = lock.read();
// Should be able to acquire lock in the background
thread::spawn(move || {
let _read_lock = lock2.read();
})
.join()
.unwrap();
}
#[test]
fn test_once_usage() {
let once = Arc::new(TracingOnce::new());
let once_clone = once.clone();
assert!(!once_clone.state().done());
let handle = thread::spawn(move || {
assert!(!once_clone.state().done());
once_clone.call_once(|| {});
assert!(once_clone.state().done());
});
handle.join().unwrap();
assert!(once.state().done());
}
}