46 Commits

Author SHA1 Message Date
5232bac582 Bump version 2022-05-23 08:59:47 +02:00
bors[bot]
6472f4b807 Merge #21
21: Prepare for release v0.2.1 r=bertptrs a=bertptrs



Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-23 06:55:45 +00:00
6afe7b1c48 Update README and CHANGELOG 2022-05-23 08:53:56 +02:00
9238ef53ee Update copyright 2022-05-23 08:37:26 +02:00
bors[bot]
c08addff7d Merge #17
17: Fix typos r=bertptrs a=quisar



Co-authored-by: Benjamin Lerman <qsr@chromium.org>
2022-05-23 06:33:21 +00:00
bors[bot]
c1ce9df8ad Merge #19
19: Add a wrapper for `std::sync::Condvar` r=bertptrs a=bertptrs

This wrapper does not do any tracing itself but supports the use of a tracing mutex guard instead of an `std::sync` one.

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-17 19:50:02 +00:00
312eaa8649 Add a wrapper for std::sync::Condvar
This wrapper does not do any tracing itself but supports the use of a
tracing mutex guard instead of an `std::sync` one.
2022-05-17 21:45:25 +02:00
bors[bot]
1f7e6921aa Merge #18
18: Enable bors for nicer merging r=bertptrs a=bertptrs



Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-15 21:40:40 +00:00
f7048f265f Enable CI builds on staging/trying 2022-05-15 23:35:00 +02:00
64e56fdb86 Add minimal bors config 2022-05-15 23:35:00 +02:00
Benjamin Lerman
8e3278fdd2 Fix typos 2022-05-10 10:30:20 +02:00
9ea993e737 Add missing date 2022-05-07 18:15:50 +02:00
062850fc3e Merge pull request #16 from bertptrs/docsrs_feature_docs
Fix documentation builds for features
2022-05-07 17:56:09 +02:00
0d2622d5c6 Build documentation on CI 2022-05-07 17:52:32 +02:00
d1417e0b0c Tag module docs with their required features 2022-05-07 17:52:32 +02:00
fcc64e2cef Automatically build documentation for all features 2022-05-07 17:03:45 +02:00
fd0d05307c Update README and copyright year 2022-05-07 16:54:37 +02:00
2f6e214784 Merge pull request #15 from bertptrs/pre-release-cleanup 2022-05-07 16:50:10 +02:00
3ec7e83e00 Update changelog and version 2022-05-07 16:43:31 +02:00
ea8e0208a0 Explicitly test for disallowed self-cycles 2022-05-07 16:43:31 +02:00
8926af4e13 Also deny clippy warnings on other targets 2022-05-07 16:43:31 +02:00
77676ea04d Fix formatting 2022-05-07 16:43:31 +02:00
46c92cfbbf Merge pull request #13 from quisar/add_upgradable 2022-05-06 10:17:54 +02:00
Benjamin Lerman
743cc83669 Add TracingRwLockUpgradableReadGuard wrapper for parking_lot 2022-05-03 10:16:27 +02:00
4faaae8d8f Merge pull request #10 from quisar/fix_locking_issues_in_lockapi 2022-05-02 08:32:13 +02:00
Benjamin Lerman
b78af9150d Fix a number of issues in the lockapi wrappers. 2022-05-02 08:20:23 +02:00
515930c0a2 Merge pull request #9 from quisar/prevent-reentrant-locking 2022-05-02 08:13:48 +02:00
Benjamin Lerman
b5a5ca16c3 Do not allow recursive locks. 2022-05-02 08:11:37 +02:00
aef99d4f65 Merge pull request #8 from quisar/fix_deref 2022-05-02 08:04:55 +02:00
Benjamin Lerman
6073c6c78d Fix Target for Deref of stdsync::TracingMutexGuard 2022-05-01 16:53:26 +02:00
cdd44f74fa Merge pull request #11 from bertptrs/fix-bitrot 2022-05-01 14:06:23 +02:00
38b3b226cc Move to edition 2021 altogether 2022-05-01 12:03:50 +02:00
3b9b908460 Correctly mark mutex reference as unused 2022-05-01 11:50:50 +02:00
ef421e20eb Deal with IntoIter deprecation 2022-05-01 11:50:37 +02:00
66576e5b0e Merge pull request #5 from bertptrs/benchmarking
Implement minimal benchmarking of dependency tracking
2021-07-10 22:17:44 +02:00
308af218e1 Implement minimal benchmarking of dependency tracking 2021-07-10 22:14:33 +02:00
79ed599a2f Merge pull request #3 from bertptrs/locking-api-support 2021-07-10 17:28:20 +02:00
680e335ccf Document new modules 2021-07-10 17:25:42 +02:00
17761af5a8 Add type aliases for mapped mutex guards 2021-07-10 13:05:41 +02:00
4c70d999d6 Create type aliases for parking_lot::RwLock 2021-07-10 12:17:35 +02:00
618a11f940 Implement a wrapper for parking_lot::Once 2021-05-27 22:19:57 +02:00
77cd603363 Implement minimal mutexes for parking_lot. 2021-05-27 22:00:37 +02:00
73b4c8b1af Minimal parking_lot support 2021-05-27 21:16:24 +02:00
b21a63e74b Implement RwLock-based traits for lockapi worker. 2021-05-27 21:16:24 +02:00
6a3cb83d01 Implement Mutex behaviour for lock_api 2021-05-27 21:16:24 +02:00
08cfb17234 Build all features on CI 2021-05-27 21:16:24 +02:00
13 changed files with 1065 additions and 43 deletions

GitHub Actions CI workflow ("Continuous integration")

@@ -2,12 +2,14 @@ on:
   push:
     branches:
       - master
+      - staging
+      - trying
   pull_request:

 name: Continuous integration

 jobs:
-  ci:
+  tests:
     name: Rust project
     runs-on: ubuntu-latest
     strategy:
@@ -30,10 +32,13 @@ jobs:
       - uses: actions-rs/cargo@v1
         with:
           command: build
+          # --all-targets ensures that we also build the benchmarks and tests already.
+          args: --all-features --all-targets
       - uses: actions-rs/cargo@v1
         with:
           command: test
+          args: --all-features
       - uses: actions-rs/cargo@v1
         with:
@@ -43,4 +48,23 @@ jobs:
       - uses: actions-rs/cargo@v1
         with:
           command: clippy
-          args: -- -D warnings
+          args: --all-features --all-targets -- -D warnings
+
+  docs:
+    name: Documentation build
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: nightly
+          override: true
+      - name: Build documentation
+        env:
+          # Build the docs like docs.rs builds it
+          RUSTDOCFLAGS: --cfg docsrs
+        run: cargo doc --all-features

CHANGELOG.md

@@ -6,6 +6,44 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

 ## [Unreleased]

+## [0.2.1] - 2022-05-23
+
+### Added
+
+- Build [docs.rs] documentation with all features enabled for completeness.
+- Add support for `std::sync::Condvar`
+
+### Fixed
+
+- The `parkinglot` module is now correctly enabled by the `parkinglot` feature rather than the
+  `lockapi` feature.
+
+## [0.2.0] - 2022-05-07
+
+### Added
+
+- Generic support for wrapping mutexes that implement the traits provided by the
+  [`lock_api`][lock_api] crate. This can be used for creating support for other mutex providers that
+  implement it.
+- Support for [`parking_lot`][parking_lot] mutexes. Support includes type aliases for all
+  provided mutex types as well as a dedicated `Once` wrapper.
+- Simple benchmark to track the rough performance penalty incurred by dependency tracking.
+
+### Breaking
+
+- The library now requires edition 2021.
+- The `Mutex`- and `RwLockGuards` now dereference to `T` rather than the lock guard they wrap. This
+  is technically a bugfix but can theoretically break existing code.
+- Self-cycles are no longer allowed for lock dependencies. They previously were because it usually
+  isn't a problem, but it can create RWR deadlocks with `RwLocks`.
+
+### Changed
+
+- The project now targets edition 2021
+
 ## [0.1.2] - 2021-05-27

 ### Added

@@ -29,7 +67,13 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

 Initial release.

-[Unreleased]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.2...HEAD
-[0.1.2]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.2...v0.1.2
+[Unreleased]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.1...HEAD
+[0.2.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.0...v0.2.1
+[0.2.0]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.2...v0.2.0
+[0.1.2]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.1...v0.1.2
 [0.1.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.0...v0.1.1
 [0.1.0]: https://github.com/bertptrs/tracing-mutex/releases/tag/v0.1.0
+[docs.rs]: https://docs.rs/tracing-mutex/latest/tracing_mutex/
+[lock_api]: https://docs.rs/lock_api/
+[parking_lot]: https://docs.rs/parking_lot/
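
To make the guard-`Deref` change listed under 0.2.0 concrete, a minimal sketch (assuming the `stdsync::TracingMutex` wrapper shown later in this diff); before 0.2.0, `*guard` yielded the wrapped `MutexGuard`, so reaching the data took a second dereference:

```rust
use tracing_mutex::stdsync::TracingMutex;

fn main() {
    let mutex = TracingMutex::new(5);

    // Since 0.2.0 the guard dereferences straight to the protected data...
    assert_eq!(*mutex.lock().unwrap(), 5);

    // ...and mutation through the guard works the same way.
    *mutex.lock().unwrap() += 1;
    assert_eq!(*mutex.lock().unwrap(), 6);
}
```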

Cargo.toml

@@ -1,8 +1,8 @@
 [package]
 name = "tracing-mutex"
-version = "0.1.2"
+version = "0.2.1"
 authors = ["Bert Peters <bert@bertptrs.nl>"]
-edition = "2018"
+edition = "2021"
 license = "MIT OR Apache-2.0"
 documentation = "https://docs.rs/tracing-mutex"
 categories = ["concurrency", "development-tools::debugging"]
@@ -11,8 +11,26 @@ description = "Ensure deadlock-free mutexes by allocating in order, or else."
 readme = "README.md"
 repository = "https://github.com/bertptrs/tracing-mutex"

+[package.metadata.docs.rs]
+# Build docs for all features so the documentation is more complete
+all-features = true
+# Set custom cfg so we can enable docs.rs magic
+rustdoc-args = ["--cfg", "docsrs"]
+
 [dependencies]
 lazy_static = "1"
+lock_api = { version = "0.4", optional = true }
+parking_lot = { version = "0.11", optional = true }

 [dev-dependencies]
+criterion = "0.3"
 rand = "0.8"

+[[bench]]
+name = "mutex"
+harness = false
+
+[features]
+# Feature names do not match crate names pending namespaced features.
+lockapi = ["lock_api"]
+parkinglot = ["parking_lot", "lockapi"]

Apache-2.0 license text

@@ -186,7 +186,7 @@ APPENDIX: How to apply the Apache License to your work.
       same "printed page" as the copyright notice for easier
       identification within third-party archives.

-   Copyright [yyyy] [name of copyright owner]
+   Copyright 2022 Bert Peters

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.

MIT license text

@@ -1,4 +1,4 @@
-Copyright © 2021 Bert Peters
+Copyright © 2022 Bert Peters

 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
 associated documentation files (the “Software”), to deal in the Software without restriction,

README.md

@@ -34,7 +34,7 @@ Add this dependency to your `Cargo.toml` file like any other:

 ```toml
 [dependencies]
-tracing-mutex = "0.1"
+tracing-mutex = "0.2"
 ```

 Then use the locks provided by this library instead of the ones you would use otherwise.

@@ -59,12 +59,23 @@ performance penalty in your production environment, this library also offers deb
 when debug assertions are enabled, and to `Mutex` when they are not. Similar helper types are
 available for other synchronization primitives.

+### Features
+
+- Dependency-tracking wrappers for all locking primitives
+- Optional opt-out for release mode code
+- Support for primitives from:
+  - `std::sync`
+  - `parking_lot`
+  - Any library that implements the `lock_api` traits
+
 ## Future improvements

 - Improve performance in lock tracing
 - Optional logging to make debugging easier
 - Better and configurable error handling when detecting cyclic dependencies
-- Support for other locking libraries, such as `parking_lot`
+- Support for other locking libraries
+- Support for async locking libraries
+- Support for `Send` mutex guards

 **Note:** `parking_lot` has already begun work on its own deadlock detection mechanism, which works
 in a different way. Both can be complementary.
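
The debug-only helpers mentioned above are easiest to see with a small sketch (assuming the `DebugMutex` alias from `tracing_mutex::stdsync`; the other primitives have matching `Debug*` aliases). In debug builds this is a tracing mutex and inconsistent lock ordering panics; in release builds it resolves to a plain `std::sync::Mutex` with no tracking overhead.

```rust
use tracing_mutex::stdsync::DebugMutex;

fn main() {
    let accounts = DebugMutex::new(vec![100, 50]);
    let audit_log = DebugMutex::new(Vec::new());

    // Always take `accounts` before `audit_log`. If another code path took them
    // in the opposite order, debug builds would panic on the second, conflicting
    // acquisition instead of silently risking a deadlock.
    let mut balances = accounts.lock().unwrap();
    balances[0] -= 10;
    audit_log.lock().unwrap().push("transfer 10");
}
```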

benches/mutex.rs (new file, +82 lines)

@@ -0,0 +1,82 @@
use std::sync::Arc;
use std::sync::Mutex;
use criterion::criterion_group;
use criterion::criterion_main;
use criterion::BenchmarkId;
use criterion::Criterion;
use criterion::Throughput;
use rand::prelude::*;
use tracing_mutex::stdsync::TracingMutex;
const SAMPLE_SIZES: [usize; 5] = [10, 30, 100, 300, 1000];
/// Reproducibly generate random combinations (a, b) where index(a) < index(b)
///
/// All combinations are generated
fn generate_combinations<T>(options: &[Arc<T>]) -> Vec<(Arc<T>, Arc<T>)> {
let mut combinations = Vec::new();
for (i, first) in options.iter().enumerate() {
for second in options.iter().skip(i + 1) {
combinations.push((Arc::clone(first), Arc::clone(second)));
}
}
let mut rng = StdRng::seed_from_u64(42);
combinations.shuffle(&mut rng);
combinations
}
/// Take two arbitrary mutexes, lock the first, lock the second while holding the first.
fn benchmark_baseline(c: &mut Criterion) {
let mut group = c.benchmark_group("baseline");
for nodes in SAMPLE_SIZES {
group.throughput(Throughput::Elements((nodes * (nodes - 1) / 2) as u64));
group.bench_with_input(BenchmarkId::from_parameter(nodes), &nodes, |b, &s| {
b.iter_batched(
|| {
let mutexes: Vec<_> = (0..s).map(|_| Arc::new(Mutex::new(()))).collect();
generate_combinations(&mutexes)
},
|combinations| {
for (first, second) in combinations {
let _first = first.lock();
let _second = second.lock();
}
},
criterion::BatchSize::SmallInput,
)
});
}
}
/// Same as [`benchmark_baseline`] but now while tracking dependencies.
fn benchmark_tracing_mutex(c: &mut Criterion) {
let mut group = c.benchmark_group("tracing_mutex");
for nodes in SAMPLE_SIZES {
group.throughput(Throughput::Elements((nodes * (nodes - 1) / 2) as u64));
group.bench_with_input(BenchmarkId::from_parameter(nodes), &nodes, |b, &s| {
b.iter_batched(
|| {
let mutexes: Vec<_> = (0..s).map(|_| Arc::new(TracingMutex::new(()))).collect();
generate_combinations(&mutexes)
},
|combinations| {
for (first, second) in combinations {
let _first = first.lock();
let _second = second.lock();
}
},
criterion::BatchSize::SmallInput,
)
});
}
}
criterion_group!(benches, benchmark_baseline, benchmark_tracing_mutex);
criterion_main!(benches);

bors.toml (new file, +5 lines)

@@ -0,0 +1,5 @@
status = [
'Rust project (stable)',
'Rust project (beta)',
'Documentation build',
]

src/graph.rs

@@ -1,4 +1,3 @@
-use std::array::IntoIter;
 use std::cell::Cell;
 use std::collections::HashMap;
 use std::collections::HashSet;
@@ -99,8 +98,8 @@ where
     /// would introduce a cycle, the edge is rejected and `false` is returned.
     pub(crate) fn add_edge(&mut self, x: V, y: V) -> bool {
         if x == y {
-            // self-edges are not considered cycles
-            return true;
+            // self-edges are always considered cycles
+            return false;
         }

         let (_, out_edges, ub) = self.add_node(x);
@@ -116,7 +115,7 @@ where
         if lb < ub {
             // This edge might introduce a cycle, need to recompute the topological sort
-            let mut visited = IntoIter::new([x, y]).collect();
+            let mut visited = [x, y].into_iter().collect();

             let mut delta_f = Vec::new();
             let mut delta_b = Vec::new();
@@ -227,6 +226,14 @@ mod tests {
     use super::*;

+    #[test]
+    fn test_no_self_cycle() {
+        // Regression test for https://github.com/bertptrs/tracing-mutex/issues/7
+        let mut graph = DiGraph::default();
+
+        assert!(!graph.add_edge(1, 1));
+    }
+
     #[test]
     fn test_digraph() {
         let mut graph = DiGraph::default();
@@ -259,7 +266,9 @@
         for i in 0..NUM_NODES {
             for j in i..NUM_NODES {
-                edges.push((i, j));
+                if i != j {
+                    edges.push((i, j));
+                }
             }
         }
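
Taken together with the doc line above ("the edge is rejected and `false` is returned"), the new behaviour can be summarized in a hypothetical test in the style of `test_no_self_cycle`; this is a sketch of the intended semantics, not code from the diff:

```rust
#[test]
fn cycle_rejection_sketch() {
    let mut graph = DiGraph::default();

    assert!(graph.add_edge(1, 2)); // 1 -> 2 keeps the graph acyclic
    assert!(graph.add_edge(2, 3)); // 2 -> 3 is still fine
    assert!(!graph.add_edge(3, 1)); // 3 -> 1 would close 1 -> 2 -> 3 -> 1: rejected
    assert!(!graph.add_edge(1, 1)); // self-edges now count as cycles and are rejected
}
```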

src/lib.rs

@@ -45,6 +45,7 @@
 //! enabled, and to the underlying mutex when they're not.
 //!
 //! [paper]: https://whileydave.com/publications/pk07_jea/
+#![cfg_attr(docsrs, feature(doc_cfg))]
 use std::cell::RefCell;
 use std::cell::UnsafeCell;
 use std::fmt;
@@ -60,10 +61,22 @@ use std::sync::Once;
 use std::sync::PoisonError;

 use lazy_static::lazy_static;
+#[cfg(feature = "lockapi")]
+#[cfg_attr(docsrs, doc(cfg(feature = "lockapi")))]
+pub use lock_api;
+#[cfg(feature = "parkinglot")]
+#[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
+pub use parking_lot;

 use crate::graph::DiGraph;

 mod graph;
+#[cfg(feature = "lockapi")]
+#[cfg_attr(docsrs, doc(cfg(feature = "lockapi")))]
+pub mod lockapi;
+#[cfg(feature = "parkinglot")]
+#[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
+pub mod parkinglot;
 pub mod stdsync;

 /// Counter for Mutex IDs. Atomic avoids the need for locking.
@@ -120,6 +133,16 @@ impl MutexId {
     ///
     /// This method panics if the new dependency would introduce a cycle.
     pub fn get_borrowed(&self) -> BorrowedMutex {
+        self.mark_held();
+        BorrowedMutex(self)
+    }
+
+    /// Mark this lock as held for the purposes of dependency tracking.
+    ///
+    /// # Panics
+    ///
+    /// This method panics if the new dependency would introduce a cycle.
+    pub fn mark_held(&self) {
         let creates_cycle = HELD_LOCKS.with(|locks| {
             if let Some(&previous) = locks.borrow().last() {
                 let mut graph = get_dependency_graph();
@@ -136,7 +159,22 @@ impl MutexId {
         }

         HELD_LOCKS.with(|locks| locks.borrow_mut().push(self.value()));
-        BorrowedMutex(self)
+    }
+
+    pub unsafe fn mark_released(&self) {
+        HELD_LOCKS.with(|locks| {
+            let mut locks = locks.borrow_mut();
+
+            for (i, &lock) in locks.iter().enumerate().rev() {
+                if lock == self.value() {
+                    locks.remove(i);
+                    return;
+                }
+            }
+
+            // Drop impls shouldn't panic but if this happens something is seriously broken.
+            unreachable!("Tried to drop lock for mutex {:?} but it wasn't held", self)
+        });
     }
 }

@@ -187,6 +225,12 @@ impl fmt::Debug for LazyMutexId {
     }
 }

+impl Default for LazyMutexId {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 /// Safety: the UnsafeCell is guaranteed to only be accessed mutably from a `Once`.
 unsafe impl Sync for LazyMutexId {}

@@ -241,21 +285,8 @@ struct BorrowedMutex<'a>(&'a MutexId);
 /// that is an indication of a serious design flaw in this library.
 impl<'a> Drop for BorrowedMutex<'a> {
     fn drop(&mut self) {
-        let id = self.0;
-
-        HELD_LOCKS.with(|locks| {
-            let mut locks = locks.borrow_mut();
-
-            for (i, &lock) in locks.iter().enumerate().rev() {
-                if lock == id.value() {
-                    locks.remove(i);
-                    return;
-                }
-            }
-
-            // Drop impls shouldn't panic but if this happens something is seriously broken.
-            unreachable!("Tried to drop lock for mutex {:?} but it wasn't held", id)
-        });
+        // Safety: the only way to get a BorrowedMutex is by locking the mutex.
+        unsafe { self.0.mark_released() };
     }
 }

@@ -331,7 +362,9 @@ mod tests {
         let mut edges = Vec::with_capacity(NUM_NODES * NUM_NODES);

         for i in 0..NUM_NODES {
             for j in i..NUM_NODES {
-                edges.push((i, j));
+                if i != j {
+                    edges.push((i, j));
+                }
             }
         }

src/lockapi.rs (new file, +348 lines)

@@ -0,0 +1,348 @@
//! Wrapper implementations for [`lock_api`].
//!
//! This module does not provide any particular mutex implementation by itself, but rather can be
//! used to add dependency tracking to mutexes that already exist. It implements all of the traits
//! in `lock_api` based on the one it wraps. Crates such as `spin` and `parking_lot` provide base
//! primitives that can be wrapped.
//!
//! Wrapped mutexes are at least one `usize` larger than the types they wrap, and must be aligned
//! to `usize` boundaries. As such, libraries with many mutexes may want to consider the additional
//! required memory.
use lock_api::GuardNoSend;
use lock_api::RawMutex;
use lock_api::RawMutexFair;
use lock_api::RawMutexTimed;
use lock_api::RawRwLock;
use lock_api::RawRwLockDowngrade;
use lock_api::RawRwLockFair;
use lock_api::RawRwLockRecursive;
use lock_api::RawRwLockRecursiveTimed;
use lock_api::RawRwLockTimed;
use lock_api::RawRwLockUpgrade;
use lock_api::RawRwLockUpgradeDowngrade;
use lock_api::RawRwLockUpgradeFair;
use lock_api::RawRwLockUpgradeTimed;
use crate::LazyMutexId;
/// Tracing wrapper for all [`lock_api`] traits.
///
/// This wrapper implements any of the locking traits available, given that the wrapped type
/// implements them. As such, this wrapper can be used both for normal mutexes and rwlocks.
#[derive(Debug, Default)]
pub struct TracingWrapper<T> {
inner: T,
// Need to use a lazy mutex ID to initialize statically.
id: LazyMutexId,
}
impl<T> TracingWrapper<T> {
/// Mark this lock as held in the dependency graph.
fn mark_held(&self) {
self.id.mark_held();
}
/// Mark this lock as released in the dependency graph.
///
/// # Safety
///
/// This function should only be called when the lock has been previously acquired by this
/// thread.
unsafe fn mark_released(&self) {
self.id.mark_released();
}
/// First mark ourselves as held, then call the locking function.
fn lock(&self, f: impl FnOnce()) {
self.mark_held();
f();
}
/// First call the unlocking function, then mark ourselves as released.
unsafe fn unlock(&self, f: impl FnOnce()) {
f();
self.mark_released();
}
/// Conditionally lock the mutex.
///
/// First acquires the lock, then runs the provided function. If that function returns true,
/// then the lock is kept, otherwise the mutex is immediately marked as released.
///
/// # Returns
///
/// The value returned from the callback.
fn conditionally_lock(&self, f: impl FnOnce() -> bool) -> bool {
// Mark as locked while we try to do the thing
self.mark_held();
if f() {
true
} else {
// Safety: we just locked it above.
unsafe { self.mark_released() }
false
}
}
}
unsafe impl<T> RawMutex for TracingWrapper<T>
where
T: RawMutex,
{
const INIT: Self = Self {
inner: T::INIT,
id: LazyMutexId::new(),
};
/// Always equal to [`GuardNoSend`], as an implementation detail in the tracking system requires
/// this behaviour. May change in the future to reflect the actual guard type from the wrapped
/// primitive.
type GuardMarker = GuardNoSend;
fn lock(&self) {
self.lock(|| self.inner.lock());
}
fn try_lock(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock())
}
unsafe fn unlock(&self) {
self.unlock(|| self.inner.unlock());
}
fn is_locked(&self) -> bool {
// Can't use the default implementation as the inner type might've overridden it.
self.inner.is_locked()
}
}
unsafe impl<T> RawMutexFair for TracingWrapper<T>
where
T: RawMutexFair,
{
unsafe fn unlock_fair(&self) {
self.unlock(|| self.inner.unlock_fair())
}
unsafe fn bump(&self) {
// Bumping effectively doesn't change which locks are held, so we don't need to manage the
// lock state.
self.inner.bump();
}
}
unsafe impl<T> RawMutexTimed for TracingWrapper<T>
where
T: RawMutexTimed,
{
type Duration = T::Duration;
type Instant = T::Instant;
fn try_lock_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_for(timeout))
}
fn try_lock_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_until(timeout))
}
}
unsafe impl<T> RawRwLock for TracingWrapper<T>
where
T: RawRwLock,
{
const INIT: Self = Self {
inner: T::INIT,
id: LazyMutexId::new(),
};
/// Always equal to [`GuardNoSend`], as an implementation detail in the tracking system requires
/// this behaviour. May change in the future to reflect the actual guard type from the wrapped
/// primitive.
type GuardMarker = GuardNoSend;
fn lock_shared(&self) {
self.lock(|| self.inner.lock_shared());
}
fn try_lock_shared(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared())
}
unsafe fn unlock_shared(&self) {
self.unlock(|| self.inner.unlock_shared());
}
fn lock_exclusive(&self) {
self.lock(|| self.inner.lock_exclusive());
}
fn try_lock_exclusive(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive())
}
unsafe fn unlock_exclusive(&self) {
self.unlock(|| self.inner.unlock_exclusive());
}
fn is_locked(&self) -> bool {
self.inner.is_locked()
}
}
unsafe impl<T> RawRwLockDowngrade for TracingWrapper<T>
where
T: RawRwLockDowngrade,
{
unsafe fn downgrade(&self) {
// Downgrading does not require tracking
self.inner.downgrade()
}
}
unsafe impl<T> RawRwLockUpgrade for TracingWrapper<T>
where
T: RawRwLockUpgrade,
{
fn lock_upgradable(&self) {
self.lock(|| self.inner.lock_upgradable());
}
fn try_lock_upgradable(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable())
}
unsafe fn unlock_upgradable(&self) {
self.unlock(|| self.inner.unlock_upgradable());
}
unsafe fn upgrade(&self) {
self.inner.upgrade();
}
unsafe fn try_upgrade(&self) -> bool {
self.inner.try_upgrade()
}
}
unsafe impl<T> RawRwLockFair for TracingWrapper<T>
where
T: RawRwLockFair,
{
unsafe fn unlock_shared_fair(&self) {
self.unlock(|| self.inner.unlock_shared_fair());
}
unsafe fn unlock_exclusive_fair(&self) {
self.unlock(|| self.inner.unlock_exclusive_fair());
}
unsafe fn bump_shared(&self) {
self.inner.bump_shared();
}
unsafe fn bump_exclusive(&self) {
self.inner.bump_exclusive();
}
}
unsafe impl<T> RawRwLockRecursive for TracingWrapper<T>
where
T: RawRwLockRecursive,
{
fn lock_shared_recursive(&self) {
self.lock(|| self.inner.lock_shared_recursive());
}
fn try_lock_shared_recursive(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive())
}
}
unsafe impl<T> RawRwLockRecursiveTimed for TracingWrapper<T>
where
T: RawRwLockRecursiveTimed,
{
fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive_for(timeout))
}
fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive_until(timeout))
}
}
unsafe impl<T> RawRwLockTimed for TracingWrapper<T>
where
T: RawRwLockTimed,
{
type Duration = T::Duration;
type Instant = T::Instant;
fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_for(timeout))
}
fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_until(timeout))
}
fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive_for(timeout))
}
fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive_until(timeout))
}
}
unsafe impl<T> RawRwLockUpgradeDowngrade for TracingWrapper<T>
where
T: RawRwLockUpgradeDowngrade,
{
unsafe fn downgrade_upgradable(&self) {
self.inner.downgrade_upgradable()
}
unsafe fn downgrade_to_upgradable(&self) {
self.inner.downgrade_to_upgradable()
}
}
unsafe impl<T> RawRwLockUpgradeFair for TracingWrapper<T>
where
T: RawRwLockUpgradeFair,
{
unsafe fn unlock_upgradable_fair(&self) {
self.unlock(|| self.inner.unlock_upgradable_fair())
}
unsafe fn bump_upgradable(&self) {
self.inner.bump_upgradable()
}
}
unsafe impl<T> RawRwLockUpgradeTimed for TracingWrapper<T>
where
T: RawRwLockUpgradeTimed,
{
fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable_for(timeout))
}
fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable_until(timeout))
}
unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool {
self.inner.try_upgrade_for(timeout)
}
unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool {
self.inner.try_upgrade_until(timeout)
}
}
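
Because `TracingWrapper` only implements the raw `lock_api` traits, a consumer still needs `lock_api`'s type-level wrappers to get a usable mutex out of it. A minimal sketch (assuming the crate's `lockapi` feature plus `lock_api` and `parking_lot` as direct dependencies; the `parkinglot` module later in this diff defines essentially the same aliases):

```rust
use tracing_mutex::lockapi::TracingWrapper;

// A lock_api mutex whose raw lock is parking_lot's, with dependency tracking added on top.
type TracedMutex<T> = lock_api::Mutex<TracingWrapper<parking_lot::RawMutex>, T>;

fn main() {
    let counter = TracedMutex::new(0u32);

    *counter.lock() += 1;
    assert_eq!(*counter.lock(), 1);
}
```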

src/parkinglot.rs (new file, +296 lines)

@@ -0,0 +1,296 @@
//! Wrapper types and type aliases for tracing [`parking_lot`] mutexes.
//!
//! This module provides type aliases that use the [`lockapi`][crate::lockapi] module to provide
//! tracing variants of the `parking_lot` primitives. Each of the `TracingX` type aliases wraps an
//! `X` in the `parking_lot` API with dependency tracking, and a `DebugX` will refer to a `TracingX`
//! when `debug_assertions` are enabled and to `X` when they're not. This can be used to aid
//! debugging in development while enjoying maximum performance in production.
//!
//! # Usage
//!
//! ```
//! # use std::sync::Arc;
//! # use std::thread;
//! # use lock_api::Mutex;
//! # use tracing_mutex::parkinglot::TracingMutex;
//! let mutex = Arc::new(TracingMutex::new(0));
//!
//! let handles: Vec<_> = (0..10).map(|_| {
//! let mutex = Arc::clone(&mutex);
//! thread::spawn(move || *mutex.lock() += 1)
//! }).collect();
//!
//! handles.into_iter().for_each(|handle| handle.join().unwrap());
//!
//! // All threads completed so the value should be 10.
//! assert_eq!(10, *mutex.lock());
//! ```
//!
//! # Limitations
//!
//! The main lock for the global state is still provided by `std::sync` and the tracing primitives
//! are larger than the `parking_lot` primitives they wrap, so there can be a performance
//! degradation between using this and using `parking_lot` directly. If this is of concern to you,
//! try using the `DebugX`-structs, which provide cycle detection only when `debug_assertions` are
//! enabled and have no overhead when they're not.
//!
//! In addition, the mutex guards returned by the tracing wrappers are `!Send`, regardless of
//! whether `parking_lot` is configured to have `Send` mutex guards. This is a limitation of the
//! current bookkeeping system.
use parking_lot::Once;
use parking_lot::OnceState;
use crate::lockapi::TracingWrapper;
use crate::LazyMutexId;
macro_rules! debug_variant {
($debug_name:ident, $tracing_name:ident, $normal_name:ty) => {
type $tracing_name = TracingWrapper<$normal_name>;
#[cfg(debug_assertions)]
type $debug_name = TracingWrapper<$normal_name>;
#[cfg(not(debug_assertions))]
type $debug_name = $normal_name;
};
}
debug_variant!(
DebugRawFairMutex,
TracingRawFairMutex,
parking_lot::RawFairMutex
);
debug_variant!(DebugRawMutex, TracingRawMutex, parking_lot::RawMutex);
debug_variant!(DebugRawRwLock, TracingRawRwLock, parking_lot::RawRwLock);
/// Dependency tracking fair mutex. See: [`parking_lot::FairMutex`].
pub type TracingFairMutex<T> = lock_api::Mutex<TracingRawFairMutex, T>;
/// Mutex guard for [`TracingFairMutex`].
pub type TracingFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawFairMutex, T>;
/// RAII guard for `TracingFairMutexGuard::map`.
pub type TracingMappedFairMutexGuard<'a, T> =
lock_api::MappedMutexGuard<'a, TracingRawFairMutex, T>;
/// Debug-only dependency tracking fair mutex.
///
/// If debug assertions are enabled this resolves to [`TracingFairMutex`] and to
/// [`parking_lot::FairMutex`] otherwise.
pub type DebugFairMutex<T> = lock_api::Mutex<DebugRawFairMutex, T>;
/// Mutex guard for [`DebugFairMutex`].
pub type DebugFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawFairMutex, T>;
/// RAII guard for `DebugFairMutexGuard::map`.
pub type DebugMappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, DebugRawFairMutex, T>;
/// Dependency tracking mutex. See: [`parking_lot::Mutex`].
pub type TracingMutex<T> = lock_api::Mutex<TracingRawMutex, T>;
/// Mutex guard for [`TracingMutex`].
pub type TracingMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawMutex, T>;
/// RAII guard for `TracingMutexGuard::map`.
pub type TracingMappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, TracingRawMutex, T>;
/// Debug-only dependency tracking mutex.
///
/// If debug assertions are enabled this resolves to [`TracingMutex`] and to [`parking_lot::Mutex`]
/// otherwise.
pub type DebugMutex<T> = lock_api::Mutex<DebugRawMutex, T>;
/// Mutex guard for [`DebugMutex`].
pub type DebugMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawMutex, T>;
/// RAII guard for `DebugMutexGuard::map`.
pub type DebugMappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, DebugRawMutex, T>;
/// Dependency tracking reentrant mutex. See: [`parking_lot::ReentrantMutex`].
///
/// **Note:** due to the way dependencies are tracked, this mutex can only be acquired directly
/// after itself. Acquiring any other mutex in between introduces a dependency cycle, and will
/// therefore be rejected.
pub type TracingReentrantMutex<T> =
lock_api::ReentrantMutex<TracingWrapper<parking_lot::RawMutex>, parking_lot::RawThreadId, T>;
/// Mutex guard for [`TracingReentrantMutex`].
pub type TracingReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<
'a,
TracingWrapper<parking_lot::RawMutex>,
parking_lot::RawThreadId,
T,
>;
/// RAII guard for `TracingReentrantMutexGuard::map`.
pub type TracingMappedReentrantMutexGuard<'a, T> =
lock_api::MappedReentrantMutexGuard<'a, TracingRawMutex, parking_lot::RawThreadId, T>;
/// Debug-only dependency tracking reentrant mutex.
///
/// If debug assertions are enabled this resolves to [`TracingReentrantMutex`] and to
/// [`parking_lot::ReentrantMutex`] otherwise.
pub type DebugReentrantMutex<T> =
lock_api::ReentrantMutex<DebugRawMutex, parking_lot::RawThreadId, T>;
/// Mutex guard for [`DebugReentrantMutex`].
pub type DebugReentrantMutexGuard<'a, T> =
lock_api::ReentrantMutexGuard<'a, DebugRawMutex, parking_lot::RawThreadId, T>;
/// RAII guard for `DebugReentrantMutexGuard::map`.
pub type DebugMappedReentrantMutexGuard<'a, T> =
lock_api::MappedReentrantMutexGuard<'a, DebugRawMutex, parking_lot::RawThreadId, T>;
/// Dependency tracking RwLock. See: [`parking_lot::RwLock`].
pub type TracingRwLock<T> = lock_api::RwLock<TracingRawRwLock, T>;
/// Read guard for [`TracingRwLock`].
pub type TracingRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, TracingRawRwLock, T>;
/// Upgradable Read guard for [`TracingRwLock`].
pub type TracingRwLockUpgradableReadGuard<'a, T> =
lock_api::RwLockUpgradableReadGuard<'a, TracingRawRwLock, T>;
/// Write guard for [`TracingRwLock`].
pub type TracingRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, TracingRawRwLock, T>;
/// RAII guard for `TracingRwLockReadGuard::map`.
pub type TracingMappedRwLockReadGuard<'a, T> =
lock_api::MappedRwLockReadGuard<'a, TracingRawRwLock, T>;
/// RAII guard for `TracingRwLockWriteGuard::map`.
pub type TracingMappedRwLockWriteGuard<'a, T> =
lock_api::MappedRwLockWriteGuard<'a, TracingRawRwLock, T>;
/// Debug-only dependency tracking RwLock.
///
/// If debug assertions are enabled this resolves to [`TracingRwLock`] and to
/// [`parking_lot::RwLock`] otherwise.
pub type DebugRwLock<T> = lock_api::RwLock<DebugRawRwLock, T>;
/// Read guard for [`DebugRwLock`].
pub type DebugRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, DebugRawRwLock, T>;
/// Upgradable Read guard for [`DebugRwLock`].
pub type DebugRwLockUpgradableReadGuard<'a, T> =
lock_api::RwLockUpgradableReadGuard<'a, DebugRawRwLock, T>;
/// Write guard for [`DebugRwLock`].
pub type DebugRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, DebugRawRwLock, T>;
/// RAII guard for `DebugRwLockReadGuard::map`.
pub type DebugMappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, DebugRawRwLock, T>;
/// RAII guard for `DebugRwLockWriteGuard::map`.
pub type DebugMappedRwLockWriteGuard<'a, T> =
lock_api::MappedRwLockWriteGuard<'a, DebugRawRwLock, T>;
/// A dependency-tracking wrapper for [`parking_lot::Once`].
#[derive(Debug, Default)]
pub struct TracingOnce {
inner: Once,
id: LazyMutexId,
}
impl TracingOnce {
/// Create a new `TracingOnce` value.
pub const fn new() -> Self {
Self {
inner: Once::new(),
id: LazyMutexId::new(),
}
}
/// Returns the current state of this `Once`.
pub fn state(&self) -> OnceState {
self.inner.state()
}
/// Performs the given initialization routine once and only once.
///
/// This call is considered as "locking this `TracingOnce`" and it participates in dependency
/// tracking as such.
///
/// # Panics
///
/// This method will panic if `f` panics, poisoning this `Once`. In addition, this function
/// panics when the lock acquisition order is determined to be inconsistent.
pub fn call_once(&self, f: impl FnOnce()) {
let _borrow = self.id.get_borrowed();
self.inner.call_once(f);
}
/// Performs the given initialization routine once and only once.
///
/// This method is identical to [`TracingOnce::call_once`] except it ignores poisoning.
pub fn call_once_force(&self, f: impl FnOnce(OnceState)) {
let _borrow = self.id.get_borrowed();
self.inner.call_once_force(f);
}
}
/// Debug-only `Once`.
///
/// If debug assertions are enabled this resolves to [`TracingOnce`] and to [`parking_lot::Once`]
/// otherwise.
#[cfg(debug_assertions)]
pub type DebugOnce = TracingOnce;
#[cfg(not(debug_assertions))]
pub type DebugOnce = Once;
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::thread;
use super::*;
#[test]
fn test_mutex_usage() {
let mutex = Arc::new(TracingMutex::new(()));
let local_lock = mutex.lock();
drop(local_lock);
thread::spawn(move || {
let _remote_lock = mutex.lock();
})
.join()
.unwrap();
}
#[test]
#[should_panic]
fn test_mutex_conflict() {
let mutexes = [
TracingMutex::new(()),
TracingMutex::new(()),
TracingMutex::new(()),
];
for i in 0..3 {
let _first_lock = mutexes[i].lock();
let _second_lock = mutexes[(i + 1) % 3].lock();
}
}
#[test]
fn test_rwlock_usage() {
let lock = Arc::new(TracingRwLock::new(()));
let lock2 = Arc::clone(&lock);
let _read_lock = lock.read();
// Should be able to acquire lock in the background
thread::spawn(move || {
let _read_lock = lock2.read();
})
.join()
.unwrap();
}
#[test]
fn test_rwlock_upgradable_read_usage() {
let lock = TracingRwLock::new(());
// Should be able to acquire an upgradable read lock.
let upgradable_guard: TracingRwLockUpgradableReadGuard<'_, _> = lock.upgradable_read();
// Should be able to upgrade the guard.
let _write_guard: TracingRwLockWriteGuard<'_, _> =
TracingRwLockUpgradableReadGuard::upgrade(upgradable_guard);
}
#[test]
fn test_once_usage() {
let once = Arc::new(TracingOnce::new());
let once_clone = once.clone();
assert!(!once_clone.state().done());
let handle = thread::spawn(move || {
assert!(!once_clone.state().done());
once_clone.call_once(|| {});
assert!(once_clone.state().done());
});
handle.join().unwrap();
assert!(once.state().done());
}
}

src/stdsync.rs

@@ -16,6 +16,7 @@
 use std::fmt;
 use std::ops::Deref;
 use std::ops::DerefMut;
+use std::sync::Condvar;
 use std::sync::LockResult;
 use std::sync::Mutex;
 use std::sync::MutexGuard;
@@ -27,6 +28,8 @@ use std::sync::RwLockReadGuard;
 use std::sync::RwLockWriteGuard;
 use std::sync::TryLockError;
 use std::sync::TryLockResult;
+use std::sync::WaitTimeoutResult;
+use std::time::Duration;

 use crate::BorrowedMutex;
 use crate::LazyMutexId;
@@ -48,6 +51,14 @@ pub type DebugMutexGuard<'a, T> = TracingMutexGuard<'a, T>;
 #[cfg(not(debug_assertions))]
 pub type DebugMutexGuard<'a, T> = MutexGuard<'a, T>;

+/// Debug-only `Condvar`
+///
+/// Type alias that accepts the mutex guard emitted from [`DebugMutex`].
+#[cfg(debug_assertions)]
+pub type DebugCondvar = TracingCondvar;
+#[cfg(not(debug_assertions))]
+pub type DebugCondvar = Condvar;
+
 /// Debug-only tracing `RwLock`.
 ///
 /// Type alias that resolves to [`TracingRwLock`] when debug assertions are enabled and to
@@ -97,7 +108,7 @@ pub struct TracingMutex<T> {
 #[derive(Debug)]
 pub struct TracingMutexGuard<'a, T> {
     inner: MutexGuard<'a, T>,
-    mutex: BorrowedMutex<'a>,
+    _mutex: BorrowedMutex<'a>,
 }

 fn map_lockresult<T, I, F>(result: LockResult<I>, mapper: F) -> LockResult<T>
@@ -144,7 +155,7 @@ impl<T> TracingMutex<T> {
         let result = self.inner.lock();

         let mapper = |guard| TracingMutexGuard {
-            mutex,
+            _mutex: mutex,
             inner: guard,
         };

@@ -163,7 +174,7 @@ impl<T> TracingMutex<T> {
         let result = self.inner.try_lock();

         let mapper = |guard| TracingMutexGuard {
-            mutex,
+            _mutex: mutex,
             inner: guard,
         };

@@ -195,7 +206,7 @@ impl<T> From<T> for TracingMutex<T> {
     }
 }

 impl<'a, T> Deref for TracingMutexGuard<'a, T> {
-    type Target = MutexGuard<'a, T>;
+    type Target = T;

     fn deref(&self) -> &Self::Target {
         &self.inner
@@ -214,6 +225,123 @@ impl<'a, T: fmt::Display> fmt::Display for TracingMutexGuard<'a, T> {
     }
 }

/// Wrapper around [`std::sync::Condvar`].
///
/// Allows `TracingMutexGuard` to be used with a `Condvar`. Unlike other structs in this module,
/// this wrapper does not add any additional dependency tracking or other overhead on top of the
/// primitive it wraps. All dependency tracking happens through the mutexes themselves.
///
/// # Panics
///
/// This struct does not add any panics over the base implementation of `Condvar`, but panics due to
/// dependency tracking may poison associated mutexes.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// use tracing_mutex::stdsync::{TracingCondvar, TracingMutex};
///
/// let pair = Arc::new((TracingMutex::new(false), TracingCondvar::new()));
/// let pair2 = Arc::clone(&pair);
///
/// // Spawn a thread that will unlock the condvar
/// thread::spawn(move || {
/// let (lock, condvar) = &*pair2;
/// *lock.lock().unwrap() = true;
/// condvar.notify_one();
/// });
///
/// // Wait until the thread unlocks the condvar
/// let (lock, condvar) = &*pair;
/// let guard = lock.lock().unwrap();
/// let guard = condvar.wait_while(guard, |started| !*started).unwrap();
///
/// // Guard should read true now
/// assert!(*guard);
/// ```
#[derive(Debug, Default)]
pub struct TracingCondvar(Condvar);
impl TracingCondvar {
/// Creates a new condition variable which is ready to be waited on and notified.
pub fn new() -> Self {
Default::default()
}
/// Wrapper for [`std::sync::Condvar::wait`].
pub fn wait<'a, T>(
&self,
guard: TracingMutexGuard<'a, T>,
) -> LockResult<TracingMutexGuard<'a, T>> {
let TracingMutexGuard { _mutex, inner } = guard;
map_lockresult(self.0.wait(inner), |inner| TracingMutexGuard {
_mutex,
inner,
})
}
/// Wrapper for [`std::sync::Condvar::wait_while`].
pub fn wait_while<'a, T, F>(
&self,
guard: TracingMutexGuard<'a, T>,
condition: F,
) -> LockResult<TracingMutexGuard<'a, T>>
where
F: FnMut(&mut T) -> bool,
{
let TracingMutexGuard { _mutex, inner } = guard;
map_lockresult(self.0.wait_while(inner, condition), |inner| {
TracingMutexGuard { _mutex, inner }
})
}
/// Wrapper for [`std::sync::Condvar::wait_timeout`].
pub fn wait_timeout<'a, T>(
&self,
guard: TracingMutexGuard<'a, T>,
dur: Duration,
) -> LockResult<(TracingMutexGuard<'a, T>, WaitTimeoutResult)> {
let TracingMutexGuard { _mutex, inner } = guard;
map_lockresult(self.0.wait_timeout(inner, dur), |(inner, result)| {
(TracingMutexGuard { _mutex, inner }, result)
})
}
/// Wrapper for [`std::sync::Condvar::wait_timeout_while`].
pub fn wait_timeout_while<'a, T, F>(
&self,
guard: TracingMutexGuard<'a, T>,
dur: Duration,
condition: F,
) -> LockResult<(TracingMutexGuard<'a, T>, WaitTimeoutResult)>
where
F: FnMut(&mut T) -> bool,
{
let TracingMutexGuard { _mutex, inner } = guard;
map_lockresult(
self.0.wait_timeout_while(inner, dur, condition),
|(inner, result)| (TracingMutexGuard { _mutex, inner }, result),
)
}
/// Wrapper for [`std::sync::Condvar::notify_one`].
pub fn notify_one(&self) {
self.0.notify_one();
}
/// Wrapper for [`std::sync::Condvar::notify_all`].
pub fn notify_all(&self) {
self.0.notify_all();
}
}
 /// Wrapper for [`std::sync::RwLock`].
 #[derive(Debug, Default)]
 pub struct TracingRwLock<T> {
@@ -227,7 +355,7 @@ pub struct TracingRwLock<T> {
 #[derive(Debug)]
 pub struct TracingRwLockGuard<'a, L> {
     inner: L,
-    mutex: BorrowedMutex<'a>,
+    _mutex: BorrowedMutex<'a>,
 }

 /// Wrapper around [`std::sync::RwLockReadGuard`].
@@ -254,7 +382,10 @@ impl<T> TracingRwLock<T> {
         let mutex = self.id.get_borrowed();
         let result = self.inner.read();

-        map_lockresult(result, |inner| TracingRwLockGuard { inner, mutex })
+        map_lockresult(result, |inner| TracingRwLockGuard {
+            inner,
+            _mutex: mutex,
+        })
     }

     /// Wrapper for [`std::sync::RwLock::write`].
@@ -268,7 +399,10 @@ impl<T> TracingRwLock<T> {
         let mutex = self.id.get_borrowed();
         let result = self.inner.write();

-        map_lockresult(result, |inner| TracingRwLockGuard { inner, mutex })
+        map_lockresult(result, |inner| TracingRwLockGuard {
+            inner,
+            _mutex: mutex,
+        })
     }

     /// Wrapper for [`std::sync::RwLock::try_read`].
@@ -282,7 +416,10 @@ impl<T> TracingRwLock<T> {
         let mutex = self.id.get_borrowed();
         let result = self.inner.try_read();

-        map_trylockresult(result, |inner| TracingRwLockGuard { inner, mutex })
+        map_trylockresult(result, |inner| TracingRwLockGuard {
+            inner,
+            _mutex: mutex,
+        })
     }

     /// Wrapper for [`std::sync::RwLock::try_write`].
@@ -296,7 +433,10 @@ impl<T> TracingRwLock<T> {
         let mutex = self.id.get_borrowed();
         let result = self.inner.try_write();

-        map_trylockresult(result, |inner| TracingRwLockGuard { inner, mutex })
+        map_trylockresult(result, |inner| TracingRwLockGuard {
+            inner,
+            _mutex: mutex,
+        })
     }

     /// Return a mutable reference to the underlying data.
@@ -401,7 +541,12 @@ mod tests {
     #[test]
     fn test_mutex_usage() {
-        let mutex = Arc::new(TracingMutex::new(()));
+        let mutex = Arc::new(TracingMutex::new(0));
+
+        assert_eq!(*mutex.lock().unwrap(), 0);
+        *mutex.lock().unwrap() = 1;
+        assert_eq!(*mutex.lock().unwrap(), 1);

         let mutex_clone = mutex.clone();
         let _guard = mutex.lock().unwrap();
@@ -418,7 +563,14 @@ mod tests {
     #[test]
     fn test_rwlock_usage() {
-        let rwlock = Arc::new(TracingRwLock::new(()));
+        let rwlock = Arc::new(TracingRwLock::new(0));
+
+        assert_eq!(*rwlock.read().unwrap(), 0);
+        assert_eq!(*rwlock.write().unwrap(), 0);
+        *rwlock.write().unwrap() = 1;
+        assert_eq!(*rwlock.read().unwrap(), 1);
+        assert_eq!(*rwlock.write().unwrap(), 1);

         let rwlock_clone = rwlock.clone();
         let _read_lock = rwlock.read().unwrap();