58 Commits

Author SHA1 Message Date
bors[bot]
61d19f866c Merge #25
25: Restructure modules r=bertptrs a=bertptrs

The `TracingFoo`, `DebugFoo` versions of every `Foo` resulted in quite verbose types everywhere. This PR restructures them into separate modules. The new module paths map onto the old types as follows:

- `tracing_mutex::foo::TracingFoo` -> `tracing_mutex::foo::tracing::Foo`
- `tracing_mutex::foo::DebugFoo` -> `tracing_mutex::foo::Foo`
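
For example, at the import level (paths taken from the mapping above, with `as` renames only so both variants fit in one scope):

```rust
// Before this PR:
use tracing_mutex::stdsync::TracingMutex; // always-tracing mutex
use tracing_mutex::stdsync::DebugMutex;   // tracing only in debug builds

// After this PR:
use tracing_mutex::stdsync::tracing::Mutex as TracingMutex; // always-tracing mutex
use tracing_mutex::stdsync::Mutex as DebugMutex;            // tracing only in debug builds
```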

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-08-27 08:18:29 +00:00
f78969ebf7 Update documentation 2022-08-27 10:08:51 +02:00
56b0604448 Restructure parking_lot wrappers 2022-08-27 10:06:31 +02:00
6e5516eaa7 Restructure std::sync wrappers 2022-08-27 10:01:51 +02:00
764d3df454 Add parking_lot to changelog 2022-08-24 10:28:51 +02:00
bors[bot]
e543860d8b Merge #24
24: Update parking_lot dependency to 0.12 r=bertptrs a=djkoloski

The changelog for parking_lot 0.12 can be found [here](https://github.com/Amanieu/parking_lot/blob/master/CHANGELOG.md#parking_lot-0120-parking_lot_core-090-lock_api-046-2022-01-28):
```
- The MSRV is bumped to 1.49.0.
- Disabled eventual fairness on wasm32-unknown-unknown. (#302)
- Added a rwlock method to report if lock is held exclusively. (#303)
- Use new asm! macro. (#304)
- Use windows-rs instead of winapi for faster builds. (#311)
- Moved hardware lock elision support to a separate Cargo feature. (#313)
- Removed use of deprecated spin_loop_hint. (#314)
```

Co-authored-by: David Koloski <dkoloski@google.com>
2022-08-24 08:20:55 +00:00
David Koloski
ed04552af3 Update parking_lot dependency to 0.12 2022-08-23 11:34:31 -04:00
bors[bot]
c5a506436c Merge #23
23: Ensure `BorrowedMutex` is `!Send` r=bertptrs a=bertptrs

This should prevent the bugs found in #22.
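
The fix, visible in the `src/lib.rs` diff below, uses the standard `PhantomData` trick: a zero-sized field whose type is `!Send` makes the whole struct `!Send`. A minimal sketch of the technique (the `id` field here is a stand-in for the crate's `MutexId` reference):

```rust
use std::marker::PhantomData;
use std::sync::MutexGuard;

// MutexGuard<'static, ()> is !Send, so a PhantomData of it makes the
// containing struct !Send at zero runtime cost.
struct BorrowedMutex<'a> {
    id: &'a usize, // stand-in for &'a MutexId
    _not_send: PhantomData<MutexGuard<'static, ()>>,
}

// fn assert_send<T: Send>() {}
// assert_send::<BorrowedMutex<'static>>(); // now fails to compile
```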

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-06-23 20:02:10 +00:00
33cb6014a3 Ensure BorrowedMutex is !Send 2022-06-23 21:54:25 +02:00
5232bac582 Bump version 2022-05-23 08:59:47 +02:00
bors[bot]
6472f4b807 Merge #21
21: Prepare for release v0.2.1 r=bertptrs a=bertptrs



Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-23 06:55:45 +00:00
6afe7b1c48 Update README and CHANGELOG 2022-05-23 08:53:56 +02:00
9238ef53ee Update copyright 2022-05-23 08:37:26 +02:00
bors[bot]
c08addff7d Merge #17
17: Fix typos r=bertptrs a=quisar



Co-authored-by: Benjamin Lerman <qsr@chromium.org>
2022-05-23 06:33:21 +00:00
bors[bot]
c1ce9df8ad Merge #19
19: Add a wrapper for `std::sync::Condvar` r=bertptrs a=bertptrs

This wrapper does not do any tracing itself but supports the use of a tracing mutex guard instead of an `std::sync` one.

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-17 19:50:02 +00:00
312eaa8649 Add a wrapper for std::sync::Condvar
This wrapper does not do any tracing itself but supports the use of a
tracing mutex guard instead of an `std::sync` one.
2022-05-17 21:45:25 +02:00
bors[bot]
1f7e6921aa Merge #18
18: Enable bors for nicer merging r=bertptrs a=bertptrs



Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-15 21:40:40 +00:00
f7048f265f Enable CI builds on staging/trying 2022-05-15 23:35:00 +02:00
64e56fdb86 Add minimal bors config 2022-05-15 23:35:00 +02:00
Benjamin Lerman
8e3278fdd2 Fix typos 2022-05-10 10:30:20 +02:00
9ea993e737 Add missing date 2022-05-07 18:15:50 +02:00
062850fc3e Merge pull request #16 from bertptrs/docsrs_feature_docs
Fix documentation builds for features
2022-05-07 17:56:09 +02:00
0d2622d5c6 Build documentation on CI 2022-05-07 17:52:32 +02:00
d1417e0b0c Tag module docs with their required features 2022-05-07 17:52:32 +02:00
fcc64e2cef Automatically build documentation for all features 2022-05-07 17:03:45 +02:00
fd0d05307c Update README and copyright year 2022-05-07 16:54:37 +02:00
2f6e214784 Merge pull request #15 from bertptrs/pre-release-cleanup 2022-05-07 16:50:10 +02:00
3ec7e83e00 Update changelog and version 2022-05-07 16:43:31 +02:00
ea8e0208a0 Explicitly test for disallowed self-cycles 2022-05-07 16:43:31 +02:00
8926af4e13 Also deny clippy warnings on other targets 2022-05-07 16:43:31 +02:00
77676ea04d Fix formatting 2022-05-07 16:43:31 +02:00
46c92cfbbf Merge pull request #13 from quisar/add_upgradable 2022-05-06 10:17:54 +02:00
Benjamin Lerman
743cc83669 Add TracingRwLockUpgradableReadGuard wrapper for parking_lot 2022-05-03 10:16:27 +02:00
4faaae8d8f Merge pull request #10 from quisar/fix_locking_issues_in_lockapi 2022-05-02 08:32:13 +02:00
Benjamin Lerman
b78af9150d Fix a number of issues in the lockapi wrappers. 2022-05-02 08:20:23 +02:00
515930c0a2 Merge pull request #9 from quisar/prevent-reentrant-locking 2022-05-02 08:13:48 +02:00
Benjamin Lerman
b5a5ca16c3 Do not allow recursive locks. 2022-05-02 08:11:37 +02:00
aef99d4f65 Merge pull request #8 from quisar/fix_deref 2022-05-02 08:04:55 +02:00
Benjamin Lerman
6073c6c78d Fix Target for Deref of stdsync::TracingMutexGuard 2022-05-01 16:53:26 +02:00
cdd44f74fa Merge pull request #11 from bertptrs/fix-bitrot 2022-05-01 14:06:23 +02:00
38b3b226cc Move to edition 2021 altogether 2022-05-01 12:03:50 +02:00
3b9b908460 Correctly mark mutex reference as unused 2022-05-01 11:50:50 +02:00
ef421e20eb Deal with IntoIter deprecation 2022-05-01 11:50:37 +02:00
66576e5b0e Merge pull request #5 from bertptrs/benchmarking
Implement minimal benchmarking of dependency tracking
2021-07-10 22:17:44 +02:00
308af218e1 Implement minimal benchmarking of dependency tracking 2021-07-10 22:14:33 +02:00
79ed599a2f Merge pull request #3 from bertptrs/locking-api-support 2021-07-10 17:28:20 +02:00
680e335ccf Document new modules 2021-07-10 17:25:42 +02:00
17761af5a8 Add type aliases for mapped mutex guards 2021-07-10 13:05:41 +02:00
4c70d999d6 Create type aliases for parking_lot::RwLock 2021-07-10 12:17:35 +02:00
618a11f940 Implement a wrapper for parking_lot::Once 2021-05-27 22:19:57 +02:00
77cd603363 Implement minimal mutexes for parking_lot. 2021-05-27 22:00:37 +02:00
73b4c8b1af Minimal parking_lot support 2021-05-27 21:16:24 +02:00
b21a63e74b Implement RwLock-based traits for lockapi wrapper. 2021-05-27 21:16:24 +02:00
6a3cb83d01 Implement Mutex behaviour for lock_api 2021-05-27 21:16:24 +02:00
08cfb17234 Build all features on CI 2021-05-27 21:16:24 +02:00
536ee31138 Prepare for release 2021-05-27 21:13:24 +02:00
e2db0eaca8 Fix a graph invariant violation on cycle detection 2021-05-27 20:31:00 +02:00
158e5353bb Add missing guard type aliases 2021-05-24 20:28:49 +02:00
13 changed files with 1440 additions and 459 deletions

View File

@@ -2,12 +2,14 @@ on:
push:
branches:
- master
- staging
- trying
pull_request:
name: Continuous integration
jobs:
ci:
tests:
name: Rust project
runs-on: ubuntu-latest
strategy:
@@ -30,10 +32,13 @@ jobs:
- uses: actions-rs/cargo@v1
with:
command: build
# --all-targets ensures that we also build the benchmarks and tests.
args: --all-features --all-targets
- uses: actions-rs/cargo@v1
with:
command: test
args: --all-features
- uses: actions-rs/cargo@v1
with:
@@ -43,4 +48,23 @@ jobs:
- uses: actions-rs/cargo@v1
with:
command: clippy
args: -- -D warnings
args: --all-features --all-targets -- -D warnings
docs:
name: Documentation build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
- name: Build documentation
env:
# Build the docs like docs.rs builds it
RUSTDOCFLAGS: --cfg docsrs
run: cargo doc --all-features

View File

@@ -6,18 +6,84 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Breaking
- Update [`parking_lot`][parking_lot] dependency to `0.12`.
- Restructured the crate to reduce typename verbosity. For details, see: #25.
### Fixed
- Enforce that all internal mutex guards are `!Send`. They should already be `!Send` for other
reasons, but this adds extra enforcement through the type system.
## [0.2.1] - 2022-05-23
### Added
- Build [docs.rs] documentation with all features enabled for completeness.
- Add support for `std::sync::Condvar`
### Fixed
- The `parkinglot` module is now correctly enabled by the `parkinglot` feature rather than the
`lockapi` feature.
## [0.2.0] - 2022-05-07
### Added
- Generic support for wrapping mutexes that implement the traits provided by the
[`lock_api`][lock_api] crate. This can be used for creating support for other mutex providers that
implement it.
- Support for [`parking_lot`][parking_lot] mutexes. Support includes type aliases for all
provided mutex types as well as a dedicated `Once` wrapper.
- Simple benchmark to track the rough performance penalty incurred by dependency tracking.
### Breaking
- The library now requires edition 2021.
- The `Mutex`- and `RwLockGuards` now dereference to `T` rather than the lock guard they wrap. This
is technically a bugfix but can theoretically break existing code.
- Self-cycles are no longer allowed for lock dependencies. They previously were because it usually
isn't a problem, but it can create RWR deadlocks with `RwLocks`.
### Changed
- The project now targets edition 2021
## [0.1.2] - 2021-05-27
### Added
- Added missing type aliases for the guards returned by `DebugMutex` and `DebugRwLock`. These new
type aliases function the same as the ones they belong to, resolving to either the tracing
versions when debug assertions are enabled or the standard one when they're not.
### Fixed
- Fixed a corruption error where deallocating a previously cyclic mutex could result in a panic.
## [0.1.1] - 2021-05-24
### Changed
- New data structure for internal dependency graph, resulting in quicker graph updates.
### Fixed
- Fixed an issue where internal graph ordering indices were exponential rather than sequential.
- Fixed an issue where internal graph ordering indices were exponential rather than sequential. This
caused the available IDs to run out way more quickly than intended.
## [0.1.0] - 2021-05-16 [YANKED]
Initial release.
[Unreleased]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.1...HEAD
[Unreleased]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.1...HEAD
[0.2.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.0...v0.2.1
[0.2.0]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.2...v0.2.0
[0.1.2]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.1...v0.1.2
[0.1.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.0...v0.1.1
[0.1.0]: https://github.com/bertptrs/tracing-mutex/releases/tag/v0.1.0
[docs.rs]: https://docs.rs/tracing-mutex/latest/tracing_mutex/
[lock_api]: https://docs.rs/lock_api/
[parking_lot]: https://docs.rs/parking_lot/

View File

@@ -1,8 +1,8 @@
[package]
name = "tracing-mutex"
version = "0.1.1"
version = "0.2.1"
authors = ["Bert Peters <bert@bertptrs.nl>"]
edition = "2018"
edition = "2021"
license = "MIT OR Apache-2.0"
documentation = "https://docs.rs/tracing-mutex"
categories = ["concurrency", "development-tools::debugging"]
@@ -11,8 +11,26 @@ description = "Ensure deadlock-free mutexes by allocating in order, or else."
readme = "README.md"
repository = "https://github.com/bertptrs/tracing-mutex"
[package.metadata.docs.rs]
# Build docs for all features so the documentation is more complete
all-features = true
# Set custom cfg so we can enable docs.rs magic
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
lazy_static = "1"
lock_api = { version = "0.4", optional = true }
parking_lot = { version = "0.12", optional = true }
[dev-dependencies]
criterion = "0.3"
rand = "0.8"
[[bench]]
name = "mutex"
harness = false
[features]
# Feature names do not match crate names pending namespaced features.
lockapi = ["lock_api"]
parkinglot = ["parking_lot", "lockapi"]

View File

@@ -186,7 +186,7 @@ APPENDIX: How to apply the Apache License to your work.
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 2022 Bert Peters
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
Copyright © 2021 Bert Peters
Copyright © 2022 Bert Peters
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the “Software”), to deal in the Software without restriction,

View File

@@ -34,7 +34,7 @@ Add this dependency to your `Cargo.toml` file like any other:
```toml
[dependencies]
tracing-mutex = "0.1"
tracing-mutex = "0.2"
```
Then use the locks provided by this library instead of the ones you would use otherwise.
@@ -42,9 +42,9 @@ Replacements for the synchronization primitives in `std::sync` can be found in t
Support for other synchronization primitives is planned.
```rust
use tracing_mutex::stdsync::TracingMutex;
use tracing_mutex::stdsync::Mutex;
let some_mutex = TracingMutex::new(42);
let some_mutex = Mutex::new(42);
*some_mutex.lock().unwrap() += 1;
println!("{:?}", some_mutex);
```
@@ -59,12 +59,23 @@ performance penalty in your production environment, this library also offers deb
when debug assertions are enabled, and to `Mutex` when they are not. Similar helper types are
available for other synchronization primitives.
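
Under the hood these debug-only helpers are plain conditional re-exports, as the `stdsync` diff further down shows; a minimal sketch of the pattern:

```rust
// With debug assertions the name resolves to the tracing wrapper; without
// them it is the plain std::sync primitive, so release builds pay nothing.
#[cfg(debug_assertions)]
pub use tracing::{Mutex, Once, RwLock};
#[cfg(not(debug_assertions))]
pub use std::sync::{Mutex, Once, RwLock};
```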
### Features
- Dependency-tracking wrappers for all locking primitives
- Optional opt-out for release mode code
- Support for primitives from:
- `std::sync`
- `parking_lot`
- Any library that implements the `lock_api` traits
## Future improvements
- Improve performance in lock tracing
- Optional logging to make debugging easier
- Better and configurable error handling when detecting cyclic dependencies
- Support for other locking libraries, such as `parking_lot`
- Support for other locking libraries
- Support for async locking libraries
- Support for `Send` mutex guards
**Note:** `parking_lot` has already begun work on its own deadlock detection mechanism, which works
in a different way. Both can be complementary.

82
benches/mutex.rs Normal file
View File

@@ -0,0 +1,82 @@
use std::sync::Arc;
use std::sync::Mutex;
use criterion::criterion_group;
use criterion::criterion_main;
use criterion::BenchmarkId;
use criterion::Criterion;
use criterion::Throughput;
use rand::prelude::*;
use tracing_mutex::stdsync::tracing::Mutex as TracingMutex;
const SAMPLE_SIZES: [usize; 5] = [10, 30, 100, 300, 1000];
/// Reproducibly generate random pairs (a, b) where index(a) < index(b)
///
/// All such pairs are generated, then shuffled with a fixed seed for reproducibility
fn generate_combinations<T>(options: &[Arc<T>]) -> Vec<(Arc<T>, Arc<T>)> {
let mut combinations = Vec::new();
for (i, first) in options.iter().enumerate() {
for second in options.iter().skip(i + 1) {
combinations.push((Arc::clone(first), Arc::clone(second)));
}
}
let mut rng = StdRng::seed_from_u64(42);
combinations.shuffle(&mut rng);
combinations
}
/// Take two arbitrary mutexes, lock the first, lock the second while holding the first.
fn benchmark_baseline(c: &mut Criterion) {
let mut group = c.benchmark_group("baseline");
for nodes in SAMPLE_SIZES {
group.throughput(Throughput::Elements((nodes * (nodes - 1) / 2) as u64));
group.bench_with_input(BenchmarkId::from_parameter(nodes), &nodes, |b, &s| {
b.iter_batched(
|| {
let mutexes: Vec<_> = (0..s).map(|_| Arc::new(Mutex::new(()))).collect();
generate_combinations(&mutexes)
},
|combinations| {
for (first, second) in combinations {
let _first = first.lock();
let _second = second.lock();
}
},
criterion::BatchSize::SmallInput,
)
});
}
}
/// Same as [`benchmark_baseline`] but now while tracking dependencies.
fn benchmark_tracing_mutex(c: &mut Criterion) {
let mut group = c.benchmark_group("tracing_mutex");
for nodes in SAMPLE_SIZES {
group.throughput(Throughput::Elements((nodes * (nodes - 1) / 2) as u64));
group.bench_with_input(BenchmarkId::from_parameter(nodes), &nodes, |b, &s| {
b.iter_batched(
|| {
let mutexes: Vec<_> = (0..s).map(|_| Arc::new(TracingMutex::new(()))).collect();
generate_combinations(&mutexes)
},
|combinations| {
for (first, second) in combinations {
let _first = first.lock();
let _second = second.lock();
}
},
criterion::BatchSize::SmallInput,
)
});
}
}
criterion_group!(benches, benchmark_baseline, benchmark_tracing_mutex);
criterion_main!(benches);

5
bors.toml Normal file
View File

@@ -0,0 +1,5 @@
status = [
'Rust project (stable)',
'Rust project (beta)',
'Documentation build',
]

View File

@@ -1,4 +1,3 @@
use std::array::IntoIter;
use std::cell::Cell;
use std::collections::HashMap;
use std::collections::HashSet;
@@ -99,8 +98,8 @@ where
/// would introduce a cycle, the edge is rejected and `false` is returned.
pub(crate) fn add_edge(&mut self, x: V, y: V) -> bool {
if x == y {
// self-edges are not considered cycles
return true;
// self-edges are always considered cycles
return false;
}
let (_, out_edges, ub) = self.add_node(x);
@@ -116,7 +115,7 @@ where
if lb < ub {
// This edge might introduce a cycle, need to recompute the topological sort
let mut visited = IntoIter::new([x, y]).collect();
let mut visited = [x, y].into_iter().collect();
let mut delta_f = Vec::new();
let mut delta_b = Vec::new();
@@ -127,7 +126,7 @@ where
// We use `map` instead of `unwrap()` to avoid a potential panic; we know that these
// entries are present as we just added them above.
self.nodes.get_mut(&y).map(|node| node.in_edges.remove(&x));
self.nodes.get_mut(&x).map(|node| node.out_edges.remove(&x));
self.nodes.get_mut(&x).map(|node| node.out_edges.remove(&y));
// No edge was added
return false;
@@ -227,6 +226,14 @@ mod tests {
use super::*;
#[test]
fn test_no_self_cycle() {
// Regression test for https://github.com/bertptrs/tracing-mutex/issues/7
let mut graph = DiGraph::default();
assert!(!graph.add_edge(1, 1));
}
#[test]
fn test_digraph() {
let mut graph = DiGraph::default();
@@ -259,9 +266,11 @@ mod tests {
for i in 0..NUM_NODES {
for j in i..NUM_NODES {
if i != j {
edges.push((i, j));
}
}
}
edges.shuffle(&mut thread_rng());

View File

@@ -41,10 +41,11 @@
//!
//! These operations have been reasonably optimized, but the performance penalty may yet be too much
//! for production use. In those cases, it may be beneficial to instead use debug-only versions
//! (such as [`stdsync::DebugMutex`]) which evaluate to a tracing mutex when debug assertions are
//! (such as [`stdsync::Mutex`]) which evaluate to a tracing mutex when debug assertions are
//! enabled, and to the underlying mutex when they're not.
//!
//! [paper]: https://whileydave.com/publications/pk07_jea/
#![cfg_attr(docsrs, feature(doc_cfg))]
use std::cell::RefCell;
use std::cell::UnsafeCell;
use std::fmt;
@@ -56,14 +57,27 @@ use std::ptr;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Mutex;
use std::sync::MutexGuard;
use std::sync::Once;
use std::sync::PoisonError;
use lazy_static::lazy_static;
#[cfg(feature = "lockapi")]
#[cfg_attr(docsrs, doc(cfg(feature = "lockapi")))]
pub use lock_api;
#[cfg(feature = "parkinglot")]
#[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
pub use parking_lot;
use crate::graph::DiGraph;
mod graph;
#[cfg(feature = "lockapi")]
#[cfg_attr(docsrs, doc(cfg(feature = "lockapi")))]
pub mod lockapi;
#[cfg(feature = "parkinglot")]
#[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
pub mod parkinglot;
pub mod stdsync;
/// Counter for Mutex IDs. Atomic avoids the need for locking.
@@ -120,6 +134,19 @@ impl MutexId {
///
/// This method panics if the new dependency would introduce a cycle.
pub fn get_borrowed(&self) -> BorrowedMutex {
self.mark_held();
BorrowedMutex {
id: self,
_not_send: PhantomData,
}
}
/// Mark this lock as held for the purposes of dependency tracking.
///
/// # Panics
///
/// This method panics if the new dependency would introduce a cycle.
pub fn mark_held(&self) {
let creates_cycle = HELD_LOCKS.with(|locks| {
if let Some(&previous) = locks.borrow().last() {
let mut graph = get_dependency_graph();
@@ -136,7 +163,22 @@ impl MutexId {
}
HELD_LOCKS.with(|locks| locks.borrow_mut().push(self.value()));
BorrowedMutex(self)
}
/// Mark this lock as released for the purposes of dependency tracking.
///
/// # Safety
///
/// This method should only be called when the lock was previously acquired by this thread.
pub unsafe fn mark_released(&self) {
HELD_LOCKS.with(|locks| {
let mut locks = locks.borrow_mut();
for (i, &lock) in locks.iter().enumerate().rev() {
if lock == self.value() {
locks.remove(i);
return;
}
}
// Drop impls shouldn't panic but if this happens something is seriously broken.
unreachable!("Tried to drop lock for mutex {:?} but it wasn't held", self)
});
}
}
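
Taken together, `get_borrowed` and the guard's `Drop` give the pattern every wrapper in this crate follows: register the dependency edge before blocking on the real lock, and undo the bookkeeping when the guard drops. A minimal sketch using the crate-internal types from this diff (the real versions live in the `stdsync` and `parkinglot` modules):

```rust
// Hypothetical internal wrapper; MutexId and BorrowedMutex are the types
// defined in this file.
struct TracedMutex<T> {
    id: MutexId,
    inner: std::sync::Mutex<T>,
}

struct TracedGuard<'a, T> {
    // Dropping this calls mark_released on the owning MutexId.
    _borrow: BorrowedMutex<'a>,
    inner: std::sync::MutexGuard<'a, T>,
}

impl<T> TracedMutex<T> {
    fn lock(&self) -> TracedGuard<'_, T> {
        // Register in the dependency graph first; this panics if the
        // acquisition order would introduce a cycle.
        let borrow = self.id.get_borrowed();
        TracedGuard {
            _borrow: borrow,
            inner: self.inner.lock().unwrap(),
        }
    }
}
```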
@@ -187,6 +229,12 @@ impl fmt::Debug for LazyMutexId {
}
}
impl Default for LazyMutexId {
fn default() -> Self {
Self::new()
}
}
/// Safety: the UnsafeCell is guaranteed to only be accessed mutably from a `Once`.
unsafe impl Sync for LazyMutexId {}
@@ -230,8 +278,22 @@ impl Drop for LazyMutexId {
}
}
/// Borrowed mutex ID
///
/// This type should be used as part of a mutex guard wrapper. It can be acquired through
/// [`MutexId::get_borrowed`] and will automatically mark the mutex as not borrowed when it is
/// dropped.
///
/// This type is intentionally [`!Send`](std::marker::Send) because the ownership tracking is based
/// on a thread-local stack, which doesn't work if a guard gets released in a different thread from
/// the one where it was acquired.
#[derive(Debug)]
struct BorrowedMutex<'a>(&'a MutexId);
struct BorrowedMutex<'a> {
/// Reference to the mutex we're borrowing from
id: &'a MutexId,
/// This value serves no purpose but to make the type [`!Send`](std::marker::Send)
_not_send: PhantomData<MutexGuard<'static, ()>>,
}
/// Drop a lock held by the current thread.
///
@@ -241,21 +303,8 @@ struct BorrowedMutex<'a>(&'a MutexId);
/// that is an indication of a serious design flaw in this library.
impl<'a> Drop for BorrowedMutex<'a> {
fn drop(&mut self) {
let id = self.0;
HELD_LOCKS.with(|locks| {
let mut locks = locks.borrow_mut();
for (i, &lock) in locks.iter().enumerate().rev() {
if lock == id.value() {
locks.remove(i);
return;
}
}
// Drop impls shouldn't panic but if this happens something is seriously broken.
unreachable!("Tried to drop lock for mutex {:?} but it wasn't held", id)
});
// Safety: the only way to get a BorrowedMutex is by locking the mutex.
unsafe { self.id.mark_released() };
}
}
@@ -304,6 +353,18 @@ mod tests {
assert!(get_dependency_graph().add_edge(c.value(), a.value()));
}
/// Test creating a cycle, then panicking.
#[test]
#[should_panic]
fn test_mutex_id_conflict() {
let ids = [MutexId::new(), MutexId::new(), MutexId::new()];
for i in 0..3 {
let _first_lock = ids[i].get_borrowed();
let _second_lock = ids[(i + 1) % 3].get_borrowed();
}
}
/// Fuzz the global dependency graph by fake-acquiring lots of mutexes in a valid order.
///
/// This test generates all possible forward edges in a 100-node graph consisting of natural
@@ -319,9 +380,11 @@ mod tests {
let mut edges = Vec::with_capacity(NUM_NODES * NUM_NODES);
for i in 0..NUM_NODES {
for j in i..NUM_NODES {
if i != j {
edges.push((i, j));
}
}
}
edges.shuffle(&mut thread_rng());

348
src/lockapi.rs Normal file
View File

@@ -0,0 +1,348 @@
//! Wrapper implementations for [`lock_api`].
//!
//! This module does not provide any particular mutex implementation by itself, but rather can be
//! used to add dependency tracking to mutexes that already exist. It implements all of the traits
//! in `lock_api` based on the one it wraps. Crates such as `spin` and `parking_lot` provide base
//! primitives that can be wrapped.
//!
//! Wrapped mutexes are at least one `usize` larger than the types they wrap, and must be aligned
//! to `usize` boundaries. As such, libraries with many mutexes may want to consider the additional
//! required memory.
use lock_api::GuardNoSend;
use lock_api::RawMutex;
use lock_api::RawMutexFair;
use lock_api::RawMutexTimed;
use lock_api::RawRwLock;
use lock_api::RawRwLockDowngrade;
use lock_api::RawRwLockFair;
use lock_api::RawRwLockRecursive;
use lock_api::RawRwLockRecursiveTimed;
use lock_api::RawRwLockTimed;
use lock_api::RawRwLockUpgrade;
use lock_api::RawRwLockUpgradeDowngrade;
use lock_api::RawRwLockUpgradeFair;
use lock_api::RawRwLockUpgradeTimed;
use crate::LazyMutexId;
/// Tracing wrapper for all [`lock_api`] traits.
///
/// This wrapper implements any of the locking traits available, given that the wrapped type
/// implements them. As such, this wrapper can be used both for normal mutexes and rwlocks.
#[derive(Debug, Default)]
pub struct TracingWrapper<T> {
inner: T,
// Need to use a lazy mutex ID to initialize statically.
id: LazyMutexId,
}
impl<T> TracingWrapper<T> {
/// Mark this lock as held in the dependency graph.
fn mark_held(&self) {
self.id.mark_held();
}
/// Mark this lock as released in the dependency graph.
///
/// # Safety
///
/// This function should only be called when the lock has been previously acquired by this
/// thread.
unsafe fn mark_released(&self) {
self.id.mark_released();
}
/// First mark ourselves as held, then call the locking function.
fn lock(&self, f: impl FnOnce()) {
self.mark_held();
f();
}
/// First call the unlocking function, then mark ourselves as released.
unsafe fn unlock(&self, f: impl FnOnce()) {
f();
self.mark_released();
}
/// Conditionally lock the mutex.
///
/// First acquires the lock, then runs the provided function. If that function returns true,
/// then the lock is kept, otherwise the mutex is immediately marked as released.
///
/// # Returns
///
/// The value returned from the callback.
fn conditionally_lock(&self, f: impl FnOnce() -> bool) -> bool {
// Mark as locked while we try to do the thing
self.mark_held();
if f() {
true
} else {
// Safety: we just locked it above.
unsafe { self.mark_released() }
false
}
}
}
unsafe impl<T> RawMutex for TracingWrapper<T>
where
T: RawMutex,
{
const INIT: Self = Self {
inner: T::INIT,
id: LazyMutexId::new(),
};
/// Always equal to [`GuardNoSend`], as an implementation detail in the tracking system requires
/// this behaviour. May change in the future to reflect the actual guard type from the wrapped
/// primitive.
type GuardMarker = GuardNoSend;
fn lock(&self) {
self.lock(|| self.inner.lock());
}
fn try_lock(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock())
}
unsafe fn unlock(&self) {
self.unlock(|| self.inner.unlock());
}
fn is_locked(&self) -> bool {
// Can't use the default implementation as the inner type may have overridden it.
self.inner.is_locked()
}
}
unsafe impl<T> RawMutexFair for TracingWrapper<T>
where
T: RawMutexFair,
{
unsafe fn unlock_fair(&self) {
self.unlock(|| self.inner.unlock_fair())
}
unsafe fn bump(&self) {
// Bumping effectively doesn't change which locks are held, so we don't need to manage the
// lock state.
self.inner.bump();
}
}
unsafe impl<T> RawMutexTimed for TracingWrapper<T>
where
T: RawMutexTimed,
{
type Duration = T::Duration;
type Instant = T::Instant;
fn try_lock_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_for(timeout))
}
fn try_lock_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_until(timeout))
}
}
unsafe impl<T> RawRwLock for TracingWrapper<T>
where
T: RawRwLock,
{
const INIT: Self = Self {
inner: T::INIT,
id: LazyMutexId::new(),
};
/// Always equal to [`GuardNoSend`], as an implementation detail in the tracking system requires
/// this behaviour. May change in the future to reflect the actual guard type from the wrapped
/// primitive.
type GuardMarker = GuardNoSend;
fn lock_shared(&self) {
self.lock(|| self.inner.lock_shared());
}
fn try_lock_shared(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared())
}
unsafe fn unlock_shared(&self) {
self.unlock(|| self.inner.unlock_shared());
}
fn lock_exclusive(&self) {
self.lock(|| self.inner.lock_exclusive());
}
fn try_lock_exclusive(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive())
}
unsafe fn unlock_exclusive(&self) {
self.unlock(|| self.inner.unlock_exclusive());
}
fn is_locked(&self) -> bool {
self.inner.is_locked()
}
}
unsafe impl<T> RawRwLockDowngrade for TracingWrapper<T>
where
T: RawRwLockDowngrade,
{
unsafe fn downgrade(&self) {
// Downgrading does not require tracking
self.inner.downgrade()
}
}
unsafe impl<T> RawRwLockUpgrade for TracingWrapper<T>
where
T: RawRwLockUpgrade,
{
fn lock_upgradable(&self) {
self.lock(|| self.inner.lock_upgradable());
}
fn try_lock_upgradable(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable())
}
unsafe fn unlock_upgradable(&self) {
self.unlock(|| self.inner.unlock_upgradable());
}
unsafe fn upgrade(&self) {
self.inner.upgrade();
}
unsafe fn try_upgrade(&self) -> bool {
self.inner.try_upgrade()
}
}
unsafe impl<T> RawRwLockFair for TracingWrapper<T>
where
T: RawRwLockFair,
{
unsafe fn unlock_shared_fair(&self) {
self.unlock(|| self.inner.unlock_shared_fair());
}
unsafe fn unlock_exclusive_fair(&self) {
self.unlock(|| self.inner.unlock_exclusive_fair());
}
unsafe fn bump_shared(&self) {
self.inner.bump_shared();
}
unsafe fn bump_exclusive(&self) {
self.inner.bump_exclusive();
}
}
unsafe impl<T> RawRwLockRecursive for TracingWrapper<T>
where
T: RawRwLockRecursive,
{
fn lock_shared_recursive(&self) {
self.lock(|| self.inner.lock_shared_recursive());
}
fn try_lock_shared_recursive(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive())
}
}
unsafe impl<T> RawRwLockRecursiveTimed for TracingWrapper<T>
where
T: RawRwLockRecursiveTimed,
{
fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive_for(timeout))
}
fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive_until(timeout))
}
}
unsafe impl<T> RawRwLockTimed for TracingWrapper<T>
where
T: RawRwLockTimed,
{
type Duration = T::Duration;
type Instant = T::Instant;
fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_for(timeout))
}
fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_until(timeout))
}
fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive_for(timeout))
}
fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive_until(timeout))
}
}
unsafe impl<T> RawRwLockUpgradeDowngrade for TracingWrapper<T>
where
T: RawRwLockUpgradeDowngrade,
{
unsafe fn downgrade_upgradable(&self) {
self.inner.downgrade_upgradable()
}
unsafe fn downgrade_to_upgradable(&self) {
self.inner.downgrade_to_upgradable()
}
}
unsafe impl<T> RawRwLockUpgradeFair for TracingWrapper<T>
where
T: RawRwLockUpgradeFair,
{
unsafe fn unlock_upgradable_fair(&self) {
self.unlock(|| self.inner.unlock_upgradable_fair())
}
unsafe fn bump_upgradable(&self) {
self.inner.bump_upgradable()
}
}
unsafe impl<T> RawRwLockUpgradeTimed for TracingWrapper<T>
where
T: RawRwLockUpgradeTimed,
{
fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable_for(timeout))
}
fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable_until(timeout))
}
unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool {
self.inner.try_upgrade_for(timeout)
}
unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool {
self.inner.try_upgrade_until(timeout)
}
}
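
As the module documentation above notes, the wrapper implements whichever `lock_api` traits the inner lock provides, so turning it into a usable mutex is just a pair of type aliases. A minimal sketch, assuming the `lockapi` and `parkinglot` features are enabled (this mirrors the aliases in `src/parkinglot.rs` below):

```rust
use tracing_mutex::lockapi::TracingWrapper;

// Wrap parking_lot's raw mutex so lock/unlock participate in dependency
// tracking, then let lock_api build the high-level Mutex<T> API on top.
type RawTracingMutex = TracingWrapper<parking_lot::RawMutex>;
type Mutex<T> = lock_api::Mutex<RawTracingMutex, T>;

fn main() {
    let mutex = Mutex::new(42);
    *mutex.lock() += 1; // panics instead of deadlocking on a cyclic order
    assert_eq!(*mutex.lock(), 43);
}
```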

236
src/parkinglot.rs Normal file
View File

@@ -0,0 +1,236 @@
//! Wrapper types and type aliases for tracing [`parking_lot`] mutexes.
//!
//! This module provides type aliases that use the [`lockapi`][crate::lockapi] module to provide
//! tracing variants of the `parking_lot` primitives. The [`tracing`] module contains type aliases
//! that use dependency tracking, while the main `parking_lot` primitives are reexported as [`raw`].
//!
//! This main module imports from [`tracing`] when `debug_assertions` are enabled, and from [`raw`]
//! when they're not. Note that primitives for which no tracing wrapper exists are not imported into
//! the main module.
//!
//! # Usage
//!
//! ```
//! # use std::sync::Arc;
//! # use std::thread;
//! use tracing_mutex::parkinglot::Mutex;
//! let mutex = Arc::new(Mutex::new(0));
//!
//! let handles: Vec<_> = (0..10).map(|_| {
//! let mutex = Arc::clone(&mutex);
//! thread::spawn(move || *mutex.lock() += 1)
//! }).collect();
//!
//! handles.into_iter().for_each(|handle| handle.join().unwrap());
//!
//! // All threads completed so the value should be 10.
//! assert_eq!(10, *mutex.lock());
//! ```
//!
//! # Limitations
//!
//! The main lock for the global state is still provided by `std::sync` and the tracing primitives
//! are larger than the `parking_lot` primitives they wrap, so there can be a performance
//! degradation between using this and using `parking_lot` directly. If this is of concern to you,
//! try using the `DebugX`-structs, which provide cycle detection only when `debug_assertions` are
//! enabled and have no overhead when they're not.
//!
//! In addition, the mutex guards returned by the tracing wrappers are `!Send`, regardless of
//! whether `parking_lot` is configured to have `Send` mutex guards. This is a limitation of the
//! current bookkeeping system.
pub use parking_lot as raw;
#[cfg(debug_assertions)]
pub use tracing::{
FairMutex, FairMutexGuard, MappedFairMutexGuard, MappedMutexGuard, MappedReentrantMutexGuard,
MappedRwLockReadGuard, MappedRwLockWriteGuard, Mutex, MutexGuard, Once, OnceState,
ReentrantMutex, ReentrantMutexGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard,
RwLockWriteGuard,
};
#[cfg(not(debug_assertions))]
pub use parking_lot::{
FairMutex, FairMutexGuard, MappedFairMutexGuard, MappedMutexGuard, MappedReentrantMutexGuard,
MappedRwLockReadGuard, MappedRwLockWriteGuard, Mutex, MutexGuard, Once, OnceState,
ReentrantMutex, ReentrantMutexGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard,
RwLockWriteGuard,
};
/// Dependency tracing wrappers for [`parking_lot`].
pub mod tracing {
pub use parking_lot::OnceState;
use crate::lockapi::TracingWrapper;
use crate::LazyMutexId;
type RawFairMutex = TracingWrapper<parking_lot::RawFairMutex>;
type RawMutex = TracingWrapper<parking_lot::RawMutex>;
type RawRwLock = TracingWrapper<parking_lot::RawRwLock>;
/// Dependency tracking fair mutex. See: [`parking_lot::FairMutex`].
pub type FairMutex<T> = lock_api::Mutex<RawFairMutex, T>;
/// Mutex guard for [`FairMutex`].
pub type FairMutexGuard<'a, T> = lock_api::MutexGuard<'a, RawFairMutex, T>;
/// RAII guard for [`FairMutexGuard::map`].
pub type MappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawFairMutex, T>;
/// Dependency tracking mutex. See: [`parking_lot::Mutex`].
pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;
/// Mutex guard for [`Mutex`].
pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;
/// RAII guard for [`MutexGuard::map`].
pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;
/// Dependency tracking reentrant mutex. See: [`parking_lot::ReentrantMutex`].
///
/// **Note:** due to the way dependencies are tracked, this mutex can only be acquired directly
/// after itself. Acquiring any other mutex in between introduces a dependency cycle, and will
/// therefore be rejected.
pub type ReentrantMutex<T> = lock_api::ReentrantMutex<RawMutex, parking_lot::RawThreadId, T>;
/// Mutex guard for [`ReentrantMutex`].
pub type ReentrantMutexGuard<'a, T> =
lock_api::ReentrantMutexGuard<'a, RawMutex, parking_lot::RawThreadId, T>;
/// RAII guard for `ReentrantMutexGuard::map`.
pub type MappedReentrantMutexGuard<'a, T> =
lock_api::MappedReentrantMutexGuard<'a, RawMutex, parking_lot::RawThreadId, T>;
/// Dependency tracking RwLock. See: [`parking_lot::RwLock`].
pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>;
/// Read guard for [`RwLock`].
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>;
/// Upgradable Read guard for [`RwLock`].
pub type RwLockUpgradableReadGuard<'a, T> =
lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;
/// Write guard for [`RwLock`].
pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>;
/// RAII guard for `RwLockReadGuard::map`.
pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>;
/// RAII guard for `RwLockWriteGuard::map`.
pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>;
/// A dependency-tracking wrapper for [`parking_lot::Once`].
#[derive(Debug, Default)]
pub struct Once {
inner: parking_lot::Once,
id: LazyMutexId,
}
impl Once {
/// Create a new `Once` value.
pub const fn new() -> Self {
Self {
inner: parking_lot::Once::new(),
id: LazyMutexId::new(),
}
}
/// Returns the current state of this `Once`.
pub fn state(&self) -> OnceState {
self.inner.state()
}
/// This call is considered "locking this `Once`" and it participates in dependency
/// tracking as such.
///
/// # Panics
///
/// This method will panic if `f` panics, poisoning this `Once`. In addition, this function
/// panics when the lock acquisition order is determined to be inconsistent.
pub fn call_once(&self, f: impl FnOnce()) {
let _borrow = self.id.get_borrowed();
self.inner.call_once(f);
}
/// Performs the given initialization routine once and only once.
///
/// This method is identical to [`Once::call_once`] except it ignores poisoning.
pub fn call_once_force(&self, f: impl FnOnce(OnceState)) {
let _borrow = self.id.get_borrowed();
self.inner.call_once_force(f);
}
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::thread;
use super::tracing;
#[test]
fn test_mutex_usage() {
let mutex = Arc::new(tracing::Mutex::new(()));
let local_lock = mutex.lock();
drop(local_lock);
thread::spawn(move || {
let _remote_lock = mutex.lock();
})
.join()
.unwrap();
}
#[test]
#[should_panic]
fn test_mutex_conflict() {
let mutexes = [
tracing::Mutex::new(()),
tracing::Mutex::new(()),
tracing::Mutex::new(()),
];
for i in 0..3 {
let _first_lock = mutexes[i].lock();
let _second_lock = mutexes[(i + 1) % 3].lock();
}
}
#[test]
fn test_rwlock_usage() {
let lock = Arc::new(tracing::RwLock::new(()));
let lock2 = Arc::clone(&lock);
let _read_lock = lock.read();
// Should be able to acquire lock in the background
thread::spawn(move || {
let _read_lock = lock2.read();
})
.join()
.unwrap();
}
#[test]
fn test_rwlock_upgradable_read_usage() {
let lock = tracing::RwLock::new(());
// Should be able to acquire an upgradable read lock.
let upgradable_guard: tracing::RwLockUpgradableReadGuard<'_, _> = lock.upgradable_read();
// Should be able to upgrade the guard.
let _write_guard: tracing::RwLockWriteGuard<'_, _> =
tracing::RwLockUpgradableReadGuard::upgrade(upgradable_guard);
}
#[test]
fn test_once_usage() {
let once = Arc::new(tracing::Once::new());
let once_clone = once.clone();
assert!(!once_clone.state().done());
let handle = thread::spawn(move || {
assert!(!once_clone.state().done());
once_clone.call_once(|| {});
assert!(once_clone.state().done());
});
handle.join().unwrap();
assert!(once.state().done());
}
}

View File

@@ -1,101 +1,83 @@
//! Tracing mutex wrappers for locks found in `std::sync`.
//!
//! This module provides wrappers for `std::sync` primitives with exactly the same API and
//! functionality as their counterparts, with the exception that their acquisition order is
//! tracked.
//! functionality as their counterparts, with the exception that their acquisition order is tracked.
//!
//! Dedicated wrappers that provide the dependency tracing can be found in the [`tracing`] module.
//! The original primitives are available from [`std::sync`], imported as [`raw`] for convenience.
//!
//! If debug assertions are enabled, this module imports the primitives from [`tracing`], otherwise
//! it will import from [`raw`].
//!
//! ```rust
//! # use tracing_mutex::stdsync::TracingMutex;
//! # use tracing_mutex::stdsync::TracingRwLock;
//! let mutex = TracingMutex::new(());
//! # use tracing_mutex::stdsync::tracing::Mutex;
//! # use tracing_mutex::stdsync::tracing::RwLock;
//! let mutex = Mutex::new(());
//! mutex.lock().unwrap();
//!
//! let rwlock = TracingRwLock::new(());
//! let rwlock = RwLock::new(());
//! rwlock.read().unwrap();
//! ```
use std::fmt;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync::LockResult;
use std::sync::Mutex;
use std::sync::MutexGuard;
use std::sync::Once;
use std::sync::OnceState;
use std::sync::PoisonError;
use std::sync::RwLock;
use std::sync::RwLockReadGuard;
use std::sync::RwLockWriteGuard;
use std::sync::TryLockError;
use std::sync::TryLockResult;
pub use std::sync as raw;
use crate::BorrowedMutex;
use crate::LazyMutexId;
use crate::MutexId;
/// Debug-only tracing `Mutex`.
///
/// Type alias that resolves to [`TracingMutex`] when debug assertions are enabled and to
/// [`std::sync::Mutex`] when they're not. Use this if you want to have the benefits of cycle
/// detection in development but do not want to pay the performance penalty in release.
#[cfg(debug_assertions)]
pub type DebugMutex<T> = TracingMutex<T>;
#[cfg(not(debug_assertions))]
pub type DebugMutex<T> = Mutex<T>;
pub use std::sync::{Condvar, Mutex, MutexGuard, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
/// Debug-only tracing `RwLock`.
///
/// Type alias that resolves to [`TracingRwLock`] when debug assertions are enabled and to
/// [`std::sync::RwLock`] when they're not. Use this if you want to have the benefits of cycle
/// detection in development but do not want to pay the performance penalty in release.
#[cfg(debug_assertions)]
pub type DebugRwLock<T> = TracingRwLock<T>;
#[cfg(not(debug_assertions))]
pub type DebugRwLock<T> = RwLock<T>;
pub use tracing::{Condvar, Mutex, MutexGuard, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
/// Debug-only tracing `Once`.
///
/// Type alias that resolves to [`TracingOnce`] when debug assertions are enabled and to
/// [`std::sync::Once`] when they're not. Use this if you want to have the benefits of cycle
/// detection in development but do not want to pay the performance penalty in release.
#[cfg(debug_assertions)]
pub type DebugOnce = TracingOnce;
#[cfg(not(debug_assertions))]
pub type DebugOnce = Once;
/// Dependency tracing versions of [`std::sync`].
pub mod tracing {
use std::fmt;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync;
use std::sync::LockResult;
use std::sync::OnceState;
use std::sync::PoisonError;
use std::sync::TryLockError;
use std::sync::TryLockResult;
use std::sync::WaitTimeoutResult;
use std::time::Duration;
/// Wrapper for [`std::sync::Mutex`].
///
/// Refer to the [crate-level][`crate`] documentaiton for the differences between this struct and
/// the one it wraps.
#[derive(Debug, Default)]
pub struct TracingMutex<T> {
inner: Mutex<T>,
use crate::BorrowedMutex;
use crate::LazyMutexId;
use crate::MutexId;
/// Wrapper for [`std::sync::Mutex`].
///
/// Refer to the [crate-level][`crate`] documentation for the differences between this struct and
/// the one it wraps.
#[derive(Debug, Default)]
pub struct Mutex<T> {
inner: sync::Mutex<T>,
id: MutexId,
}
}
/// Wrapper for [`std::sync::MutexGuard`].
///
/// Refer to the [crate-level][`crate`] documentaiton for the differences between this struct and
/// the one it wraps.
#[derive(Debug)]
pub struct TracingMutexGuard<'a, T> {
inner: MutexGuard<'a, T>,
mutex: BorrowedMutex<'a>,
}
/// Wrapper for [`std::sync::MutexGuard`].
///
/// Refer to the [crate-level][`crate`] documentation for the differences between this struct and
/// the one it wraps.
#[derive(Debug)]
pub struct MutexGuard<'a, T> {
inner: sync::MutexGuard<'a, T>,
_mutex: BorrowedMutex<'a>,
}
fn map_lockresult<T, I, F>(result: LockResult<I>, mapper: F) -> LockResult<T>
where
fn map_lockresult<T, I, F>(result: LockResult<I>, mapper: F) -> LockResult<T>
where
F: FnOnce(I) -> T,
{
{
match result {
Ok(inner) => Ok(mapper(inner)),
Err(poisoned) => Err(PoisonError::new(mapper(poisoned.into_inner()))),
}
}
}
fn map_trylockresult<T, I, F>(result: TryLockResult<I>, mapper: F) -> TryLockResult<T>
where
fn map_trylockresult<T, I, F>(result: TryLockResult<I>, mapper: F) -> TryLockResult<T>
where
F: FnOnce(I) -> T,
{
{
match result {
Ok(inner) => Ok(mapper(inner)),
Err(TryLockError::WouldBlock) => Err(TryLockError::WouldBlock),
@@ -103,13 +85,13 @@ where
Err(PoisonError::new(mapper(poisoned.into_inner())).into())
}
}
}
}
impl<T> TracingMutex<T> {
impl<T> Mutex<T> {
/// Create a new tracing mutex with the provided value.
pub fn new(t: T) -> Self {
Self {
inner: Mutex::new(t),
inner: sync::Mutex::new(t),
id: MutexId::new(),
}
}
@@ -121,12 +103,12 @@ impl<T> TracingMutex<T> {
/// This method participates in lock dependency tracking. If acquiring this lock introduces a
/// dependency cycle, this method will panic.
#[track_caller]
pub fn lock(&self) -> LockResult<TracingMutexGuard<T>> {
pub fn lock(&self) -> LockResult<MutexGuard<T>> {
let mutex = self.id.get_borrowed();
let result = self.inner.lock();
let mapper = |guard| TracingMutexGuard {
mutex,
let mapper = |guard| MutexGuard {
_mutex: mutex,
inner: guard,
};
@@ -140,12 +122,12 @@ impl<T> TracingMutex<T> {
/// This method participates in lock dependency tracking. If acquiring this lock introduces a
/// dependency cycle, this method will panic.
#[track_caller]
pub fn try_lock(&self) -> TryLockResult<TracingMutexGuard<T>> {
pub fn try_lock(&self) -> TryLockResult<MutexGuard<T>> {
let mutex = self.id.get_borrowed();
let result = self.inner.try_lock();
let mapper = |guard| TracingMutexGuard {
mutex,
let mapper = |guard| MutexGuard {
_mutex: mutex,
inner: guard,
};
@@ -168,59 +150,171 @@ impl<T> TracingMutex<T> {
pub fn into_inner(self) -> LockResult<T> {
self.inner.into_inner()
}
}
}
impl<T> From<T> for TracingMutex<T> {
impl<T> From<T> for Mutex<T> {
fn from(t: T) -> Self {
Self::new(t)
}
}
}
impl<'a, T> Deref for TracingMutexGuard<'a, T> {
type Target = MutexGuard<'a, T>;
impl<'a, T> Deref for MutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
}
impl<'a, T> DerefMut for TracingMutexGuard<'a, T> {
impl<'a, T> DerefMut for MutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
}
impl<'a, T: fmt::Display> fmt::Display for TracingMutexGuard<'a, T> {
impl<'a, T: fmt::Display> fmt::Display for MutexGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.inner.fmt(f)
}
}
}
/// Wrapper for [`std::sync::RwLock`].
#[derive(Debug, Default)]
pub struct TracingRwLock<T> {
inner: RwLock<T>,
/// Wrapper around [`std::sync::Condvar`].
///
/// Allows [`MutexGuard`] to be used with a `Condvar`. Unlike other structs in this module, this
/// wrapper does not add any additional dependency tracking or other overhead on top of the
/// primitive it wraps. All dependency tracking happens through the mutexes themselves.
///
/// # Panics
///
/// This struct does not add any panics over the base implementation of `Condvar`, but panics due to
/// dependency tracking may poison associated mutexes.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// use tracing_mutex::stdsync::tracing::{Condvar, Mutex};
///
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
/// let pair2 = Arc::clone(&pair);
///
/// // Spawn a thread that will unlock the condvar
/// thread::spawn(move || {
/// let (lock, condvar) = &*pair2;
/// *lock.lock().unwrap() = true;
/// condvar.notify_one();
/// });
///
/// // Wait until the thread unlocks the condvar
/// let (lock, condvar) = &*pair;
/// let guard = lock.lock().unwrap();
/// let guard = condvar.wait_while(guard, |started| !*started).unwrap();
///
/// // Guard should read true now
/// assert!(*guard);
/// ```
#[derive(Debug, Default)]
pub struct Condvar(sync::Condvar);
impl Condvar {
/// Creates a new condition variable which is ready to be waited on and notified.
pub fn new() -> Self {
Default::default()
}
/// Wrapper for [`std::sync::Condvar::wait`].
pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
let MutexGuard { _mutex, inner } = guard;
map_lockresult(self.0.wait(inner), |inner| MutexGuard { _mutex, inner })
}
/// Wrapper for [`std::sync::Condvar::wait_while`].
pub fn wait_while<'a, T, F>(
&self,
guard: MutexGuard<'a, T>,
condition: F,
) -> LockResult<MutexGuard<'a, T>>
where
F: FnMut(&mut T) -> bool,
{
let MutexGuard { _mutex, inner } = guard;
map_lockresult(self.0.wait_while(inner, condition), |inner| MutexGuard {
_mutex,
inner,
})
}
/// Wrapper for [`std::sync::Condvar::wait_timeout`].
pub fn wait_timeout<'a, T>(
&self,
guard: MutexGuard<'a, T>,
dur: Duration,
) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
let MutexGuard { _mutex, inner } = guard;
map_lockresult(self.0.wait_timeout(inner, dur), |(inner, result)| {
(MutexGuard { _mutex, inner }, result)
})
}
/// Wrapper for [`std::sync::Condvar::wait_timeout_while`].
pub fn wait_timeout_while<'a, T, F>(
&self,
guard: MutexGuard<'a, T>,
dur: Duration,
condition: F,
) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)>
where
F: FnMut(&mut T) -> bool,
{
let MutexGuard { _mutex, inner } = guard;
map_lockresult(
self.0.wait_timeout_while(inner, dur, condition),
|(inner, result)| (MutexGuard { _mutex, inner }, result),
)
}
/// Wrapper for [`std::sync::Condvar::notify_one`].
pub fn notify_one(&self) {
self.0.notify_one();
}
/// Wrapper for [`std::sync::Condvar::notify_all`].
pub fn notify_all(&self) {
self.0.notify_all();
}
}
/// Wrapper for [`std::sync::RwLock`].
#[derive(Debug, Default)]
pub struct RwLock<T> {
inner: sync::RwLock<T>,
id: MutexId,
}
}
/// Hybrid wrapper for both [`std::sync::RwLockReadGuard`] and [`std::sync::RwLockWriteGuard`].
///
/// Please refer to [`TracingReadGuard`] and [`TracingWriteGuard`] for usable types.
#[derive(Debug)]
pub struct TracingRwLockGuard<'a, L> {
/// Hybrid wrapper for both [`std::sync::RwLockReadGuard`] and [`std::sync::RwLockWriteGuard`].
///
/// Please refer to [`RwLockReadGuard`] and [`RwLockWriteGuard`] for usable types.
#[derive(Debug)]
pub struct TracingRwLockGuard<'a, L> {
inner: L,
mutex: BorrowedMutex<'a>,
}
_mutex: BorrowedMutex<'a>,
}
/// Wrapper around [`std::sync::RwLockReadGuard`].
pub type TracingReadGuard<'a, T> = TracingRwLockGuard<'a, RwLockReadGuard<'a, T>>;
/// Wrapper around [`std::sync::RwLockWriteGuard`].
pub type TracingWriteGuard<'a, T> = TracingRwLockGuard<'a, RwLockWriteGuard<'a, T>>;
/// Wrapper around [`std::sync::RwLockReadGuard`].
pub type RwLockReadGuard<'a, T> = TracingRwLockGuard<'a, sync::RwLockReadGuard<'a, T>>;
/// Wrapper around [`std::sync::RwLockWriteGuard`].
pub type RwLockWriteGuard<'a, T> = TracingRwLockGuard<'a, sync::RwLockWriteGuard<'a, T>>;
impl<T> TracingRwLock<T> {
impl<T> RwLock<T> {
pub fn new(t: T) -> Self {
Self {
inner: RwLock::new(t),
inner: sync::RwLock::new(t),
id: MutexId::new(),
}
}
@@ -232,11 +326,14 @@ impl<T> TracingRwLock<T> {
/// This method participates in lock dependency tracking. If acquiring this lock introduces a
/// dependency cycle, this method will panic.
#[track_caller]
pub fn read(&self) -> LockResult<TracingReadGuard<T>> {
pub fn read(&self) -> LockResult<RwLockReadGuard<T>> {
let mutex = self.id.get_borrowed();
let result = self.inner.read();
map_lockresult(result, |inner| TracingRwLockGuard { inner, mutex })
map_lockresult(result, |inner| TracingRwLockGuard {
inner,
_mutex: mutex,
})
}
/// Wrapper for [`std::sync::RwLock::write`].
@@ -246,11 +343,14 @@ impl<T> TracingRwLock<T> {
/// This method participates in lock dependency tracking. If acquiring this lock introduces a
/// dependency cycle, this method will panic.
#[track_caller]
pub fn write(&self) -> LockResult<TracingWriteGuard<T>> {
pub fn write(&self) -> LockResult<RwLockWriteGuard<T>> {
let mutex = self.id.get_borrowed();
let result = self.inner.write();
map_lockresult(result, |inner| TracingRwLockGuard { inner, mutex })
map_lockresult(result, |inner| TracingRwLockGuard {
inner,
_mutex: mutex,
})
}
/// Wrapper for [`std::sync::RwLock::try_read`].
@@ -260,11 +360,14 @@ impl<T> TracingRwLock<T> {
/// This method participates in lock dependency tracking. If acquiring this lock introduces a
/// dependency cycle, this method will panic.
#[track_caller]
pub fn try_read(&self) -> TryLockResult<TracingReadGuard<T>> {
pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<T>> {
let mutex = self.id.get_borrowed();
let result = self.inner.try_read();
map_trylockresult(result, |inner| TracingRwLockGuard { inner, mutex })
map_trylockresult(result, |inner| TracingRwLockGuard {
inner,
_mutex: mutex,
})
}
/// Wrapper for [`std::sync::RwLock::try_write`].
@@ -274,11 +377,14 @@ impl<T> TracingRwLock<T> {
/// This method participates in lock dependency tracking. If acquiring this lock introduces a
/// dependency cycle, this method will panic.
#[track_caller]
pub fn try_write(&self) -> TryLockResult<TracingWriteGuard<T>> {
pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
let mutex = self.id.get_borrowed();
let result = self.inner.try_write();
map_trylockresult(result, |inner| TracingRwLockGuard { inner, mutex })
map_trylockresult(result, |inner| TracingRwLockGuard {
inner,
_mutex: mutex,
})
}
/// Return a mutable reference to the underlying data.
@@ -292,49 +398,49 @@ impl<T> TracingRwLock<T> {
pub fn into_inner(self) -> LockResult<T> {
self.inner.into_inner()
}
}
}
impl<T> From<T> for TracingRwLock<T> {
impl<T> From<T> for RwLock<T> {
fn from(t: T) -> Self {
Self::new(t)
}
}
}
impl<'a, L, T> Deref for TracingRwLockGuard<'a, L>
where
impl<'a, L, T> Deref for TracingRwLockGuard<'a, L>
where
L: Deref<Target = T>,
{
{
type Target = T;
fn deref(&self) -> &Self::Target {
self.inner.deref()
}
}
}
impl<'a, T, L> DerefMut for TracingRwLockGuard<'a, L>
where
impl<'a, T, L> DerefMut for TracingRwLockGuard<'a, L>
where
L: Deref<Target = T> + DerefMut,
{
{
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner.deref_mut()
}
}
}
/// Wrapper around [`std::sync::Once`].
///
/// Refer to the [crate-level][`crate`] documentation for the differences between this struct and
/// the one it wraps.
#[derive(Debug)]
pub struct TracingOnce {
inner: Once,
/// Wrapper around [`std::sync::Once`].
///
/// Refer to the [crate-level][`crate`] documentation for the differences between this struct and
/// the one it wraps.
#[derive(Debug)]
pub struct Once {
inner: sync::Once,
mutex_id: LazyMutexId,
}
}
impl TracingOnce {
impl Once {
/// Create a new `Once` value.
pub const fn new() -> Self {
Self {
inner: Once::new(),
inner: sync::Once::new(),
mutex_id: LazyMutexId::new(),
}
}
@@ -353,7 +459,7 @@ impl TracingOnce {
self.inner.call_once(f);
}
/// Performs the same operation as [`call_once`][TracingOnce::call_once] except it ignores
/// Performs the same operation as [`call_once`][Once::call_once] except it ignores
/// poisoning.
///
/// # Panics
@@ -372,10 +478,10 @@ impl TracingOnce {
pub fn is_completed(&self) -> bool {
self.inner.is_completed()
}
}
}
#[cfg(test)]
mod tests {
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::thread;
@@ -383,7 +489,12 @@ mod tests {
#[test]
fn test_mutex_usage() {
let mutex = Arc::new(TracingMutex::new(()));
let mutex = Arc::new(Mutex::new(0));
assert_eq!(*mutex.lock().unwrap(), 0);
*mutex.lock().unwrap() = 1;
assert_eq!(*mutex.lock().unwrap(), 1);
let mutex_clone = mutex.clone();
let _guard = mutex.lock().unwrap();
@@ -400,7 +511,14 @@ mod tests {
#[test]
fn test_rwlock_usage() {
let rwlock = Arc::new(TracingRwLock::new(()));
let rwlock = Arc::new(RwLock::new(0));
assert_eq!(*rwlock.read().unwrap(), 0);
assert_eq!(*rwlock.write().unwrap(), 0);
*rwlock.write().unwrap() = 1;
assert_eq!(*rwlock.read().unwrap(), 1);
assert_eq!(*rwlock.write().unwrap(), 1);
let rwlock_clone = rwlock.clone();
let _read_lock = rwlock.read().unwrap();
@@ -420,7 +538,7 @@ mod tests {
#[test]
fn test_once_usage() {
let once = Arc::new(TracingOnce::new());
let once = Arc::new(Once::new());
let once_clone = once.clone();
assert!(!once.is_completed());
@@ -441,8 +559,8 @@ mod tests {
#[test]
#[should_panic(expected = "Mutex order graph should not have cycles")]
fn test_detect_cycle() {
let a = TracingMutex::new(());
let b = TracingMutex::new(());
let a = Mutex::new(());
let b = Mutex::new(());
let hold_a = a.lock().unwrap();
let _ = b.lock();
@@ -452,4 +570,5 @@ mod tests {
let _hold_b = b.lock().unwrap();
let _ = a.lock();
}
}
}