92 Commits

Author SHA1 Message Date
9ca5af2c82 Merge pull request #36 from bertptrs/example/show-potential 2023-11-13 08:37:36 +01:00
74b4fe0bb1 Rewrite example to show potential deadlock
The example originally showed a guaranteed deadlock, which was not as clear
as it could be. The new version shows intentionally racy code that may
result in a successful execution but may also deadlock.
2023-11-12 18:36:53 +01:00
bors[bot]
6199598944 Merge #34
34: Fix remaining references to TracingMutex r=bertptrs a=bertptrs

Thanks to `@ReinierMaas` for noticing.

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2023-10-06 07:01:48 +00:00
fd75fc453b Fix remaining references to TracingMutex 2023-10-06 08:59:21 +02:00
bors[bot]
43df59ac1c Merge #33
33: Prepare for release 0.3.0 r=bertptrs a=bertptrs

Double check

- [x] documentation
- [x] changelog
- [x] tests


Co-authored-by: Bert Peters <bert@bertptrs.nl>
2023-09-13 10:03:25 +00:00
1fe44d0a05 Expand changelog 2023-09-11 08:27:06 +02:00
c9083c8bc1 Clarify feature selection 2023-09-09 12:02:52 +02:00
bors[bot]
d8c559fd3f Merge #32
32: Capture backtraces for mutex dependencies r=bertptrs a=bertptrs

Builds on top of #28.

This PR adds backtrace data to the dependency graph, so you can figure out what series of events might have introduced the cycle in dependencies. Only the first backtrace for a given dependency is captured; once an edge already exists in the graph, later acquisitions along it do not record a new one.

These changes do have a performance penalty, with a worst case of 20-50% degradation over previous results. That worst case only occurs when every dependency between mutexes is new, so typical workloads are unlikely to be hit as severely. The exact hit depends on a number of things, such as the level of backtraces captured (off, 1, or full) and how many dependencies are involved.

Below is an example of what this can look like, generated with `examples/mutex_cycle.rs`. The formatting is decidedly suboptimal, but backtraces cannot be formatted very well in stable Rust at the moment.

```
thread 'main' panicked at 'Found cycle in mutex dependency graph:
   0: tracing_mutex::MutexDep::capture
             at ./src/lib.rs:278:23
   1: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
   2: tracing_mutex::graph::DiGraph<V,E>::add_edge
             at ./src/graph.rs:131:50
   3: tracing_mutex::MutexId::mark_held::{{closure}}
             at ./src/lib.rs:146:17
   4: std::thread::local::LocalKey<T>::try_with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:270:16
   5: std::thread::local::LocalKey<T>::with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:246:9
   6: tracing_mutex::MutexId::mark_held
             at ./src/lib.rs:142:25
   7: tracing_mutex::MutexId::get_borrowed
             at ./src/lib.rs:129:9
   8: tracing_mutex::stdsync::tracing::Mutex<T>::lock
             at ./src/stdsync.rs:110:25
   9: mutex_cycle::main
             at ./examples/mutex_cycle.rs:20:18
  10: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
  11: std::sys_common::backtrace::__rust_begin_short_backtrace
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/sys_common/backtrace.rs:135:18
  12: std::rt::lang_start::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:166:18
  13: core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &F>::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:284:13
  14: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  15: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  16: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  17: std::rt::lang_start_internal::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:48
  18: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  19: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  20: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  21: std::rt::lang_start_internal
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:20
  22: std::rt::lang_start
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:165:17
  23: main
  24: <unknown>
  25: __libc_start_main
  26: _start

   0: tracing_mutex::MutexDep::capture
             at ./src/lib.rs:278:23
   1: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
   2: tracing_mutex::graph::DiGraph<V,E>::add_edge
             at ./src/graph.rs:131:50
   3: tracing_mutex::MutexId::mark_held::{{closure}}
             at ./src/lib.rs:146:17
   4: std::thread::local::LocalKey<T>::try_with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:270:16
   5: std::thread::local::LocalKey<T>::with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:246:9
   6: tracing_mutex::MutexId::mark_held
             at ./src/lib.rs:142:25
   7: tracing_mutex::MutexId::get_borrowed
             at ./src/lib.rs:129:9
   8: tracing_mutex::stdsync::tracing::Mutex<T>::lock
             at ./src/stdsync.rs:110:25
   9: mutex_cycle::main
             at ./examples/mutex_cycle.rs:14:18
  10: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
  11: std::sys_common::backtrace::__rust_begin_short_backtrace
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/sys_common/backtrace.rs:135:18
  12: std::rt::lang_start::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:166:18
  13: core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &F>::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:284:13
  14: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  15: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  16: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  17: std::rt::lang_start_internal::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:48
  18: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  19: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  20: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  21: std::rt::lang_start_internal
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:20
  22: std::rt::lang_start
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:165:17
  23: main
  24: <unknown>
  25: __libc_start_main
  26: _start

', src/lib.rs:163:13
stack backtrace:
   0: rust_begin_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:593:5
   1: core::panicking::panic_fmt
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/panicking.rs:67:14
   2: tracing_mutex::MutexId::mark_held
             at ./src/lib.rs:163:13
   3: tracing_mutex::MutexId::get_borrowed
             at ./src/lib.rs:129:9
   4: tracing_mutex::stdsync::tracing::Mutex<T>::lock
             at ./src/stdsync.rs:110:25
   5: mutex_cycle::main
             at ./examples/mutex_cycle.rs:25:14
   6: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
```

Importantly, the error shows all the dependencies that are already part of the graph, not the one that was just added, since that is already visible from the immediate panic.
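
For illustration, here is a minimal sketch (hypothetical, not part of this PR) of code that produces such a report with just two mutexes:

```rust
use tracing_mutex::stdsync::tracing::Mutex;

fn main() {
    let a = Mutex::new(0);
    let b = Mutex::new(0);

    {
        let _a = a.lock().unwrap();
        let _b = b.lock().unwrap(); // records the dependency a -> b
    }

    // Locking in the opposite order adds the edge b -> a, closing a cycle;
    // with tracing enabled this panics and prints the backtrace stored for
    // the existing a -> b dependency.
    let _b = b.lock().unwrap();
    let _a = a.lock().unwrap(); // panics here
}
```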

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2023-09-09 09:24:46 +00:00
a8e8af6351 Make dependency tracking a compile time setting 2023-09-09 11:21:22 +02:00
068303d81d Show cycle backtraces when they happen 2023-09-09 11:21:22 +02:00
6be3e05cab Capture backtraces of allocations for debugging
Largely based on https://github.com/bertptrs/tracing-mutex/pull/28 with
only minor modifications.
2023-08-27 16:44:02 +02:00
909e934572 Reuse dependency orderings in graph
This avoids a potential panic when adding new nodes to the graph, as
there is no feasible way to overflow IDs any more.
2023-08-27 15:48:57 +02:00
bors[bot]
0ae544a07a Merge #31
31: Update CI dependencies r=bertptrs a=bertptrs

actions-rs uses deprecated features; move to dtolnay/rust-toolchain for the toolchain and just use regular run commands for everything else

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2023-08-27 12:39:35 +00:00
4148d509bf Update CI dependencies
actions-rs uses deprecated features; move to dtolnay/rust-toolchain for the
toolchain and just use regular run commands for everything else
2023-08-27 14:35:40 +02:00
fc1593b76f Bump criterion version
0.3 uses some Rust features that will not work in the future, so upgrade
while we're moving things anyway
2023-08-27 14:25:14 +02:00
8f19921e9e Merge pull request #30 from bertptrs/remove-lazy-static 2023-08-27 11:26:58 +02:00
00420d6807 Implement wrapper for OnceLock 2023-08-26 00:58:54 +02:00
49b15bb6bd Bump MSRV to 1.70 2023-08-26 00:58:54 +02:00
29c9daf53e Replace dependency on lazy-static with OnceLock 2023-08-25 08:44:45 +02:00
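
The replacement follows the standard `OnceLock` idiom; a minimal sketch of the pattern (names illustrative, not the crate's internals):

```rust
use std::sync::{Mutex, OnceLock};

// Lazily initialized global, replacing the lazy_static! macro (and its
// extra dependency) with the standard library's OnceLock.
fn global_state() -> &'static Mutex<Vec<u32>> {
    static STATE: OnceLock<Mutex<Vec<u32>>> = OnceLock::new();
    STATE.get_or_init(Default::default)
}

fn main() {
    global_state().lock().unwrap().push(1);
}
```
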
bors[bot]
8feedb09d2 Merge #27
27: Add MSRV of 1.63 r=bertptrs a=bertptrs

Fixes #26.

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-08-29 06:34:17 +00:00
de9888a102 Update documentation with MSRV 2022-08-29 08:32:17 +02:00
2d2e03eede Simplify lazy mutex ID drop 2022-08-29 08:26:12 +02:00
e9b577a0f5 Make stdsync wrappers const-constructible 2022-08-27 10:33:15 +02:00
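
A hedged sketch of what const construction enables, written with the post-0.3 type names (at the time of this commit the wrapper was still called `TracingMutex`):

```rust
use tracing_mutex::stdsync::tracing::Mutex;

// Because new() is const, the tracing wrapper can live in a static,
// just like std::sync::Mutex.
static COUNTER: Mutex<u64> = Mutex::new(0);

fn main() {
    *COUNTER.lock().unwrap() += 1;
}
```
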
5f6823394d Build and test with Rust 1.63 2022-08-27 10:26:42 +02:00
bors[bot]
61d19f866c Merge #25
25: Restructure modules r=bertptrs a=bertptrs

The `TracingFoo`, `DebugFoo` versions of every `Foo` resulted in quite verbose type names everywhere. This PR restructures them into separate modules. The new modules map onto the old types as follows:

- `tracing_mutex::foo::TracingFoo` -> `tracing_mutex::foo::tracing::Foo`
- `tracing_mutex::foo::DebugFoo` -> `tracing_mutex::foo::Foo`
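
In terms of imports, a hypothetical before/after sketch:

```rust
// Before (0.2.x):
// use tracing_mutex::stdsync::TracingMutex;
// use tracing_mutex::stdsync::DebugMutex;

// After this PR:
use tracing_mutex::stdsync::tracing::Mutex as TracingMutex; // always traces
use tracing_mutex::stdsync::Mutex as DebugMutex; // traces only under debug assertions

fn main() {
    *DebugMutex::new(0).lock().unwrap() += 1;
    *TracingMutex::new(0).lock().unwrap() += 1;
}
```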

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-08-27 08:18:29 +00:00
f78969ebf7 Update documentation 2022-08-27 10:08:51 +02:00
56b0604448 Restructure parking_lot wrappers 2022-08-27 10:06:31 +02:00
6e5516eaa7 Restructure std::sync wrappers 2022-08-27 10:01:51 +02:00
764d3df454 Add parking_lot to changelog 2022-08-24 10:28:51 +02:00
bors[bot]
e543860d8b Merge #24
24: Update parking_lot dependency to 0.12 r=bertptrs a=djkoloski

The changelog for parking_lot 0.12 can be found [here](https://github.com/Amanieu/parking_lot/blob/master/CHANGELOG.md#parking_lot-0120-parking_lot_core-090-lock_api-046-2022-01-28):
```
- The MSRV is bumped to 1.49.0.
- Disabled eventual fairness on wasm32-unknown-unknown. (#302)
- Added a rwlock method to report if lock is held exclusively. (#303)
- Use new asm! macro. (#304)
- Use windows-rs instead of winapi for faster builds. (#311)
- Moved hardware lock elision support to a separate Cargo feature. (#313)
- Removed used of deprecated spin_loop_hint. (#314)
```

Co-authored-by: David Koloski <dkoloski@google.com>
2022-08-24 08:20:55 +00:00
David Koloski
ed04552af3 Update parking_lot dependency to 0.12 2022-08-23 11:34:31 -04:00
bors[bot]
c5a506436c Merge #23
23: Ensure `BorrowedMutex` is `!Send` r=bertptrs a=bertptrs

This should prevent the bugs found in #22.
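
The underlying technique, in simplified form (a sketch, not the exact diff): embed a `PhantomData` of a type that is itself `!Send`, such as `std::sync::MutexGuard`, so the compiler infers `!Send` without unsafe code or unstable negative impls:

```rust
use std::marker::PhantomData;
use std::sync::MutexGuard;

struct BorrowedMutex<'a> {
    id: &'a u32, // stand-in for the real MutexId reference
    // MutexGuard<'static, ()> is !Send, so this zero-sized marker makes
    // BorrowedMutex !Send as well, at no runtime cost.
    _not_send: PhantomData<MutexGuard<'static, ()>>,
}

fn main() {
    let id = 0;
    let guard = BorrowedMutex { id: &id, _not_send: PhantomData };
    // std::thread::spawn(move || drop(guard)); // fails to compile: not Send
    drop(guard);
}
```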

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-06-23 20:02:10 +00:00
33cb6014a3 Ensure BorrowedMutex is !Send 2022-06-23 21:54:25 +02:00
5232bac582 Bump version 2022-05-23 08:59:47 +02:00
bors[bot]
6472f4b807 Merge #21
21: Prepare for release v0.2.1 r=bertptrs a=bertptrs



Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-23 06:55:45 +00:00
6afe7b1c48 Update README and CHANGELOG 2022-05-23 08:53:56 +02:00
9238ef53ee Update copyright 2022-05-23 08:37:26 +02:00
bors[bot]
c08addff7d Merge #17
17: Fix typos r=bertptrs a=quisar



Co-authored-by: Benjamin Lerman <qsr@chromium.org>
2022-05-23 06:33:21 +00:00
bors[bot]
c1ce9df8ad Merge #19
19: Add a wrapper for `std::sync::Condvar` r=bertptrs a=bertptrs

This wrapper does not do any tracing itself but supports the use of a tracing mutex guard instead of an `std::sync` one.
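
Usage mirrors `std::sync::Condvar`, with `wait` consuming and returning the tracing guard. A hedged sketch using the post-0.3 module layout (at the time of this PR the type was exposed as `TracingCondvar`):

```rust
use std::sync::Arc;
use std::thread;
use tracing_mutex::stdsync::tracing::{Condvar, Mutex};

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock().unwrap() = true;
        cvar.notify_one();
    });

    let (lock, cvar) = &*pair;
    let mut ready = lock.lock().unwrap();
    while !*ready {
        // wait() releases the tracing guard and returns it on wake-up
        ready = cvar.wait(ready).unwrap();
    }
}
```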

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-17 19:50:02 +00:00
312eaa8649 Add a wrapper for std::sync::Condvar
This wrapper does not do any tracing itself but supports the use of a
tracing mutex guard instead of an `std::sync` one.
2022-05-17 21:45:25 +02:00
bors[bot]
1f7e6921aa Merge #18
18: Enable bors for nicer merging r=bertptrs a=bertptrs



Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-15 21:40:40 +00:00
f7048f265f Enable CI builds on staging/trying 2022-05-15 23:35:00 +02:00
64e56fdb86 Add minimal bors config 2022-05-15 23:35:00 +02:00
Benjamin Lerman
8e3278fdd2 Fix typos 2022-05-10 10:30:20 +02:00
9ea993e737 Add missing date 2022-05-07 18:15:50 +02:00
062850fc3e Merge pull request #16 from bertptrs/docsrs_feature_docs
Fix documentation builds for features
2022-05-07 17:56:09 +02:00
0d2622d5c6 Build documentation on CI 2022-05-07 17:52:32 +02:00
d1417e0b0c Tag module docs with their required features 2022-05-07 17:52:32 +02:00
fcc64e2cef Automatically build documentation for all features 2022-05-07 17:03:45 +02:00
fd0d05307c Update README and copyright year 2022-05-07 16:54:37 +02:00
2f6e214784 Merge pull request #15 from bertptrs/pre-release-cleanup 2022-05-07 16:50:10 +02:00
3ec7e83e00 Update changelog and version 2022-05-07 16:43:31 +02:00
ea8e0208a0 Explicitly test for disallowed self-cycles 2022-05-07 16:43:31 +02:00
8926af4e13 Also deny clippy warnings on other targets 2022-05-07 16:43:31 +02:00
77676ea04d Fix formatting 2022-05-07 16:43:31 +02:00
46c92cfbbf Merge pull request #13 from quisar/add_upgradable 2022-05-06 10:17:54 +02:00
Benjamin Lerman
743cc83669 Add TracingRwLockUpgradableReadGuard wrapper for parking_lot 2022-05-03 10:16:27 +02:00
4faaae8d8f Merge pull request #10 from quisar/fix_locking_issues_in_lockapi 2022-05-02 08:32:13 +02:00
Benjamin Lerman
b78af9150d Fix a number of issues in the lockapi wrappers. 2022-05-02 08:20:23 +02:00
515930c0a2 Merge pull request #9 from quisar/prevent-reentrant-locking 2022-05-02 08:13:48 +02:00
Benjamin Lerman
b5a5ca16c3 Do not allow recursive locks. 2022-05-02 08:11:37 +02:00
aef99d4f65 Merge pull request #8 from quisar/fix_deref 2022-05-02 08:04:55 +02:00
Benjamin Lerman
6073c6c78d Fix Target for Deref of stdsync::TracingMutexGuard 2022-05-01 16:53:26 +02:00
cdd44f74fa Merge pull request #11 from bertptrs/fix-bitrot 2022-05-01 14:06:23 +02:00
38b3b226cc Move to edition 2021 altogether 2022-05-01 12:03:50 +02:00
3b9b908460 Correctly mark mutex reference as unused 2022-05-01 11:50:50 +02:00
ef421e20eb Deal with IntoIter deprecation 2022-05-01 11:50:37 +02:00
66576e5b0e Merge pull request #5 from bertptrs/benchmarking
Implement minimal benchmarking of dependency tracking
2021-07-10 22:17:44 +02:00
308af218e1 Implement minimal benchmarking of dependency tracking 2021-07-10 22:14:33 +02:00
79ed599a2f Merge pull request #3 from bertptrs/locking-api-support 2021-07-10 17:28:20 +02:00
680e335ccf Document new modules 2021-07-10 17:25:42 +02:00
17761af5a8 Add type aliases for mapped mutex guards 2021-07-10 13:05:41 +02:00
4c70d999d6 Create type aliases for parking_lot::RwLock 2021-07-10 12:17:35 +02:00
618a11f940 Implement a wrapper for parking_lot::Once 2021-05-27 22:19:57 +02:00
77cd603363 Implement minimal mutexes for parking_lot. 2021-05-27 22:00:37 +02:00
73b4c8b1af Minimal parking_lot support 2021-05-27 21:16:24 +02:00
b21a63e74b Implement RwLock-based traits for lockapi worker. 2021-05-27 21:16:24 +02:00
6a3cb83d01 Implement Mutex behaviour for lock_api 2021-05-27 21:16:24 +02:00
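
A hedged usage sketch of what the lock_api work enables: the wrapper implements `lock_api::RawMutex` whenever the wrapped raw mutex does, so a traced `parking_lot`-backed mutex can be assembled by hand (the alias below is illustrative, not a name the crate exports; requires the `lockapi` feature plus `lock_api` and `parking_lot` as dependencies):

```rust
use tracing_mutex::lockapi::TracingWrapper;

// A lock_api::Mutex built on parking_lot's raw mutex, with dependency
// tracking layered in by the tracing wrapper.
type TracedMutex<T> = lock_api::Mutex<TracingWrapper<parking_lot::RawMutex>, T>;

fn main() {
    let m: TracedMutex<i32> = TracedMutex::new(5);
    *m.lock() += 1;
    assert_eq!(*m.lock(), 6);
}
```
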
08cfb17234 Build all features on CI 2021-05-27 21:16:24 +02:00
536ee31138 Prepare for release 2021-05-27 21:13:24 +02:00
e2db0eaca8 Fix a graph invariant violation on cycle detection 2021-05-27 20:31:00 +02:00
158e5353bb Add missing guard type aliases 2021-05-24 20:28:49 +02:00
c4d211a923 Prepare for release 2021-05-24 15:40:30 +02:00
f524318bfe Only run CI for pushes to master and PRs. 2021-05-24 15:33:36 +02:00
917906e85e Update README 2021-05-24 15:30:56 +02:00
40e40f658c Merge pull request #1 from bertptrs/improve-digraph 2021-05-24 15:13:37 +02:00
ebb8132cf8 Add a fuzz-test for the mutex ID's graph 2021-05-24 15:10:41 +02:00
ca12ae6b0e Add changelog 2021-05-24 14:49:48 +02:00
d242ac5bc2 Use interior mutability for updating graph order 2021-05-24 14:49:48 +02:00
39b493a871 Merge hash maps in graph structures
This saves quite a few hash-map lookups which improves performance by
about 25%.
2021-05-24 14:49:48 +02:00
cca3cf7827 Fix unintentional exponential order ids 2021-05-24 14:49:48 +02:00
6ef9cb12f8 Implement basic fuzz testing for the digraph impl 2021-05-24 14:49:48 +02:00
15 changed files with 2013 additions and 619 deletions


@@ -1,44 +1,51 @@
on:
push:
branches:
- master
- staging
- trying
pull_request:
name: Continuous integration
jobs:
ci:
tests:
name: Rust project
runs-on: ubuntu-latest
strategy:
matrix:
rust:
- "1.70" # minimum stable rust version
- stable
- beta
- nightly
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
- uses: dtolnay/rust-toolchain@v1
with:
profile: minimal
toolchain: ${{ matrix.rust }}
override: true
components: rustfmt, clippy
- uses: actions-rs/cargo@v1
with:
command: build
- run: cargo build --all-features --all-targets
- run: cargo test --all-features
- run: cargo fmt --all -- --check
- run: cargo clippy --all-features --all-targets -- -D warnings
- uses: actions-rs/cargo@v1
with:
command: test
docs:
name: Documentation build
runs-on: ubuntu-latest
- uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
steps:
- uses: actions/checkout@v3
- uses: actions-rs/cargo@v1
- uses: dtolnay/rust-toolchain@v1
with:
command: clippy
args: -- -D warnings
toolchain: nightly
- name: Build documentation
env:
# Build the docs like docs.rs builds it
RUSTDOCFLAGS: --cfg docsrs
run: cargo doc --all-features

CHANGELOG.md (new file)

@@ -0,0 +1,106 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project
adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.3.0] - 2023-09-09
### Added
- The minimum supported Rust version is now defined as 1.70. Previously it was undefined.
- Wrappers for `std::sync` primitives can now be `const` constructed.
- Add support for `std::sync::OnceLock`
- Added backtraces of mutex allocations to the cycle report. Capturing backtraces does incur some
overhead; this can be mitigated by disabling the `backtraces` feature, which is enabled by default.
### Breaking
- Update [`parking_lot`][parking_lot] dependency to `0.12`.
- Restructured the crate to reduce typename verbosity. Wrapper names now match the name of the
primitive they wrap. Specific always/debug tracing versions have now moved to separate modules.
For example, `tracing_mutex::stdsync::TracingMutex` is now
`tracing_mutex::stdsync::tracing::Mutex`, and `tracing_mutex::stdsync::DebugMutex` is now called
`tracing_mutex::stdsync::Mutex`. This hopefully reduces the visual noise while reading code that
uses this in practice. Unwrapped primitives are reexported under `tracing_mutex::stdsync::raw` for
convenience.
### Fixed
- Enforce that all internal mutex guards are `!Send`. They should already be `!Send` for other
reasons, but this adds extra enforcement through the type system.
## [0.2.1] - 2022-05-23
### Added
- Build [docs.rs] documentation with all features enabled for completeness.
- Add support for `std::sync::Condvar`
### Fixed
- The `parkinglot` module is now correctly enabled by the `parkinglot` feature rather than the
`lockapi` feature.
## [0.2.0] - 2022-05-07
### Added
- Generic support for wrapping mutexes that implement the traits provided by the
[`lock_api`][lock_api] crate. This can be used for creating support for other mutex providers that
implement it.
- Support for [`parking_lot`][parking_lot] mutexes. Support includes type aliases for all
provided mutex types as well as a dedicated `Once` wrapper.
- Simple benchmark to track the rough performance penalty incurred by dependency tracking.
### Breaking
- The library now requires edition 2021.
- The `Mutex`- and `RwLockGuards` now dereference to `T` rather than the lock guard they wrap. This
is technically a bugfix but can theoretically break existing code.
- Self-cycles are no longer allowed for lock dependencies. They were previously permitted because
they usually aren't a problem, but they can create RWR (read-write-read) deadlocks with `RwLock`s.
### Changed
- The project now targets edition 2021
## [0.1.2] - 2021-05-27
### Added
- Added missing type aliases for the guards returned by `DebugMutex` and `DebugRwLock`. These new
type aliases function the same as the ones they belong to, resolving to either the tracing
versions when debug assertions are enabled or the standard one when they're not.
### Fixed
- Fixed a corruption error where deallocating a previously cyclic mutex could result in a panic.
## [0.1.1] - 2021-05-24
### Changed
- New data structure for internal dependency graph, resulting in quicker graph updates.
### Fixed
- Fixed an issue where internal graph ordering indices were exponential rather than sequential. This
caused the available IDs to run out far more quickly than intended.
## [0.1.0] - 2021-05-16 [YANKED]
Initial release.
[Unreleased]: https://github.com/bertptrs/tracing-mutex/compare/v0.3.0...HEAD
[0.3.0]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.1...v0.3.0
[0.2.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.0...v0.2.1
[0.2.0]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.2...v0.2.0
[0.1.2]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.1...v0.1.2
[0.1.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.0...v0.1.1
[0.1.0]: https://github.com/bertptrs/tracing-mutex/releases/tag/v0.1.0
[docs.rs]: https://docs.rs/tracing-mutex/latest/tracing_mutex/
[lock_api]: https://docs.rs/lock_api/
[parking_lot]: https://docs.rs/parking_lot/


@@ -1,8 +1,8 @@
[package]
name = "tracing-mutex"
version = "0.1.0"
version = "0.3.0"
authors = ["Bert Peters <bert@bertptrs.nl>"]
edition = "2018"
edition = "2021"
license = "MIT OR Apache-2.0"
documentation = "https://docs.rs/tracing-mutex"
categories = ["concurrency", "development-tools::debugging"]
@@ -10,6 +10,29 @@ keywords = ["mutex", "rwlock", "once", "thread"]
description = "Ensure deadlock-free mutexes by allocating in order, or else."
readme = "README.md"
repository = "https://github.com/bertptrs/tracing-mutex"
rust-version = "1.70"
[package.metadata.docs.rs]
# Build docs for all features so the documentation is more complete
all-features = true
# Set custom cfg so we can enable docs.rs magic
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
lazy_static = "1"
lock_api = { version = "0.4", optional = true }
parking_lot = { version = "0.12", optional = true }
[dev-dependencies]
criterion = "0.5"
rand = "0.8"
[[bench]]
name = "mutex"
harness = false
[features]
default = ["backtraces"]
backtraces = []
# Feature names do not match crate names pending namespaced features.
lockapi = ["lock_api"]
parkinglot = ["parking_lot", "lockapi"]


@@ -186,7 +186,7 @@ APPENDIX: How to apply the Apache License to your work.
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 2022 Bert Peters
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -1,4 +1,4 @@
Copyright © 2021 Bert Peters
Copyright © 2022 Bert Peters
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the “Software”), to deal in the Software without restriction,


@@ -2,7 +2,7 @@
[![Continuous integration](https://github.com/bertptrs/tracing-mutex/actions/workflows/ci.yml/badge.svg)](https://github.com/bertptrs/tracing-mutex/actions/workflows/ci.yml)
[![Crates.io](https://img.shields.io/crates/v/tracing-mutex.svg)](https://crates.io/crates/tracing-mutex)
[![Documentation](https://img.shields.io/docsrs/tracing-mutex.svg)](https://docs.rs/tracing-mutex)
[![Documentation](https://docs.rs/tracing-mutex/badge.svg)](https://docs.rs/tracing-mutex)
Avoid deadlocks in your mutexes by acquiring them in a consistent order, or else.
@@ -18,11 +18,17 @@ should first acquire `Foo` then you can never deadlock. Of course, with just two
easy to keep track of, but once your code starts to grow you might lose track of all these
dependencies. That's where this crate comes in.
This crate tracks the order in which you acquire locks in your code, tries to build a dependency
tree out of it, and panics if your dependencies would create a cycle. It provides replacements for
existing synchronization primitives with an identical API, and should be a drop-in replacement.
Inspired by [this blogpost][whileydave], which references a similar behaviour implemented by
[Abseil][abseil-mutex] for their mutexes.
[Abseil][abseil-mutex] for their mutexes. [This article goes into more depth on the exact
implementation.][article]
[whileydave]: https://whileydave.com/2020/12/19/dynamic-cycle-detection-for-lock-ordering/
[abseil-mutex]: https://abseil.io/docs/cpp/guides/synchronization
[article]: https://bertptrs.nl/2022/06/23/deadlock-free-mutexes-and-directed-acyclic-graphs.html
## Usage
@@ -30,7 +36,7 @@ Add this dependency to your `Cargo.toml` file like any other:
```toml
[dependencies]
tracing-mutex = "0.1"
tracing-mutex = "0.2"
```
Then use the locks provided by this library instead of the ones you would use otherwise.
@@ -38,9 +44,9 @@ Replacements for the synchronization primitives in `std::sync` can be found in t
Support for other synchronization primitives is planned.
```rust
use tracing_mutex::stdsync::TracingMutex;
use tracing_mutex::stdsync::Mutex;
let some_mutex = TracingMutex::new(42);
let some_mutex = Mutex::new(42);
*some_mutex.lock().unwrap() += 1;
println!("{:?}", some_mutex);
```
@@ -50,17 +56,31 @@ introduce a cyclic dependency between your locks, the operation panics instead.
immediately notice the cyclic dependency rather than be eventually surprised by it in production.
Mutex tracing is efficient, but it is not completely overhead-free. If you cannot spare the
performance penalty in your production environment, this library also offers debug-only tracing.
`DebugMutex`, also found in the `stdsync` module, is a type alias that evaluates to `TracingMutex`
when debug assertions are enabled, and to `Mutex` when they are not. Similar helper types are
available for other synchronization primitives.
performance penalty in your production environment, this library also offers debug-only tracing. The
type aliases in `tracing_mutex::stdsync` correspond to tracing primitives from
`tracing_mutex::stdsync::tracing` when debug assertions are enabled, and to primitives from
`std::sync` when they are not. A similar structure exists for the other supported locking libraries.
The minimum supported Rust version is 1.70. Increasing this is not considered a breaking change, but
will be avoided within semver-compatible releases if possible.
### Features
- Dependency-tracking wrappers for all locking primitives
- Optional opt-out for release mode code
- Optional backtrace capture to aid with reproducing cyclic mutex chains
- Support for primitives from:
- `std::sync`
- `parking_lot`
- Any library that implements the `lock_api` traits
## Future improvements
- Improve performance in lock tracing
- Optional logging to make debugging easier
- Better and configurable error handling when detecting cyclic dependencies
- Support for other locking libraries, such as `parking_lot`
- Support for other locking libraries
- Support for async locking libraries
- Support for `Send` mutex guards
**Note:** `parking_lot` has already begun work on its own deadlock detection mechanism, which works
in a different way. Both can be complementary.

benches/mutex.rs (new file)

@@ -0,0 +1,82 @@
use std::sync::Arc;
use std::sync::Mutex;
use criterion::criterion_group;
use criterion::criterion_main;
use criterion::BenchmarkId;
use criterion::Criterion;
use criterion::Throughput;
use rand::prelude::*;
use tracing_mutex::stdsync::tracing::Mutex as TracingMutex;
const SAMPLE_SIZES: [usize; 5] = [10, 30, 100, 300, 1000];
/// Reproducibly generate random combinations (a, b) where index(a) < index(b)
///
/// All combinations are generated
fn generate_combinations<T>(options: &[Arc<T>]) -> Vec<(Arc<T>, Arc<T>)> {
let mut combinations = Vec::new();
for (i, first) in options.iter().enumerate() {
for second in options.iter().skip(i + 1) {
combinations.push((Arc::clone(first), Arc::clone(second)));
}
}
let mut rng = StdRng::seed_from_u64(42);
combinations.shuffle(&mut rng);
combinations
}
/// Take two arbitrary mutexes, lock the first, lock the second while holding the first.
fn benchmark_baseline(c: &mut Criterion) {
let mut group = c.benchmark_group("baseline");
for nodes in SAMPLE_SIZES {
group.throughput(Throughput::Elements((nodes * (nodes - 1) / 2) as u64));
group.bench_with_input(BenchmarkId::from_parameter(nodes), &nodes, |b, &s| {
b.iter_batched(
|| {
let mutexes: Vec<_> = (0..s).map(|_| Arc::new(Mutex::new(()))).collect();
generate_combinations(&mutexes)
},
|combinations| {
for (first, second) in combinations {
let _first = first.lock();
let _second = second.lock();
}
},
criterion::BatchSize::SmallInput,
)
});
}
}
/// Same as [`benchmark_baseline`] but now while tracking dependencies.
fn benchmark_tracing_mutex(c: &mut Criterion) {
let mut group = c.benchmark_group("tracing_mutex");
for nodes in SAMPLE_SIZES {
group.throughput(Throughput::Elements((nodes * (nodes - 1) / 2) as u64));
group.bench_with_input(BenchmarkId::from_parameter(nodes), &nodes, |b, &s| {
b.iter_batched(
|| {
let mutexes: Vec<_> = (0..s).map(|_| Arc::new(TracingMutex::new(()))).collect();
generate_combinations(&mutexes)
},
|combinations| {
for (first, second) in combinations {
let _first = first.lock();
let _second = second.lock();
}
},
criterion::BatchSize::SmallInput,
)
});
}
}
criterion_group!(benches, benchmark_baseline, benchmark_tracing_mutex);
criterion_main!(benches);

bors.toml (new file)

@@ -0,0 +1,6 @@
status = [
'Rust project (1.70)',
'Rust project (stable)',
'Rust project (beta)',
'Documentation build',
]

examples/mutex_cycle.rs (new file)

@@ -0,0 +1,62 @@
//! Show what a crash looks like
//!
//! This shows what the backtrace from a detected cycle looks like. It is expected to crash when run in
//! debug mode, because it might deadlock. In release mode, no tracing is used and the program may
//! do any of the following:
//!
//! - Return a random valuation of `a`, `b`, and `c`. The implementation has a race-condition by
//! design. I have observed (4, 3, 6), but also (6, 3, 5).
//! - Deadlock forever.
//!
//! One can increase the SLEEP_TIME constant to increase the likelihood of a deadlock occurring. On
//! my machine, 1ns of sleep time gives about a 50/50 chance of the program deadlocking.
use std::thread;
use std::time::Duration;
use tracing_mutex::stdsync::Mutex;
fn main() {
let a = Mutex::new(1);
let b = Mutex::new(2);
let c = Mutex::new(3);
// Increase this time to increase the likelihood of a deadlock.
const SLEEP_TIME: Duration = Duration::from_nanos(1);
// Depending on random CPU performance, this section may deadlock, or may return a result. With
// tracing enabled, the potential deadlock is always detected and a backtrace should be
// produced.
thread::scope(|s| {
// Create an edge from a to b
s.spawn(|| {
let a = a.lock().unwrap();
thread::sleep(SLEEP_TIME);
*b.lock().unwrap() += *a;
});
// Create an edge from b to c
s.spawn(|| {
let b = b.lock().unwrap();
thread::sleep(SLEEP_TIME);
*c.lock().unwrap() += *b;
});
// Create an edge from c to a
//
// N.B. the program can crash on any of the three edges, as there is no guarantee which
// thread will execute first. Nevertheless, any one of them is guaranteed to panic with
// tracing enabled.
s.spawn(|| {
let c = c.lock().unwrap();
thread::sleep(SLEEP_TIME);
*a.lock().unwrap() += *c;
});
});
println!(
"{}, {}, {}",
a.into_inner().unwrap(),
b.into_inner().unwrap(),
c.into_inner().unwrap()
);
}


@@ -1,3 +1,5 @@
use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::collections::HashSet;
use std::hash::Hash;
@@ -18,20 +20,32 @@ type Order = usize;
/// visibly changed.
///
/// [paper]: https://whileydave.com/publications/pk07_jea/
#[derive(Clone, Default, Debug)]
pub struct DiGraph<V>
#[derive(Debug)]
pub struct DiGraph<V, E>
where
V: Eq + Hash + Copy,
{
in_edges: HashMap<V, HashSet<V>>,
out_edges: HashMap<V, HashSet<V>>,
/// Next topological sort order
next_ord: Order,
/// Topological sort order. Order is not guaranteed to be contiguous
ord: HashMap<V, Order>,
nodes: HashMap<V, Node<V, E>>,
// Instead of reordering the orders in the graph whenever a node is deleted, we maintain a list
// of unused ids that can be handed out later again.
unused_order: Vec<Order>,
}
impl<V> DiGraph<V>
#[derive(Debug)]
struct Node<V, E>
where
V: Eq + Hash + Clone,
{
in_edges: HashSet<V>,
out_edges: HashMap<V, E>,
// The "Ord" field is a Cell to ensure we can update it in an immutable context.
// `std::collections::HashMap` doesn't let you have multiple mutable references to elements, but
// this way we can use immutable references and still update `ord`. This saves quite a few
// hashmap lookups in the final reorder function.
ord: Cell<Order>,
}
impl<V, E> DiGraph<V, E>
where
V: Eq + Hash + Copy,
{
@@ -42,31 +56,47 @@ where
/// the node in the topological order.
///
/// New nodes are appended to the end of the topological order when added.
fn add_node(&mut self, n: V) -> (&mut HashSet<V>, &mut HashSet<V>, Order) {
let next_ord = &mut self.next_ord;
let in_edges = self.in_edges.entry(n).or_default();
let out_edges = self.out_edges.entry(n).or_default();
fn add_node(&mut self, n: V) -> (&mut HashSet<V>, &mut HashMap<V, E>, Order) {
// need to compute next id before the call to entry() to avoid duplicate borrow of nodes
let fallback_id = self.nodes.len();
let order = *self.ord.entry(n).or_insert_with(|| {
let order = *next_ord;
*next_ord = next_ord.checked_add(1).expect("Topological order overflow");
order
let node = self.nodes.entry(n).or_insert_with(|| {
let order = if let Some(id) = self.unused_order.pop() {
// Reuse discarded ordering entry
id
} else {
// Allocate new order id
fallback_id
};
Node {
ord: Cell::new(order),
in_edges: Default::default(),
out_edges: Default::default(),
}
});
(in_edges, out_edges, order)
(&mut node.in_edges, &mut node.out_edges, node.ord.get())
}
pub(crate) fn remove_node(&mut self, n: V) -> bool {
match self.out_edges.remove(&n) {
match self.nodes.remove(&n) {
None => false,
Some(out_edges) => {
for other in out_edges {
self.in_edges.get_mut(&other).unwrap().remove(&n);
}
Some(Node {
out_edges,
in_edges,
ord,
}) => {
// Return ordering to the pool of unused ones
self.unused_order.push(ord.get());
for other in self.in_edges.remove(&n).unwrap() {
self.out_edges.get_mut(&other).unwrap().remove(&n);
}
out_edges.into_keys().for_each(|m| {
self.nodes.get_mut(&m).unwrap().in_edges.remove(&n);
});
in_edges.into_iter().for_each(|m| {
self.nodes.get_mut(&m).unwrap().out_edges.remove(&n);
});
true
}
@@ -77,18 +107,29 @@ where
///
/// Nodes, both from and to, are created as needed when creating new edges. If the new edge
/// would introduce a cycle, the edge is rejected and `false` is returned.
pub(crate) fn add_edge(&mut self, x: V, y: V) -> bool {
///
/// # Errors
///
/// If the edge would introduce the cycle, the underlying graph is not modified and a list of
/// all the edge data in the would-be cycle is returned instead.
pub(crate) fn add_edge(&mut self, x: V, y: V, e: impl FnOnce() -> E) -> Result<(), Vec<E>>
where
E: Clone,
{
if x == y {
// self-edges are not considered cycles
return true;
// self-edges are always considered cycles
return Err(Vec::new());
}
let (_, out_edges, ub) = self.add_node(x);
if !out_edges.insert(y) {
// Edge already exists, nothing to be done
return true;
}
match out_edges.entry(y) {
Entry::Occupied(_) => {
// Edge already exists, nothing to be done
return Ok(());
}
Entry::Vacant(entry) => entry.insert(e()),
};
let (in_edges, _, lb) = self.add_node(y);
@@ -96,25 +137,25 @@ where
if lb < ub {
// This edge might introduce a cycle, need to recompute the topological sort
let mut visited = HashSet::new();
let mut visited = [x, y].into_iter().collect();
let mut delta_f = Vec::new();
let mut delta_b = Vec::new();
if !self.dfs_f(y, ub, &mut visited, &mut delta_f) {
if let Err(cycle) = self.dfs_f(&self.nodes[&y], ub, &mut visited, &mut delta_f) {
// This edge introduces a cycle, so we want to reject it and remove it from the
// graph again to keep the "does not contain cycles" invariant.
// We use map instead of unwrap to avoid panicking in this error path, though we know that these
// entries are present as we just added them above.
self.in_edges.get_mut(&y).map(|nodes| nodes.remove(&x));
self.out_edges.get_mut(&x).map(|nodes| nodes.remove(&y));
self.nodes.get_mut(&y).map(|node| node.in_edges.remove(&x));
self.nodes.get_mut(&x).map(|node| node.out_edges.remove(&y));
// No edge was added
return false;
return Err(cycle);
}
// No need to check as we should've found the cycle on the forward pass
self.dfs_b(x, lb, &mut visited, &mut delta_b);
self.dfs_b(&self.nodes[&x], lb, &mut visited, &mut delta_b);
// Original paper keeps it around but this saves us from clearing it
drop(visited);
@@ -122,56 +163,71 @@ where
self.reorder(delta_f, delta_b);
}
true
Ok(())
}
/// Forwards depth-first-search
fn dfs_f(&self, n: V, ub: Order, visited: &mut HashSet<V>, delta_f: &mut Vec<V>) -> bool {
visited.insert(n);
fn dfs_f<'a>(
&'a self,
n: &'a Node<V, E>,
ub: Order,
visited: &mut HashSet<V>,
delta_f: &mut Vec<&'a Node<V, E>>,
) -> Result<(), Vec<E>>
where
E: Clone,
{
delta_f.push(n);
self.out_edges[&n].iter().all(|w| {
let order = self.ord[w];
for (w, e) in &n.out_edges {
let node = &self.nodes[w];
let ord = node.ord.get();
if order == ub {
if ord == ub {
// Found a cycle
false
} else if !visited.contains(w) && order < ub {
return Err(vec![e.clone()]);
} else if !visited.contains(w) && ord < ub {
// Need to check recursively
self.dfs_f(*w, ub, visited, delta_f)
} else {
// Already seen this one or not interesting
true
visited.insert(*w);
if let Err(mut chain) = self.dfs_f(node, ub, visited, delta_f) {
chain.push(e.clone());
return Err(chain);
}
}
})
}
Ok(())
}
/// Backwards depth-first-search
fn dfs_b(&self, n: V, lb: Order, visited: &mut HashSet<V>, delta_b: &mut Vec<V>) {
visited.insert(n);
fn dfs_b<'a>(
&'a self,
n: &'a Node<V, E>,
lb: Order,
visited: &mut HashSet<V>,
delta_b: &mut Vec<&'a Node<V, E>>,
) {
delta_b.push(n);
for w in &self.in_edges[&n] {
if !visited.contains(w) && lb < self.ord[w] {
self.dfs_b(*w, lb, visited, delta_b);
for w in &n.in_edges {
let node = &self.nodes[w];
if !visited.contains(w) && lb < node.ord.get() {
visited.insert(*w);
self.dfs_b(node, lb, visited, delta_b);
}
}
}
fn reorder(&mut self, mut delta_f: Vec<V>, mut delta_b: Vec<V>) {
fn reorder(&self, mut delta_f: Vec<&Node<V, E>>, mut delta_b: Vec<&Node<V, E>>) {
self.sort(&mut delta_f);
self.sort(&mut delta_b);
let mut l = Vec::with_capacity(delta_f.len() + delta_b.len());
let mut orders = Vec::with_capacity(delta_f.len() + delta_b.len());
for w in delta_b {
orders.push(self.ord[&w]);
l.push(w);
}
for v in delta_f {
orders.push(self.ord[&v]);
for v in delta_b.into_iter().chain(delta_f) {
orders.push(v.ord.get());
l.push(v);
}
@@ -180,34 +236,90 @@ where
orders.sort_unstable();
for (node, order) in l.into_iter().zip(orders) {
self.ord.insert(node, order);
node.ord.set(order);
}
}
fn sort(&self, ids: &mut [V]) {
fn sort(&self, ids: &mut [&Node<V, E>]) {
// Can use unstable sort because mutex ids should not be equal
ids.sort_unstable_by_key(|v| self.ord[v]);
ids.sort_unstable_by_key(|v| &v.ord);
}
}
// Manual `Default` impl as derive causes unnecessarily strong bounds.
impl<V, E> Default for DiGraph<V, E>
where
V: Eq + Hash + Copy,
{
fn default() -> Self {
Self {
nodes: Default::default(),
unused_order: Default::default(),
}
}
}
#[cfg(test)]
mod tests {
use rand::seq::SliceRandom;
use rand::thread_rng;
use super::*;
fn nop() {}
#[test]
fn test_no_self_cycle() {
// Regression test for https://github.com/bertptrs/tracing-mutex/issues/7
let mut graph = DiGraph::default();
assert!(graph.add_edge(1, 1, nop).is_err());
}
#[test]
fn test_digraph() {
let mut graph = DiGraph::default();
// Add some safe edges
assert!(graph.add_edge(0, 1));
assert!(graph.add_edge(1, 2));
assert!(graph.add_edge(2, 3));
assert!(graph.add_edge(4, 2));
assert!(graph.add_edge(0, 1, nop).is_ok());
assert!(graph.add_edge(1, 2, nop).is_ok());
assert!(graph.add_edge(2, 3, nop).is_ok());
assert!(graph.add_edge(4, 2, nop).is_ok());
// Try to add an edge that introduces a cycle
assert!(!graph.add_edge(3, 1));
assert!(graph.add_edge(3, 1, nop).is_err());
// Add an edge that should reorder 0 to be after 4
assert!(graph.add_edge(4, 0));
assert!(graph.add_edge(4, 0, nop).is_ok());
}
/// Fuzz the DiGraph implementation by adding a bunch of valid edges.
///
/// This test generates all possible forward edges in a 100-node graph consisting of natural
/// numbers, shuffles them, then adds them to the graph. This will always be a valid directed,
/// acyclic graph because there is a trivial order (the natural numbers) but because the edges
/// are added in a random order the DiGraph will still occasionally need to reorder nodes.
#[test]
fn fuzz_digraph() {
// Note: this fuzzer is quadratic in the number of nodes, so this cannot be too large or it
// will slow down the tests too much.
const NUM_NODES: usize = 100;
let mut edges = Vec::with_capacity(NUM_NODES * NUM_NODES);
for i in 0..NUM_NODES {
for j in i..NUM_NODES {
if i != j {
edges.push((i, j));
}
}
}
edges.shuffle(&mut thread_rng());
let mut graph = DiGraph::default();
for (x, y) in edges {
assert!(graph.add_edge(x, y, nop).is_ok());
}
}
}


@@ -18,8 +18,23 @@
//! # Structure
//!
//! Each module in this crate exposes wrappers for a specific base-mutex with dependency tracking
//! added. For now, that is limited to [`stdsync`] which provides wrappers for the base locks in the
//! standard library. More back-ends may be added as features in the future.
//! added. This includes [`stdsync`] which provides wrappers for the base locks in the standard
//! library, and more depending on enabled compile-time features. More back-ends may be added as
//! features in the future.
//!
//! # Feature flags
//!
//! `tracing-mutex` uses feature flags to reduce the impact of this crate on both your compile time
//! and runtime overhead. Below are the available flags. Modules are annotated with the features
//! they require.
//!
//! - `backtraces`: Enables capturing backtraces of mutex dependencies, to make it easier to
//! determine what sequence of events would trigger a deadlock. This is enabled by default, but if
//! the performance overhead is unacceptable, it can be disabled by disabling default features.
//!
//! - `lockapi`: Enables the wrapper lock for [`lock_api`][lock_api] locks
//!
//! - `parkinglot`: Enables wrapper types for [`parking_lot`][parking_lot] mutexes
//!
//! # Performance considerations
//!
@@ -41,36 +56,50 @@
//!
//! These operations have been reasonably optimized, but the performance penalty may yet be too much
//! for production use. In those cases, it may be beneficial to instead use debug-only versions
//! (such as [`stdsync::DebugMutex`]) which evaluate to a tracing mutex when debug assertions are
//! (such as [`stdsync::Mutex`]) which evaluate to a tracing mutex when debug assertions are
//! enabled, and to the underlying mutex when they're not.
//!
//! For ease of debugging, this crate will, by default, capture a backtrace when establishing a new
//! dependency between two mutexes. This has an additional overhead of over 60%. If this additional
//! debugging aid is not required, it can be disabled by disabling default features.
//!
//! [paper]: https://whileydave.com/publications/pk07_jea/
//! [lock_api]: https://docs.rs/lock_api/0.4/lock_api/index.html
//! [parking_lot]: https://docs.rs/parking_lot/0.12.1/parking_lot/
#![cfg_attr(docsrs, feature(doc_cfg))]
use std::cell::RefCell;
use std::cell::UnsafeCell;
use std::fmt;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ptr;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Mutex;
use std::sync::Once;
use std::sync::MutexGuard;
use std::sync::OnceLock;
use std::sync::PoisonError;
use lazy_static::lazy_static;
#[cfg(feature = "lockapi")]
#[cfg_attr(docsrs, doc(cfg(feature = "lockapi")))]
pub use lock_api;
#[cfg(feature = "parkinglot")]
#[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
pub use parking_lot;
use reporting::Dep;
use reporting::Reportable;
use crate::graph::DiGraph;
mod graph;
#[cfg(feature = "lockapi")]
#[cfg_attr(docsrs, doc(cfg(feature = "lockapi")))]
pub mod lockapi;
#[cfg(feature = "parkinglot")]
#[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
pub mod parkinglot;
mod reporting;
pub mod stdsync;
/// Counter for Mutex IDs. Atomic avoids the need for locking.
///
/// Should be part of the `MutexID` impl but static items are not yet a thing.
static ID_SEQUENCE: AtomicUsize = AtomicUsize::new(0);
thread_local! {
/// Stack to track which locks are held
///
@@ -79,19 +108,12 @@ thread_local! {
static HELD_LOCKS: RefCell<Vec<usize>> = RefCell::new(Vec::new());
}
lazy_static! {
static ref DEPENDENCY_GRAPH: Mutex<DiGraph<usize>> = Default::default();
}
/// Dedicated ID type for Mutexes
///
/// # Unstable
///
/// This type is currently private to prevent usage while the exact implementation is figured out,
/// but it will likely be public in the future.
///
/// One possible alteration is to make this type not `Copy` but `Drop`, and handle deregistering
/// the lock from there.
struct MutexId(usize);
impl MutexId {
@@ -104,6 +126,9 @@ impl MutexId {
/// This function may panic when there are no more mutex IDs available. The number of mutex ids
/// is `usize::MAX - 1` which should be plenty for most practical applications.
pub fn new() -> Self {
// Counter for Mutex IDs. Atomic avoids the need for locking.
static ID_SEQUENCE: AtomicUsize = AtomicUsize::new(0);
ID_SEQUENCE
.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |id| id.checked_add(1))
.map(Self)
@@ -123,23 +148,50 @@ impl MutexId {
///
/// This method panics if the new dependency would introduce a cycle.
pub fn get_borrowed(&self) -> BorrowedMutex {
let creates_cycle = HELD_LOCKS.with(|locks| {
self.mark_held();
BorrowedMutex {
id: self,
_not_send: PhantomData,
}
}
/// Mark this lock as held for the purposes of dependency tracking.
///
/// # Panics
///
/// This method panics if the new dependency would introduce a cycle.
pub fn mark_held(&self) {
let opt_cycle = HELD_LOCKS.with(|locks| {
if let Some(&previous) = locks.borrow().last() {
let mut graph = get_dependency_graph();
!graph.add_edge(previous, self.value())
graph.add_edge(previous, self.value(), Dep::capture).err()
} else {
false
None
}
});
if creates_cycle {
// Panic without holding the lock to avoid needlessly poisoning it
panic!("Mutex order graph should not have cycles");
if let Some(cycle) = opt_cycle {
panic!("{}", Dep::panic_message(&cycle))
}
HELD_LOCKS.with(|locks| locks.borrow_mut().push(self.value()));
BorrowedMutex(self)
}
pub unsafe fn mark_released(&self) {
HELD_LOCKS.with(|locks| {
let mut locks = locks.borrow_mut();
for (i, &lock) in locks.iter().enumerate().rev() {
if lock == self.value() {
locks.remove(i);
return;
}
}
// Drop impls shouldn't panic but if this happens something is seriously broken.
unreachable!("Tried to drop lock for mutex {:?} but it wasn't held", self)
});
}
}
@@ -169,17 +221,13 @@ impl Drop for MutexId {
///
/// This type can be largely replaced once std::lazy gets stabilized.
struct LazyMutexId {
inner: UnsafeCell<MaybeUninit<MutexId>>,
setter: Once,
_marker: PhantomData<MutexId>,
inner: OnceLock<MutexId>,
}
impl LazyMutexId {
pub const fn new() -> Self {
Self {
inner: UnsafeCell::new(MaybeUninit::uninit()),
setter: Once::new(),
_marker: PhantomData,
inner: OnceLock::new(),
}
}
}
@@ -190,51 +238,36 @@ impl fmt::Debug for LazyMutexId {
}
}
/// Safety: the UnsafeCell is guaranteed to only be accessed mutably from a `Once`.
unsafe impl Sync for LazyMutexId {}
impl Default for LazyMutexId {
fn default() -> Self {
Self::new()
}
}
impl Deref for LazyMutexId {
type Target = MutexId;
fn deref(&self) -> &Self::Target {
self.setter.call_once(|| {
// Safety: this function is only called once, so only one mutable reference should exist
// at a time.
unsafe {
*self.inner.get() = MaybeUninit::new(MutexId::new());
}
});
// Safety: after the above Once runs, there are no longer any mutable references, so we can
// hand this out safely.
//
// Explanation of this monstrosity:
//
// - Get a pointer to the data from the UnsafeCell
// - Dereference that to get a reference to the underlying MaybeUninit
// - Use as_ptr on MaybeUninit to get a pointer to the initialized MutexID
// - Dereference the pointer to turn in into a reference as intended.
//
// This should get slightly nicer once `maybe_uninit_extra` is stabilized.
unsafe { &*((*self.inner.get()).as_ptr()) }
}
}
impl Drop for LazyMutexId {
fn drop(&mut self) {
if self.setter.is_completed() {
// We have a valid mutex ID and need to drop it
// Safety: we know that this pointer is valid because the initializer has successfully run.
let mutex_id = unsafe { ptr::read((*self.inner.get()).as_ptr()) };
drop(mutex_id);
}
self.inner.get_or_init(MutexId::new)
}
}
/// Borrowed mutex ID
///
/// This type should be used as part of a mutex guard wrapper. It can be acquired through
/// [`MutexId::get_borrowed`] and will automatically mark the mutex as not borrowed when it is
/// dropped.
///
/// This type intentionally is [`!Send`](std::marker::Send) because the ownership tracking is based
/// on a thread-local stack which doesn't work if a guard gets released in a different thread from
/// where they're acquired.
#[derive(Debug)]
struct BorrowedMutex<'a>(&'a MutexId);
struct BorrowedMutex<'a> {
/// Reference to the mutex we're borrowing from
id: &'a MutexId,
/// This value serves no purpose but to make the type [`!Send`](std::marker::Send)
_not_send: PhantomData<MutexGuard<'static, ()>>,
}
/// Drop a lock held by the current thread.
///
@@ -244,33 +277,26 @@ struct BorrowedMutex<'a>(&'a MutexId);
/// that is an indication of a serious design flaw in this library.
impl<'a> Drop for BorrowedMutex<'a> {
fn drop(&mut self) {
let id = self.0;
HELD_LOCKS.with(|locks| {
let mut locks = locks.borrow_mut();
for (i, &lock) in locks.iter().enumerate().rev() {
if lock == id.value() {
locks.remove(i);
return;
}
}
// Drop impls shouldn't panic but if this happens something is seriously broken.
unreachable!("Tried to drop lock for mutex {:?} but it wasn't held", id)
});
// Safety: the only way to get a BorrowedMutex is by locking the mutex.
unsafe { self.id.mark_released() };
}
}
/// Get a reference to the current dependency graph
fn get_dependency_graph() -> impl DerefMut<Target = DiGraph<usize>> {
fn get_dependency_graph() -> impl DerefMut<Target = DiGraph<usize, Dep>> {
static DEPENDENCY_GRAPH: OnceLock<Mutex<DiGraph<usize, Dep>>> = OnceLock::new();
DEPENDENCY_GRAPH
.get_or_init(Default::default)
.lock()
.unwrap_or_else(PoisonError::into_inner)
}
#[cfg(test)]
mod tests {
use rand::seq::SliceRandom;
use rand::thread_rng;
use super::*;
#[test]
@@ -289,11 +315,11 @@ mod tests {
let c = LazyMutexId::new();
let mut graph = get_dependency_graph();
assert!(graph.add_edge(a.value(), b.value()));
assert!(graph.add_edge(b.value(), c.value()));
assert!(graph.add_edge(a.value(), b.value(), Dep::capture).is_ok());
assert!(graph.add_edge(b.value(), c.value(), Dep::capture).is_ok());
// Creating an edge c → a should fail as it introduces a cycle.
assert!(!graph.add_edge(c.value(), a.value()));
assert!(graph.add_edge(c.value(), a.value(), Dep::capture).is_err());
// Drop graph handle so we can drop vertices without deadlocking
drop(graph);
@@ -301,6 +327,50 @@ mod tests {
drop(b);
// If b's destructor correctly ran correctly we can now add an edge from c to a.
assert!(get_dependency_graph().add_edge(c.value(), a.value()));
assert!(get_dependency_graph()
.add_edge(c.value(), a.value(), Dep::capture)
.is_ok());
}
/// Test creating a cycle, then panicking.
#[test]
#[should_panic]
fn test_mutex_id_conflict() {
let ids = [MutexId::new(), MutexId::new(), MutexId::new()];
for i in 0..3 {
let _first_lock = ids[i].get_borrowed();
let _second_lock = ids[(i + 1) % 3].get_borrowed();
}
}
/// Fuzz the global dependency graph by fake-acquiring lots of mutexes in a valid order.
///
/// This test generates all possible forward edges in a 100-node graph consisting of natural
/// numbers, shuffles them, then adds them to the graph. This will always be a valid directed,
/// acyclic graph because there is a trivial order (the natural numbers) but because the edges
//! are added in a random order the DiGraph will still occasionally need to reorder nodes.
#[test]
fn fuzz_mutex_id() {
const NUM_NODES: usize = 100;
let ids: Vec<MutexId> = (0..NUM_NODES).map(|_| Default::default()).collect();
let mut edges = Vec::with_capacity(NUM_NODES * NUM_NODES);
for i in 0..NUM_NODES {
for j in i..NUM_NODES {
if i != j {
edges.push((i, j));
}
}
}
edges.shuffle(&mut thread_rng());
for (x, y) in edges {
// Acquire the mutexes, smallest first to ensure a cycle-free graph
let _ignored = ids[x].get_borrowed();
let _ = ids[y].get_borrowed();
}
}
}

src/lockapi.rs (new file)

@@ -0,0 +1,348 @@
//! Wrapper implementations for [`lock_api`].
//!
//! This module does not provide any particular mutex implementation by itself, but rather can be
//! used to add dependency tracking to mutexes that already exist. It implements all of the traits
//! in `lock_api` based on the one it wraps. Crates such as `spin` and `parking_lot` provide base
//! primitives that can be wrapped.
//!
//! Wrapped mutexes are at least one `usize` larger than the types they wrapped, and must be aligned
//! to `usize` boundaries. As such, libraries with many mutexes may want to consider the additional
//! required memory.
use lock_api::GuardNoSend;
use lock_api::RawMutex;
use lock_api::RawMutexFair;
use lock_api::RawMutexTimed;
use lock_api::RawRwLock;
use lock_api::RawRwLockDowngrade;
use lock_api::RawRwLockFair;
use lock_api::RawRwLockRecursive;
use lock_api::RawRwLockRecursiveTimed;
use lock_api::RawRwLockTimed;
use lock_api::RawRwLockUpgrade;
use lock_api::RawRwLockUpgradeDowngrade;
use lock_api::RawRwLockUpgradeFair;
use lock_api::RawRwLockUpgradeTimed;
use crate::LazyMutexId;
/// Tracing wrapper for all [`lock_api`] traits.
///
/// This wrapper implements any of the locking traits available, given that the wrapped type
/// implements them. As such, this wrapper can be used both for normal mutexes and rwlocks.
#[derive(Debug, Default)]
pub struct TracingWrapper<T> {
inner: T,
// Need to use a lazy mutex ID to initialize statically.
id: LazyMutexId,
}
impl<T> TracingWrapper<T> {
/// Mark this lock as held in the dependency graph.
fn mark_held(&self) {
self.id.mark_held();
}
/// Mark this lock as released in the dependency graph.
///
/// # Safety
///
/// This function should only be called when the lock has been previously acquired by this
/// thread.
unsafe fn mark_released(&self) {
self.id.mark_released();
}
/// First mark ourselves as held, then call the locking function.
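///
/// Marking before calling the inner locking function means an inconsistent acquisition order
/// panics up front, reporting the potential deadlock instead of actually blocking on the lock.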
fn lock(&self, f: impl FnOnce()) {
self.mark_held();
f();
}
/// First call the unlocking function, then mark ourselves as released.
unsafe fn unlock(&self, f: impl FnOnce()) {
f();
self.mark_released();
}
/// Conditionally lock the mutex.
///
/// First marks the lock as held, then runs the provided locking function. If that function
/// returns true the lock is kept; otherwise the mutex is immediately marked as released.
///
/// # Returns
///
/// The value returned from the callback.
fn conditionally_lock(&self, f: impl FnOnce() -> bool) -> bool {
// Mark as held while we attempt to acquire the lock
self.mark_held();
if f() {
true
} else {
// Safety: we just locked it above.
unsafe { self.mark_released() }
false
}
}
}
unsafe impl<T> RawMutex for TracingWrapper<T>
where
T: RawMutex,
{
const INIT: Self = Self {
inner: T::INIT,
id: LazyMutexId::new(),
};
/// Always equal to [`GuardNoSend`], as an implementation detail in the tracking system requires
/// this behaviour. May change in the future to reflect the actual guard type from the wrapped
/// primitive.
type GuardMarker = GuardNoSend;
fn lock(&self) {
self.lock(|| self.inner.lock());
}
fn try_lock(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock())
}
unsafe fn unlock(&self) {
self.unlock(|| self.inner.unlock());
}
fn is_locked(&self) -> bool {
// Can't use the default implementation as the inner type might have overridden it.
self.inner.is_locked()
}
}
unsafe impl<T> RawMutexFair for TracingWrapper<T>
where
T: RawMutexFair,
{
unsafe fn unlock_fair(&self) {
self.unlock(|| self.inner.unlock_fair())
}
unsafe fn bump(&self) {
// Bumping effectively doesn't change which locks are held, so we don't need to manage the
// lock state.
self.inner.bump();
}
}
unsafe impl<T> RawMutexTimed for TracingWrapper<T>
where
T: RawMutexTimed,
{
type Duration = T::Duration;
type Instant = T::Instant;
fn try_lock_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_for(timeout))
}
fn try_lock_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_until(timeout))
}
}
unsafe impl<T> RawRwLock for TracingWrapper<T>
where
T: RawRwLock,
{
const INIT: Self = Self {
inner: T::INIT,
id: LazyMutexId::new(),
};
/// Always equal to [`GuardNoSend`], as an implementation detail in the tracking system requires
/// this behaviour. May change in the future to reflect the actual guard type from the wrapped
/// primitive.
type GuardMarker = GuardNoSend;
fn lock_shared(&self) {
self.lock(|| self.inner.lock_shared());
}
fn try_lock_shared(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared())
}
unsafe fn unlock_shared(&self) {
self.unlock(|| self.inner.unlock_shared());
}
fn lock_exclusive(&self) {
self.lock(|| self.inner.lock_exclusive());
}
fn try_lock_exclusive(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive())
}
unsafe fn unlock_exclusive(&self) {
self.unlock(|| self.inner.unlock_exclusive());
}
fn is_locked(&self) -> bool {
self.inner.is_locked()
}
}
unsafe impl<T> RawRwLockDowngrade for TracingWrapper<T>
where
T: RawRwLockDowngrade,
{
unsafe fn downgrade(&self) {
// Downgrading does not require tracking
self.inner.downgrade()
}
}
unsafe impl<T> RawRwLockUpgrade for TracingWrapper<T>
where
T: RawRwLockUpgrade,
{
fn lock_upgradable(&self) {
self.lock(|| self.inner.lock_upgradable());
}
fn try_lock_upgradable(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable())
}
unsafe fn unlock_upgradable(&self) {
self.unlock(|| self.inner.unlock_upgradable());
}
unsafe fn upgrade(&self) {
self.inner.upgrade();
}
unsafe fn try_upgrade(&self) -> bool {
self.inner.try_upgrade()
}
}
unsafe impl<T> RawRwLockFair for TracingWrapper<T>
where
T: RawRwLockFair,
{
unsafe fn unlock_shared_fair(&self) {
self.unlock(|| self.inner.unlock_shared_fair());
}
unsafe fn unlock_exclusive_fair(&self) {
self.unlock(|| self.inner.unlock_exclusive_fair());
}
unsafe fn bump_shared(&self) {
self.inner.bump_shared();
}
unsafe fn bump_exclusive(&self) {
self.inner.bump_exclusive();
}
}
unsafe impl<T> RawRwLockRecursive for TracingWrapper<T>
where
T: RawRwLockRecursive,
{
fn lock_shared_recursive(&self) {
self.lock(|| self.inner.lock_shared_recursive());
}
fn try_lock_shared_recursive(&self) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive())
}
}
unsafe impl<T> RawRwLockRecursiveTimed for TracingWrapper<T>
where
T: RawRwLockRecursiveTimed,
{
fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive_for(timeout))
}
fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_recursive_until(timeout))
}
}
unsafe impl<T> RawRwLockTimed for TracingWrapper<T>
where
T: RawRwLockTimed,
{
type Duration = T::Duration;
type Instant = T::Instant;
fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_for(timeout))
}
fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_shared_until(timeout))
}
fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive_for(timeout))
}
fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_exclusive_until(timeout))
}
}
unsafe impl<T> RawRwLockUpgradeDowngrade for TracingWrapper<T>
where
T: RawRwLockUpgradeDowngrade,
{
unsafe fn downgrade_upgradable(&self) {
self.inner.downgrade_upgradable()
}
unsafe fn downgrade_to_upgradable(&self) {
self.inner.downgrade_to_upgradable()
}
}
unsafe impl<T> RawRwLockUpgradeFair for TracingWrapper<T>
where
T: RawRwLockUpgradeFair,
{
unsafe fn unlock_upgradable_fair(&self) {
self.unlock(|| self.inner.unlock_upgradable_fair())
}
unsafe fn bump_upgradable(&self) {
self.inner.bump_upgradable()
}
}
unsafe impl<T> RawRwLockUpgradeTimed for TracingWrapper<T>
where
T: RawRwLockUpgradeTimed,
{
fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable_for(timeout))
}
fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool {
self.conditionally_lock(|| self.inner.try_lock_upgradable_until(timeout))
}
unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool {
self.inner.try_upgrade_for(timeout)
}
unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool {
self.inner.try_upgrade_until(timeout)
}
}

src/parkinglot.rs

@@ -0,0 +1,236 @@
//! Wrapper types and type aliases for tracing [`parking_lot`] mutexes.
//!
//! This module provides type aliases that use the [`lockapi`][crate::lockapi] module to provide
//! tracing variants of the `parking_lot` primitives. The [`tracing`] module contains type aliases
//! that use dependency tracking, while the main `parking_lot` primitives are reexported as [`raw`].
//!
//! This main module imports from [`tracing`] when `debug_assertions` are enabled, and from [`raw`]
//! when they're not. Note that primitives for which no tracing wrapper exists are not imported into
//! the main module.
//!
//! # Usage
//!
//! ```
//! # use std::sync::Arc;
//! # use std::thread;
//! use tracing_mutex::parkinglot::Mutex;
//! let mutex = Arc::new(Mutex::new(0));
//!
//! let handles: Vec<_> = (0..10).map(|_| {
//! let mutex = Arc::clone(&mutex);
//! thread::spawn(move || *mutex.lock() += 1)
//! }).collect();
//!
//! handles.into_iter().for_each(|handle| handle.join().unwrap());
//!
//! // All threads completed so the value should be 10.
//! assert_eq!(10, *mutex.lock());
//! ```
//!
//! # Limitations
//!
//! The main lock for the global state is still provided by `std::sync`, and the tracing
//! primitives are larger than the `parking_lot` primitives they wrap, so these wrappers can be
//! slower than using `parking_lot` directly. If this is a concern, try the `DebugX` structs,
//! which provide cycle detection only when `debug_assertions` are enabled and have no overhead
//! when they're not.
//!
//! In addition, the mutex guards returned by the tracing wrappers are `!Send`, regardless of
//! whether `parking_lot` is configured to have `Send` mutex guards. This is a limitation of the
//! current bookkeeping system.
pub use parking_lot as raw;
#[cfg(debug_assertions)]
pub use tracing::{
FairMutex, FairMutexGuard, MappedFairMutexGuard, MappedMutexGuard, MappedReentrantMutexGuard,
MappedRwLockReadGuard, MappedRwLockWriteGuard, Mutex, MutexGuard, Once, OnceState,
ReentrantMutex, ReentrantMutexGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard,
RwLockWriteGuard,
};
#[cfg(not(debug_assertions))]
pub use parking_lot::{
FairMutex, FairMutexGuard, MappedFairMutexGuard, MappedMutexGuard, MappedReentrantMutexGuard,
MappedRwLockReadGuard, MappedRwLockWriteGuard, Mutex, MutexGuard, Once, OnceState,
ReentrantMutex, ReentrantMutexGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard,
RwLockWriteGuard,
};
/// Dependency tracing wrappers for [`parking_lot`].
pub mod tracing {
pub use parking_lot::OnceState;
use crate::lockapi::TracingWrapper;
use crate::LazyMutexId;
type RawFairMutex = TracingWrapper<parking_lot::RawFairMutex>;
type RawMutex = TracingWrapper<parking_lot::RawMutex>;
type RawRwLock = TracingWrapper<parking_lot::RawRwLock>;
/// Dependency tracking fair mutex. See: [`parking_lot::FairMutex`].
pub type FairMutex<T> = lock_api::Mutex<RawFairMutex, T>;
/// Mutex guard for [`FairMutex`].
pub type FairMutexGuard<'a, T> = lock_api::MutexGuard<'a, RawFairMutex, T>;
/// RAII guard for [`FairMutexGuard::map`].
pub type MappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawFairMutex, T>;
/// Dependency tracking mutex. See: [`parking_lot::Mutex`].
pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;
/// Mutex guard for [`Mutex`].
pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;
/// RAII guard for [`MutexGuard::map`].
pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;
/// Dependency tracking reentrant mutex. See: [`parking_lot::ReentrantMutex`].
///
/// **Note:** due to the way dependencies are tracked, this mutex can only be acquired directly
/// after itself. Acquiring any other mutex in between introduces a dependency cycle, and will
/// therefore be rejected.
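///
/// A sketch of a sequence that would be rejected for this reason (illustrative, not a doctest):
///
/// ```ignore
/// let a = ReentrantMutex::new(());
/// let b = ReentrantMutex::new(());
///
/// let _a1 = a.lock(); // ok
/// let _b = b.lock();  // ok, records the dependency a → b
/// let _a2 = a.lock(); // panics: b → a would complete the cycle a → b → a
/// ```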
pub type ReentrantMutex<T> = lock_api::ReentrantMutex<RawMutex, parking_lot::RawThreadId, T>;
/// Mutex guard for [`ReentrantMutex`].
pub type ReentrantMutexGuard<'a, T> =
lock_api::ReentrantMutexGuard<'a, RawMutex, parking_lot::RawThreadId, T>;
/// RAII guard for `ReentrantMutexGuard::map`.
pub type MappedReentrantMutexGuard<'a, T> =
lock_api::MappedReentrantMutexGuard<'a, RawMutex, parking_lot::RawThreadId, T>;
/// Dependency tracking RwLock. See: [`parking_lot::RwLock`].
pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>;
/// Read guard for [`RwLock`].
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>;
/// Upgradable read guard for [`RwLock`].
pub type RwLockUpgradableReadGuard<'a, T> =
lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;
/// Write guard for [`RwLock`].
pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>;
/// RAII guard for `RwLockReadGuard::map`.
pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>;
/// RAII guard for `RwLockWriteGuard::map`.
pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>;
/// A dependency-tracking wrapper for [`parking_lot::Once`].
#[derive(Debug, Default)]
pub struct Once {
inner: parking_lot::Once,
id: LazyMutexId,
}
impl Once {
/// Create a new `Once` value.
pub const fn new() -> Self {
Self {
inner: parking_lot::Once::new(),
id: LazyMutexId::new(),
}
}
/// Returns the current state of this `Once`.
pub fn state(&self) -> OnceState {
self.inner.state()
}
/// Performs the given initialization routine once and only once.
///
/// This call is considered "locking this `Once`" and participates in dependency tracking as
/// such.
///
/// # Panics
///
/// This method will panic if `f` panics, poisoning this `Once`. In addition, this function
/// panics when the lock acquisition order is determined to be inconsistent.
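///
/// # Example
///
/// A minimal usage sketch; the same behaviour is exercised by `test_once_usage` below:
///
/// ```ignore
/// use tracing_mutex::parkinglot::tracing::Once;
///
/// static INIT: Once = Once::new();
/// INIT.call_once(|| { /* one-time setup */ });
/// assert!(INIT.state().done());
/// ```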
pub fn call_once(&self, f: impl FnOnce()) {
let _borrow = self.id.get_borrowed();
self.inner.call_once(f);
}
/// Performs the given initialization routine once and only once.
///
/// This method is identical to [`Once::call_once`] except it ignores poisoning.
pub fn call_once_force(&self, f: impl FnOnce(OnceState)) {
let _borrow = self.id.get_borrowed();
self.inner.call_once_force(f);
}
}
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use std::thread;
use super::tracing;
#[test]
fn test_mutex_usage() {
let mutex = Arc::new(tracing::Mutex::new(()));
let local_lock = mutex.lock();
drop(local_lock);
thread::spawn(move || {
let _remote_lock = mutex.lock();
})
.join()
.unwrap();
}
#[test]
#[should_panic]
fn test_mutex_conflict() {
let mutexes = [
tracing::Mutex::new(()),
tracing::Mutex::new(()),
tracing::Mutex::new(()),
];
for i in 0..3 {
let _first_lock = mutexes[i].lock();
let _second_lock = mutexes[(i + 1) % 3].lock();
}
}
#[test]
fn test_rwlock_usage() {
let lock = Arc::new(tracing::RwLock::new(()));
let lock2 = Arc::clone(&lock);
let _read_lock = lock.read();
// Should be able to acquire the read lock in the background
thread::spawn(move || {
let _read_lock = lock2.read();
})
.join()
.unwrap();
}
#[test]
fn test_rwlock_upgradable_read_usage() {
let lock = tracing::RwLock::new(());
// Should be able to acquire an upgradable read lock.
let upgradable_guard: tracing::RwLockUpgradableReadGuard<'_, _> = lock.upgradable_read();
// Should be able to upgrade the guard.
let _write_guard: tracing::RwLockWriteGuard<'_, _> =
tracing::RwLockUpgradableReadGuard::upgrade(upgradable_guard);
}
#[test]
fn test_once_usage() {
let once = Arc::new(tracing::Once::new());
let once_clone = once.clone();
assert!(!once_clone.state().done());
let handle = thread::spawn(move || {
assert!(!once_clone.state().done());
once_clone.call_once(|| {});
assert!(once_clone.state().done());
});
handle.join().unwrap();
assert!(once.state().done());
}
}

src/reporting.rs

@@ -0,0 +1,64 @@
//! Cycle reporting primitives
//!
//! This module exposes [`Dep`], which resolves either to a tracker that captures backtraces or
//! to a zero-cost placeholder, depending on the `backtraces` feature. It should only be assumed
//! to implement the [`Reportable`] trait.
use std::backtrace::Backtrace;
use std::borrow::Cow;
use std::fmt::Write;
use std::sync::Arc;
#[cfg(feature = "backtraces")]
pub type Dep = MutexDep<Arc<Backtrace>>;
#[cfg(not(feature = "backtraces"))]
pub type Dep = MutexDep<()>;
// Base message to be reported when a cycle is detected
const BASE_MESSAGE: &str = "Found cycle in mutex dependency graph:";
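/// Tracking data attached to each dependency edge, plus how to render it when a cycle is found.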
pub trait Reportable: Clone {
/// Capture the current state
fn capture() -> Self;
/// Format a trace of state for human readable consumption.
fn panic_message(trace: &[Self]) -> Cow<'static, str>;
}
#[derive(Clone)]
pub struct MutexDep<T>(T);
/// Use a unit as tracing data: no tracing.
///
/// This should have no runtime overhead for capturing traces and should therefore be cheap enough
/// for most purposes.
impl Reportable for MutexDep<()> {
fn capture() -> Self {
Self(())
}
fn panic_message(_trace: &[Self]) -> Cow<'static, str> {
Cow::Borrowed(BASE_MESSAGE)
}
}
/// Use a full backtrace as tracing data.
///
/// Captures the entire backtrace, which may be expensive. This implementation does not force a
/// capture when backtraces are disabled at runtime, so the exact overhead can still be
/// controlled somewhat.
///
/// N.B. the [`Backtrace`] needs to be wrapped in an [`Arc`] because backtraces are not [`Clone`].
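///
/// A minimal sketch of that runtime control (standard library behaviour, not specific to this
/// crate):
///
/// ```
/// use std::backtrace::{Backtrace, BacktraceStatus};
///
/// // Cheap unless RUST_BACKTRACE or RUST_LIB_BACKTRACE enables capture at runtime.
/// let bt = Backtrace::capture();
/// let captured = matches!(bt.status(), BacktraceStatus::Captured);
/// println!("backtrace captured: {captured}");
/// ```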
impl Reportable for MutexDep<Arc<Backtrace>> {
fn capture() -> Self {
Self(Arc::new(Backtrace::capture()))
}
fn panic_message(trace: &[Self]) -> Cow<'static, str> {
let mut message = format!("{BASE_MESSAGE}\n");
for entry in trace {
let _ = writeln!(message, "{}", entry.0);
}
message.into()
}
}

File diff suppressed because it is too large.