46 Commits

bors[bot]
43df59ac1c Merge #33
33: Prepare for release 0.3.0 r=bertptrs a=bertptrs

Double check

- [x] documentation
- [x] changelog
- [x] tests


Co-authored-by: Bert Peters <bert@bertptrs.nl>
2023-09-13 10:03:25 +00:00
1fe44d0a05 Expand changelog 2023-09-11 08:27:06 +02:00
c9083c8bc1 Clarify feature selection 2023-09-09 12:02:52 +02:00
bors[bot]
d8c559fd3f Merge #32
32: Capture backtraces for mutex dependencies r=bertptrs a=bertptrs

Builds on top of #28.

This PR adds backtrace data to the dependency graph, so you can figure out what series of events might have introduced the cycle in dependencies. Only the first backtrace for each dependency is captured; once an edge already exists in the graph it is not recorded again.

These changes do come with a performance penalty, with a worst case of 20-50% degradation over previous results. That worst case only occurs when every dependency between mutexes is new, so in practice the impact is unlikely to be as severe.

Below is an example of what this can look like, generated with `examples/mutex_cycle.rs`. The formatting is decidedly suboptimal, but backtraces cannot be formatted very well in stable Rust at the moment. The exact performance hit depends on several factors, such as the level of backtrace captured (off, 1, or full) and how many dependencies are involved.

```
thread 'main' panicked at 'Found cycle in mutex dependency graph:
   0: tracing_mutex::MutexDep::capture
             at ./src/lib.rs:278:23
   1: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
   2: tracing_mutex::graph::DiGraph<V,E>::add_edge
             at ./src/graph.rs:131:50
   3: tracing_mutex::MutexId::mark_held::{{closure}}
             at ./src/lib.rs:146:17
   4: std::thread::local::LocalKey<T>::try_with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:270:16
   5: std::thread::local::LocalKey<T>::with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:246:9
   6: tracing_mutex::MutexId::mark_held
             at ./src/lib.rs:142:25
   7: tracing_mutex::MutexId::get_borrowed
             at ./src/lib.rs:129:9
   8: tracing_mutex::stdsync::tracing::Mutex<T>::lock
             at ./src/stdsync.rs:110:25
   9: mutex_cycle::main
             at ./examples/mutex_cycle.rs:20:18
  10: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
  11: std::sys_common::backtrace::__rust_begin_short_backtrace
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/sys_common/backtrace.rs:135:18
  12: std::rt::lang_start::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:166:18
  13: core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &F>::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:284:13
  14: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  15: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  16: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  17: std::rt::lang_start_internal::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:48
  18: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  19: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  20: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  21: std::rt::lang_start_internal
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:20
  22: std::rt::lang_start
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:165:17
  23: main
  24: <unknown>
  25: __libc_start_main
  26: _start

   0: tracing_mutex::MutexDep::capture
             at ./src/lib.rs:278:23
   1: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
   2: tracing_mutex::graph::DiGraph<V,E>::add_edge
             at ./src/graph.rs:131:50
   3: tracing_mutex::MutexId::mark_held::{{closure}}
             at ./src/lib.rs:146:17
   4: std::thread::local::LocalKey<T>::try_with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:270:16
   5: std::thread::local::LocalKey<T>::with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:246:9
   6: tracing_mutex::MutexId::mark_held
             at ./src/lib.rs:142:25
   7: tracing_mutex::MutexId::get_borrowed
             at ./src/lib.rs:129:9
   8: tracing_mutex::stdsync::tracing::Mutex<T>::lock
             at ./src/stdsync.rs:110:25
   9: mutex_cycle::main
             at ./examples/mutex_cycle.rs:14:18
  10: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
  11: std::sys_common::backtrace::__rust_begin_short_backtrace
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/sys_common/backtrace.rs:135:18
  12: std::rt::lang_start::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:166:18
  13: core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &F>::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:284:13
  14: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  15: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  16: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  17: std::rt::lang_start_internal::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:48
  18: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  19: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  20: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  21: std::rt::lang_start_internal
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:20
  22: std::rt::lang_start
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:165:17
  23: main
  24: <unknown>
  25: __libc_start_main
  26: _start

', src/lib.rs:163:13
stack backtrace:
   0: rust_begin_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:593:5
   1: core::panicking::panic_fmt
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/panicking.rs:67:14
   2: tracing_mutex::MutexId::mark_held
             at ./src/lib.rs:163:13
   3: tracing_mutex::MutexId::get_borrowed
             at ./src/lib.rs:129:9
   4: tracing_mutex::stdsync::tracing::Mutex<T>::lock
             at ./src/stdsync.rs:110:25
   5: mutex_cycle::main
             at ./examples/mutex_cycle.rs:25:14
   6: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
```

Importantly, the error shows all the dependencies that are already part of the graph, not the one that was just added, since that is already visible from the immediate panic.

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2023-09-09 09:24:46 +00:00
a8e8af6351 Make dependency tracking a compile time setting 2023-09-09 11:21:22 +02:00
068303d81d Show cycle backtraces when they happen 2023-09-09 11:21:22 +02:00
6be3e05cab Capture backtraces of allocations for debugging
Largely based on https://github.com/bertptrs/tracing-mutex/pull/28 with
only minor modifications.
2023-08-27 16:44:02 +02:00
909e934572 Reuse dependency orderings in graph
This avoids a potential panic when adding new nodes to the graph, as
there is no feasible way to overflow IDs any more.
2023-08-27 15:48:57 +02:00
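
A minimal sketch of the reuse scheme this commit describes, assuming a simple free list of discarded orderings (the real change lives in the `src/graph.rs` diff further down; names here are illustrative):

```rust
// Orders released by removed nodes are handed out again before any new order is
// created, so the largest order ever used is bounded by the peak number of live
// nodes and the counter can no longer realistically overflow.
struct OrderPool {
    live: usize,        // orders currently in use (stand-in for nodes.len())
    unused: Vec<usize>, // orders returned by removed nodes
}

impl OrderPool {
    fn allocate(&mut self) -> usize {
        // Prefer a recycled order; otherwise the number of live orders is free.
        let order = self.unused.pop().unwrap_or(self.live);
        self.live += 1;
        order
    }

    fn release(&mut self, order: usize) {
        self.live -= 1;
        self.unused.push(order);
    }
}

fn main() {
    let mut pool = OrderPool { live: 0, unused: Vec::new() };
    let a = pool.allocate(); // 0
    let b = pool.allocate(); // 1
    pool.release(a);
    let c = pool.allocate(); // reuses 0 instead of growing to 2
    assert_eq!((b, c), (1, 0));
}
```
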
bors[bot]
0ae544a07a Merge #31
31: Update CI dependencies r=bertptrs a=bertptrs

actions-rs uses deprecated features, so move to dtolnay/rust-toolchain for the toolchain and just use regular run commands for everything else

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2023-08-27 12:39:35 +00:00
4148d509bf Update CI dependencies
actions-rs uses deprecated features, so move to dtolnay/rust-toolchain for the
toolchain and just use regular run commands for everything else
2023-08-27 14:35:40 +02:00
fc1593b76f Bump criterion version
0.3 uses some Rust features that will not work in the future, so upgrade
while we're moving things anyway
2023-08-27 14:25:14 +02:00
8f19921e9e Merge pull request #30 from bertptrs/remove-lazy-static 2023-08-27 11:26:58 +02:00
00420d6807 Implement wrapper for OnceLock 2023-08-26 00:58:54 +02:00
49b15bb6bd Bump MSRV to 1.70 2023-08-26 00:58:54 +02:00
29c9daf53e Replace dependency on lazy-static with OnceLock 2023-08-25 08:44:45 +02:00
bors[bot]
8feedb09d2 Merge #27
27: Add MSRV of 1.63 r=bertptrs a=bertptrs

Fixes #26.

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-08-29 06:34:17 +00:00
de9888a102 Update documentation with MSRV 2022-08-29 08:32:17 +02:00
2d2e03eede Simplify lazy mutex ID drop 2022-08-29 08:26:12 +02:00
e9b577a0f5 Make stdsync wrappers const-constructible 2022-08-27 10:33:15 +02:00
5f6823394d Build and test with Rust 1.63 2022-08-27 10:26:42 +02:00
bors[bot]
61d19f866c Merge #25
25: Restructure modules r=bertptrs a=bertptrs

The `TracingFoo` and `DebugFoo` versions of every `Foo` resulted in quite verbose type names everywhere. This PR restructures them into separate modules. The new modules map onto the old types as follows:

- `tracing_mutex::foo::TracingFoo` -> `tracing_mutex::foo::tracing::Foo`
- `tracing_mutex::foo::DebugFoo` -> `tracing_mutex::foo::Foo`
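
For illustration, a minimal sketch of what the rename means for imports, using the `stdsync` paths from the README and benchmark updates in this changeset (the aliases are only there for contrast):

```rust
// Old (0.2.x) spelling, for comparison:
//   use tracing_mutex::stdsync::TracingMutex; // always traces
//   use tracing_mutex::stdsync::DebugMutex;   // traces only under debug_assertions
// New (0.3.0) spelling: the module path now carries that distinction.
use tracing_mutex::stdsync::tracing::Mutex as AlwaysTracedMutex;
use tracing_mutex::stdsync::Mutex as DebugTracedMutex;

fn main() {
    let always = AlwaysTracedMutex::new(42);
    let debug_only = DebugTracedMutex::new(42);

    *always.lock().unwrap() += 1;
    *debug_only.lock().unwrap() += 1;

    println!("{:?} {:?}", always, debug_only);
}
```
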

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-08-27 08:18:29 +00:00
f78969ebf7 Update documentation 2022-08-27 10:08:51 +02:00
56b0604448 Restructure parking_lot wrappers 2022-08-27 10:06:31 +02:00
6e5516eaa7 Restructure std::sync wrappers 2022-08-27 10:01:51 +02:00
764d3df454 Add parking_lot to changelog 2022-08-24 10:28:51 +02:00
bors[bot]
e543860d8b Merge #24
24: Update parking_lot dependency to 0.12 r=bertptrs a=djkoloski

The changelog for parking_lot 0.12 can be found [here](https://github.com/Amanieu/parking_lot/blob/master/CHANGELOG.md#parking_lot-0120-parking_lot_core-090-lock_api-046-2022-01-28):
```
- The MSRV is bumped to 1.49.0.
- Disabled eventual fairness on wasm32-unknown-unknown. (#302)
- Added a rwlock method to report if lock is held exclusively. (#303)
- Use new asm! macro. (#304)
- Use windows-rs instead of winapi for faster builds. (#311)
- Moved hardware lock elision support to a separate Cargo feature. (#313)
- Removed used of deprecated spin_loop_hint. (#314)
```

Co-authored-by: David Koloski <dkoloski@google.com>
2022-08-24 08:20:55 +00:00
David Koloski
ed04552af3 Update parking_lot dependency to 0.12 2022-08-23 11:34:31 -04:00
bors[bot]
c5a506436c Merge #23
23: Ensure `BorrowedMutex` is `!Send` r=bertptrs a=bertptrs

This should prevent the bugs found in #22.
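
A minimal sketch of the technique, assuming the marker-field approach that is visible in the `src/lib.rs` diff further down (type names here are illustrative):

```rust
use std::marker::PhantomData;
use std::sync::MutexGuard;

// A zero-sized PhantomData of a !Send type (std's MutexGuard) makes the whole
// struct !Send on stable Rust, so the borrow cannot be released on another thread.
struct BorrowedMutexSketch<'a> {
    id: &'a usize, // stand-in for the real &MutexId
    _not_send: PhantomData<MutexGuard<'static, ()>>,
}

fn main() {
    fn assert_send<T: Send>(_: &T) {}

    let id = 0usize;
    let borrowed = BorrowedMutexSketch { id: &id, _not_send: PhantomData };

    assert_send(&id); // a shared reference to usize is Send...
    // assert_send(&borrowed); // ...but this would not compile: the marker removes Send
    println!("borrowed mutex id: {}", borrowed.id);
}
```
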

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-06-23 20:02:10 +00:00
33cb6014a3 Ensure BorrowedMutex is !Send 2022-06-23 21:54:25 +02:00
5232bac582 Bump version 2022-05-23 08:59:47 +02:00
bors[bot]
6472f4b807 Merge #21
21: Prepare for release v0.2.1 r=bertptrs a=bertptrs



Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-23 06:55:45 +00:00
6afe7b1c48 Update README and CHANGELOG 2022-05-23 08:53:56 +02:00
9238ef53ee Update copyright 2022-05-23 08:37:26 +02:00
bors[bot]
c08addff7d Merge #17
17: Fix typos r=bertptrs a=quisar



Co-authored-by: Benjamin Lerman <qsr@chromium.org>
2022-05-23 06:33:21 +00:00
bors[bot]
c1ce9df8ad Merge #19
19: Add a wrapper for `std::sync::Condvar` r=bertptrs a=bertptrs

This wrapper does not do any tracing itself but supports the use of a tracing mutex guard instead of an `std::sync` one.
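
A short usage sketch, assuming the wrapper mirrors the `std::sync::Condvar` API; the paths are written in the post-#25 module layout (`stdsync::tracing`), so treat the exact names as illustrative:

```rust
use std::sync::Arc;
use std::thread;

use tracing_mutex::stdsync::tracing::{Condvar, Mutex};

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock().unwrap() = true;
        // Wake the waiting thread once the flag is set.
        cvar.notify_one();
    });

    let (lock, cvar) = &*pair;
    let mut ready = lock.lock().unwrap();
    while !*ready {
        // wait() takes and hands back the *tracing* mutex guard, not a std::sync one.
        ready = cvar.wait(ready).unwrap();
    }
}
```
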

Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-17 19:50:02 +00:00
312eaa8649 Add a wrapper for std::sync::Condvar
This wrapper does not do any tracing itself but supports the use of a
tracing mutex guard instead of an `std::sync` one.
2022-05-17 21:45:25 +02:00
bors[bot]
1f7e6921aa Merge #18
18: Enable bors for nicer merging r=bertptrs a=bertptrs



Co-authored-by: Bert Peters <bert@bertptrs.nl>
2022-05-15 21:40:40 +00:00
f7048f265f Enable CI builds on staging/trying 2022-05-15 23:35:00 +02:00
64e56fdb86 Add minimal bors config 2022-05-15 23:35:00 +02:00
Benjamin Lerman
8e3278fdd2 Fix typos 2022-05-10 10:30:20 +02:00
9ea993e737 Add missing date 2022-05-07 18:15:50 +02:00
062850fc3e Merge pull request #16 from bertptrs/docsrs_feature_docs
Fix documentation builds for features
2022-05-07 17:56:09 +02:00
0d2622d5c6 Build documentation on CI 2022-05-07 17:52:32 +02:00
d1417e0b0c Tag module docs with their required features 2022-05-07 17:52:32 +02:00
fcc64e2cef Automatically build documentation for all features 2022-05-07 17:03:45 +02:00
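
For context, a sketch of the docs.rs feature-tagging pattern these commits set up, matching the `docsrs` cfg and Cargo metadata visible in the diffs below (illustrative, not a verbatim excerpt):

```rust
// Cargo.toml side (see the [package.metadata.docs.rs] section in the diff):
//   all-features = true
//   rustdoc-args = ["--cfg", "docsrs"]

// Crate root: the nightly-only doc_cfg feature is enabled only when docs.rs
// builds the crate with `--cfg docsrs`, so normal builds stay on stable.
#![cfg_attr(docsrs, feature(doc_cfg))]

/// Feature-gated module; the doc(cfg) attribute renders the
/// "available on crate feature lockapi only" banner on docs.rs.
#[cfg(feature = "lockapi")]
#[cfg_attr(docsrs, doc(cfg(feature = "lockapi")))]
pub mod lockapi {}

fn main() {}
```
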
fd0d05307c Update README and copyright year 2022-05-07 16:54:37 +02:00
14 changed files with 1159 additions and 796 deletions


@@ -2,48 +2,50 @@ on:
push: push:
branches: branches:
- master - master
- staging
- trying
pull_request: pull_request:
name: Continuous integration name: Continuous integration
jobs: jobs:
ci: tests:
name: Rust project name: Rust project
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
rust: rust:
- "1.70" # minimum stable rust version
- stable - stable
- beta - beta
- nightly - nightly
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1 - uses: dtolnay/rust-toolchain@v1
with: with:
profile: minimal
toolchain: ${{ matrix.rust }} toolchain: ${{ matrix.rust }}
override: true
components: rustfmt, clippy components: rustfmt, clippy
- uses: actions-rs/cargo@v1 - run: cargo build --all-features --all-targets
with: - run: cargo test --all-features
command: build - run: cargo fmt --all -- --check
# --all-targets ensures that we also build the benchmarks and tests already. - run: cargo clippy --all-features --all-targets -- -D warnings
args: --all-features --all-targets
- uses: actions-rs/cargo@v1 docs:
with: name: Documentation build
command: test runs-on: ubuntu-latest
args: --all-features
- uses: actions-rs/cargo@v1 steps:
with: - uses: actions/checkout@v3
command: fmt
args: --all -- --check
- uses: actions-rs/cargo@v1 - uses: dtolnay/rust-toolchain@v1
with: with:
command: clippy toolchain: nightly
args: --all-features --all-targets -- -D warnings
- name: Build documentation
env:
# Build the docs like docs.rs builds it
RUSTDOCFLAGS: --cfg docsrs
run: cargo doc --all-features


@@ -6,7 +6,45 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased] ## [Unreleased]
## [0.2.0] ## [0.3.0] - 2023-09-09
### Added
- The minimum supported Rust version is now defined as 1.70. Previously it was undefined.
- Wrappers for `std::sync` primitives can now be `const` constructed.
- Add support for `std::sync::OnceLock`
- Added backtraces of mutex allocations to the cycle report. Capturing backtraces does incur some
overhead, this can be mitigated by disabling the `backtraces` feature which is enabled by default.
### Breaking
- Update [`parking_lot`][parking_lot] dependency to `0.12`.
- Restructured the crate to reduce typename verbosity. Wrapper names now match the name of the
primitive they wrap. Specific always/debug tracing versions have now moved to separate modules.
For example, `tracing_mutex::stdsync::TracingMutex` is now
`tracing_mutex::stdsync::tracing::Mutex`, and `tracing_mutex::stdsync::DebugMutex` is now called
`tracing_mutex::stdsync::Mutex`. This hopefully reduces the visual noise while reading code that
uses this in practice. Unwrapped primitives are reexported under `tracing_mutex::stdsync::raw` for
convenience.
### Fixed
- Enforce that all internal mutex guards are `!Send`. They already should be according to other
reasons, but this adds extra security through the type system.
## [0.2.1] - 2022-05-23
### Added
- Build [docs.rs] documentation with all features enabled for completeness.
- Add support for `std::sync::Condvar`
### Fixed
- The `parkinglot` module is now correctly enabled by the `parkinglot` feature rather than the
`lockapi` feature.
## [0.2.0] - 2022-05-07
### Added ### Added
- Generic support for wrapping mutexes that implement the traits provided by the - Generic support for wrapping mutexes that implement the traits provided by the
@@ -55,11 +93,14 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
Initial release. Initial release.
[Unreleased]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.0...HEAD [Unreleased]: https://github.com/bertptrs/tracing-mutex/compare/v0.3.0...HEAD
[0.3.0]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.1...v0.3.0
[0.2.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.0...v0.2.1
[0.2.0]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.2...v0.2.0 [0.2.0]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.2...v0.2.0
[0.1.2]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.1...v0.1.2 [0.1.2]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.1...v0.1.2
[0.1.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.0...v0.1.1 [0.1.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.0...v0.1.1
[0.1.0]: https://github.com/bertptrs/tracing-mutex/releases/tag/v0.1.0 [0.1.0]: https://github.com/bertptrs/tracing-mutex/releases/tag/v0.1.0
[docs.rs]: https://docs.rs/tracing-mutex/latest/tracing_mutex/
[lock_api]: https://docs.rs/lock_api/ [lock_api]: https://docs.rs/lock_api/
[parking_lot]: https://docs.rs/parking_lot/ [parking_lot]: https://docs.rs/parking_lot/


@@ -1,6 +1,6 @@
[package] [package]
name = "tracing-mutex" name = "tracing-mutex"
version = "0.2.0" version = "0.3.0"
authors = ["Bert Peters <bert@bertptrs.nl>"] authors = ["Bert Peters <bert@bertptrs.nl>"]
edition = "2021" edition = "2021"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
@@ -10,14 +10,20 @@ keywords = ["mutex", "rwlock", "once", "thread"]
description = "Ensure deadlock-free mutexes by allocating in order, or else." description = "Ensure deadlock-free mutexes by allocating in order, or else."
readme = "README.md" readme = "README.md"
repository = "https://github.com/bertptrs/tracing-mutex" repository = "https://github.com/bertptrs/tracing-mutex"
rust-version = "1.70"
[package.metadata.docs.rs]
# Build docs for all features so the documentation is more complete
all-features = true
# Set custom cfg so we can enable docs.rs magic
rustdoc-args = ["--cfg", "docsrs"]
[dependencies] [dependencies]
lazy_static = "1"
lock_api = { version = "0.4", optional = true } lock_api = { version = "0.4", optional = true }
parking_lot = { version = "0.11", optional = true } parking_lot = { version = "0.12", optional = true }
[dev-dependencies] [dev-dependencies]
criterion = "0.3" criterion = "0.5"
rand = "0.8" rand = "0.8"
[[bench]] [[bench]]
@@ -25,6 +31,8 @@ name = "mutex"
harness = false harness = false
[features] [features]
default = ["backtraces"]
backtraces = []
# Feature names do not match crate names pending namespaced features. # Feature names do not match crate names pending namespaced features.
lockapi = ["lock_api"] lockapi = ["lock_api"]
parkinglot = ["parking_lot", "lockapi"] parkinglot = ["parking_lot", "lockapi"]


@@ -186,7 +186,7 @@ APPENDIX: How to apply the Apache License to your work.
same "printed page" as the copyright notice for easier same "printed page" as the copyright notice for easier
identification within third-party archives. identification within third-party archives.
Copyright [yyyy] [name of copyright owner] Copyright 2022 Bert Peters
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.


@@ -1,4 +1,4 @@
Copyright © 2021 Bert Peters Copyright © 2022 Bert Peters
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the “Software”), to deal in the Software without restriction, associated documentation files (the “Software”), to deal in the Software without restriction,


@@ -23,10 +23,12 @@ tree out of it, and panics if your dependencies would create a cycle. It provide
existing synchronization primitives with an identical API, and should be a drop-in replacement. existing synchronization primitives with an identical API, and should be a drop-in replacement.
Inspired by [this blogpost][whileydave], which references a similar behaviour implemented by Inspired by [this blogpost][whileydave], which references a similar behaviour implemented by
[Abseil][abseil-mutex] for their mutexes. [Abseil][abseil-mutex] for their mutexes. [This article goes into more depth on the exact
implementation.][article]
[whileydave]: https://whileydave.com/2020/12/19/dynamic-cycle-detection-for-lock-ordering/ [whileydave]: https://whileydave.com/2020/12/19/dynamic-cycle-detection-for-lock-ordering/
[abseil-mutex]: https://abseil.io/docs/cpp/guides/synchronization [abseil-mutex]: https://abseil.io/docs/cpp/guides/synchronization
[article]: https://bertptrs.nl/2022/06/23/deadlock-free-mutexes-and-directed-acyclic-graphs.html
## Usage ## Usage
@@ -34,7 +36,7 @@ Add this dependency to your `Cargo.lock` file like any other:
```toml ```toml
[dependencies] [dependencies]
tracing-mutex = "0.1" tracing-mutex = "0.2"
``` ```
Then use the locks provided by this library instead of the ones you would use otherwise. Then use the locks provided by this library instead of the ones you would use otherwise.
@@ -42,9 +44,9 @@ Replacements for the synchronization primitives in `std::sync` can be found in t
Support for other synchronization primitives is planned. Support for other synchronization primitives is planned.
```rust ```rust
use tracing_mutex::stdsync::TracingMutex; use tracing_mutex::stdsync::Mutex;
let some_mutex = TracingMutex::new(42); let some_mutex = Mutex::new(42);
*some_mutex.lock().unwrap() += 1; *some_mutex.lock().unwrap() += 1;
println!("{:?}", some_mutex); println!("{:?}", some_mutex);
``` ```
@@ -59,12 +61,26 @@ performance penalty in your production environment, this library also offers deb
when debug assertions are enabled, and to `Mutex` when they are not. Similar helper types are when debug assertions are enabled, and to `Mutex` when they are not. Similar helper types are
available for other synchronization primitives. available for other synchronization primitives.
The minimum supported Rust version is 1.70. Increasing this is not considered a breaking change, but
will be avoided within semver-compatible releases if possible.
### Features
- Dependency-tracking wrappers for all locking primitives
- Optional opt-out for release mode code
- Support for primitives from:
- `std::sync`
- `parking_lot`
- Any library that implements the `lock_api` traits
## Future improvements ## Future improvements
- Improve performance in lock tracing - Improve performance in lock tracing
- Optional logging to make debugging easier - Optional logging to make debugging easier
- Better and configurable error handling when detecting cyclic dependencies - Better and configurable error handling when detecting cyclic dependencies
- Support for other locking libraries, such as `parking_lot` - Support for other locking libraries
- Support for async locking libraries
- Support for `Send` mutex guards
**Note:** `parking_lot` has already began work on its own deadlock detection mechanism, which works **Note:** `parking_lot` has already began work on its own deadlock detection mechanism, which works
in a different way. Both can be complimentary. in a different way. Both can be complimentary.


@@ -7,7 +7,7 @@ use criterion::BenchmarkId;
use criterion::Criterion; use criterion::Criterion;
use criterion::Throughput; use criterion::Throughput;
use rand::prelude::*; use rand::prelude::*;
use tracing_mutex::stdsync::TracingMutex; use tracing_mutex::stdsync::tracing::Mutex as TracingMutex;
const SAMPLE_SIZES: [usize; 5] = [10, 30, 100, 300, 1000]; const SAMPLE_SIZES: [usize; 5] = [10, 30, 100, 300, 1000];

bors.toml (new file, 6 lines)

@@ -0,0 +1,6 @@
status = [
'Rust project (1.70)',
'Rust project (stable)',
'Rust project (beta)',
'Documentation build',
]

examples/mutex_cycle.rs (new file, 26 lines)

@@ -0,0 +1,26 @@
//! Show what a crash looks like
//!
//! This shows what a traceback of a cycle detection looks like. It is expected to crash.
use tracing_mutex::stdsync::Mutex;
fn main() {
let a = Mutex::new(());
let b = Mutex::new(());
let c = Mutex::new(());
// Create an edge from a to b
{
let _a = a.lock();
let _b = b.lock();
}
// Create an edge from b to c
{
let _b = b.lock();
let _c = c.lock();
}
// Now crash by trying to add an edge from c to a
let _c = c.lock();
let _a = a.lock(); // This line will crash
}


@@ -1,4 +1,5 @@
use std::cell::Cell; use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::collections::HashMap; use std::collections::HashMap;
use std::collections::HashSet; use std::collections::HashSet;
use std::hash::Hash; use std::hash::Hash;
@@ -19,23 +20,24 @@ type Order = usize;
/// visibly changed. /// visibly changed.
/// ///
/// [paper]: https://whileydave.com/publications/pk07_jea/ /// [paper]: https://whileydave.com/publications/pk07_jea/
#[derive(Default, Debug)] #[derive(Debug)]
pub struct DiGraph<V> pub struct DiGraph<V, E>
where where
V: Eq + Hash + Copy, V: Eq + Hash + Copy,
{ {
nodes: HashMap<V, Node<V>>, nodes: HashMap<V, Node<V, E>>,
/// Next topological sort order // Instead of reordering the orders in the graph whenever a node is deleted, we maintain a list
next_ord: Order, // of unused ids that can be handed out later again.
unused_order: Vec<Order>,
} }
#[derive(Debug)] #[derive(Debug)]
struct Node<V> struct Node<V, E>
where where
V: Eq + Hash + Clone, V: Eq + Hash + Clone,
{ {
in_edges: HashSet<V>, in_edges: HashSet<V>,
out_edges: HashSet<V>, out_edges: HashMap<V, E>,
// The "Ord" field is a Cell to ensure we can update it in an immutable context. // The "Ord" field is a Cell to ensure we can update it in an immutable context.
// `std::collections::HashMap` doesn't let you have multiple mutable references to elements, but // `std::collections::HashMap` doesn't let you have multiple mutable references to elements, but
// this way we can use immutable references and still update `ord`. This saves quite a few // this way we can use immutable references and still update `ord`. This saves quite a few
@@ -43,7 +45,7 @@ where
ord: Cell<Order>, ord: Cell<Order>,
} }
impl<V> DiGraph<V> impl<V, E> DiGraph<V, E>
where where
V: Eq + Hash + Copy, V: Eq + Hash + Copy,
{ {
@@ -54,12 +56,18 @@ where
/// the node in the topological order. /// the node in the topological order.
/// ///
/// New nodes are appended to the end of the topological order when added. /// New nodes are appended to the end of the topological order when added.
fn add_node(&mut self, n: V) -> (&mut HashSet<V>, &mut HashSet<V>, Order) { fn add_node(&mut self, n: V) -> (&mut HashSet<V>, &mut HashMap<V, E>, Order) {
let next_ord = &mut self.next_ord; // need to compute next id before the call to entry() to avoid duplicate borrow of nodes
let fallback_id = self.nodes.len();
let node = self.nodes.entry(n).or_insert_with(|| { let node = self.nodes.entry(n).or_insert_with(|| {
let order = *next_ord; let order = if let Some(id) = self.unused_order.pop() {
*next_ord = next_ord.checked_add(1).expect("Topological order overflow"); // Reuse discarded ordering entry
id
} else {
// Allocate new order id
fallback_id
};
Node { Node {
ord: Cell::new(order), ord: Cell::new(order),
@@ -77,9 +85,12 @@ where
Some(Node { Some(Node {
out_edges, out_edges,
in_edges, in_edges,
.. ord,
}) => { }) => {
out_edges.into_iter().for_each(|m| { // Return ordering to the pool of unused ones
self.unused_order.push(ord.get());
out_edges.into_keys().for_each(|m| {
self.nodes.get_mut(&m).unwrap().in_edges.remove(&n); self.nodes.get_mut(&m).unwrap().in_edges.remove(&n);
}); });
@@ -96,18 +107,29 @@ where
/// ///
/// Nodes, both from and to, are created as needed when creating new edges. If the new edge /// Nodes, both from and to, are created as needed when creating new edges. If the new edge
/// would introduce a cycle, the edge is rejected and `false` is returned. /// would introduce a cycle, the edge is rejected and `false` is returned.
pub(crate) fn add_edge(&mut self, x: V, y: V) -> bool { ///
/// # Errors
///
/// If the edge would introduce the cycle, the underlying graph is not modified and a list of
/// all the edge data in the would-be cycle is returned instead.
pub(crate) fn add_edge(&mut self, x: V, y: V, e: impl FnOnce() -> E) -> Result<(), Vec<E>>
where
E: Clone,
{
if x == y { if x == y {
// self-edges are always considered cycles // self-edges are always considered cycles
return false; return Err(Vec::new());
} }
let (_, out_edges, ub) = self.add_node(x); let (_, out_edges, ub) = self.add_node(x);
if !out_edges.insert(y) { match out_edges.entry(y) {
// Edge already exists, nothing to be done Entry::Occupied(_) => {
return true; // Edge already exists, nothing to be done
} return Ok(());
}
Entry::Vacant(entry) => entry.insert(e()),
};
let (in_edges, _, lb) = self.add_node(y); let (in_edges, _, lb) = self.add_node(y);
@@ -119,7 +141,7 @@ where
let mut delta_f = Vec::new(); let mut delta_f = Vec::new();
let mut delta_b = Vec::new(); let mut delta_b = Vec::new();
if !self.dfs_f(&self.nodes[&y], ub, &mut visited, &mut delta_f) { if let Err(cycle) = self.dfs_f(&self.nodes[&y], ub, &mut visited, &mut delta_f) {
// This edge introduces a cycle, so we want to reject it and remove it from the // This edge introduces a cycle, so we want to reject it and remove it from the
// graph again to keep the "does not contain cycles" invariant. // graph again to keep the "does not contain cycles" invariant.
@@ -129,7 +151,7 @@ where
self.nodes.get_mut(&x).map(|node| node.out_edges.remove(&y)); self.nodes.get_mut(&x).map(|node| node.out_edges.remove(&y));
// No edge was added // No edge was added
return false; return Err(cycle);
} }
// No need to check as we should've found the cycle on the forward pass // No need to check as we should've found the cycle on the forward pass
@@ -141,44 +163,49 @@ where
self.reorder(delta_f, delta_b); self.reorder(delta_f, delta_b);
} }
true Ok(())
} }
/// Forwards depth-first-search /// Forwards depth-first-search
fn dfs_f<'a>( fn dfs_f<'a>(
&'a self, &'a self,
n: &'a Node<V>, n: &'a Node<V, E>,
ub: Order, ub: Order,
visited: &mut HashSet<V>, visited: &mut HashSet<V>,
delta_f: &mut Vec<&'a Node<V>>, delta_f: &mut Vec<&'a Node<V, E>>,
) -> bool { ) -> Result<(), Vec<E>>
where
E: Clone,
{
delta_f.push(n); delta_f.push(n);
n.out_edges.iter().all(|w| { for (w, e) in &n.out_edges {
let node = &self.nodes[w]; let node = &self.nodes[w];
let ord = node.ord.get(); let ord = node.ord.get();
if ord == ub { if ord == ub {
// Found a cycle // Found a cycle
false return Err(vec![e.clone()]);
} else if !visited.contains(w) && ord < ub { } else if !visited.contains(w) && ord < ub {
// Need to check recursively // Need to check recursively
visited.insert(*w); visited.insert(*w);
self.dfs_f(node, ub, visited, delta_f) if let Err(mut chain) = self.dfs_f(node, ub, visited, delta_f) {
} else { chain.push(e.clone());
// Already seen this one or not interesting return Err(chain);
true }
} }
}) }
Ok(())
} }
/// Backwards depth-first-search /// Backwards depth-first-search
fn dfs_b<'a>( fn dfs_b<'a>(
&'a self, &'a self,
n: &'a Node<V>, n: &'a Node<V, E>,
lb: Order, lb: Order,
visited: &mut HashSet<V>, visited: &mut HashSet<V>,
delta_b: &mut Vec<&'a Node<V>>, delta_b: &mut Vec<&'a Node<V, E>>,
) { ) {
delta_b.push(n); delta_b.push(n);
@@ -192,7 +219,7 @@ where
} }
} }
fn reorder(&self, mut delta_f: Vec<&Node<V>>, mut delta_b: Vec<&Node<V>>) { fn reorder(&self, mut delta_f: Vec<&Node<V, E>>, mut delta_b: Vec<&Node<V, E>>) {
self.sort(&mut delta_f); self.sort(&mut delta_f);
self.sort(&mut delta_b); self.sort(&mut delta_b);
@@ -213,12 +240,25 @@ where
} }
} }
fn sort(&self, ids: &mut [&Node<V>]) { fn sort(&self, ids: &mut [&Node<V, E>]) {
// Can use unstable sort because mutex ids should not be equal // Can use unstable sort because mutex ids should not be equal
ids.sort_unstable_by_key(|v| &v.ord); ids.sort_unstable_by_key(|v| &v.ord);
} }
} }
// Manual `Default` impl as derive causes unnecessarily strong bounds.
impl<V, E> Default for DiGraph<V, E>
where
V: Eq + Hash + Copy,
{
fn default() -> Self {
Self {
nodes: Default::default(),
unused_order: Default::default(),
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
@@ -226,12 +266,14 @@ mod tests {
use super::*; use super::*;
fn nop() {}
#[test] #[test]
fn test_no_self_cycle() { fn test_no_self_cycle() {
// Regression test for https://github.com/bertptrs/tracing-mutex/issues/7 // Regression test for https://github.com/bertptrs/tracing-mutex/issues/7
let mut graph = DiGraph::default(); let mut graph = DiGraph::default();
assert!(!graph.add_edge(1, 1)); assert!(graph.add_edge(1, 1, nop).is_err());
} }
#[test] #[test]
@@ -239,16 +281,16 @@ mod tests {
let mut graph = DiGraph::default(); let mut graph = DiGraph::default();
// Add some safe edges // Add some safe edges
assert!(graph.add_edge(0, 1)); assert!(graph.add_edge(0, 1, nop).is_ok());
assert!(graph.add_edge(1, 2)); assert!(graph.add_edge(1, 2, nop).is_ok());
assert!(graph.add_edge(2, 3)); assert!(graph.add_edge(2, 3, nop).is_ok());
assert!(graph.add_edge(4, 2)); assert!(graph.add_edge(4, 2, nop).is_ok());
// Try to add an edge that introduces a cycle // Try to add an edge that introduces a cycle
assert!(!graph.add_edge(3, 1)); assert!(graph.add_edge(3, 1, nop).is_err());
// Add an edge that should reorder 0 to be after 4 // Add an edge that should reorder 0 to be after 4
assert!(graph.add_edge(4, 0)); assert!(graph.add_edge(4, 0, nop).is_ok());
} }
/// Fuzz the DiGraph implementation by adding a bunch of valid edges. /// Fuzz the DiGraph implementation by adding a bunch of valid edges.
@@ -256,7 +298,7 @@ mod tests {
/// This test generates all possible forward edges in a 100-node graph consisting of natural /// This test generates all possible forward edges in a 100-node graph consisting of natural
/// numbers, shuffles them, then adds them to the graph. This will always be a valid directed, /// numbers, shuffles them, then adds them to the graph. This will always be a valid directed,
/// acyclic graph because there is a trivial order (the natural numbers) but because the edges /// acyclic graph because there is a trivial order (the natural numbers) but because the edges
/// are added in a random order the DiGraph will still occassionally need to reorder nodes. /// are added in a random order the DiGraph will still occasionally need to reorder nodes.
#[test] #[test]
fn fuzz_digraph() { fn fuzz_digraph() {
// Note: this fuzzer is quadratic in the number of nodes, so this cannot be too large or it // Note: this fuzzer is quadratic in the number of nodes, so this cannot be too large or it
@@ -277,7 +319,7 @@ mod tests {
let mut graph = DiGraph::default(); let mut graph = DiGraph::default();
for (x, y) in edges { for (x, y) in edges {
assert!(graph.add_edge(x, y)); assert!(graph.add_edge(x, y, nop).is_ok());
} }
} }
} }


@@ -18,8 +18,23 @@
//! # Structure //! # Structure
//! //!
//! Each module in this crate exposes wrappers for a specific base-mutex with dependency trakcing //! Each module in this crate exposes wrappers for a specific base-mutex with dependency trakcing
//! added. For now, that is limited to [`stdsync`] which provides wrappers for the base locks in the //! added. This includes [`stdsync`] which provides wrappers for the base locks in the standard
//! standard library. More back-ends may be added as features in the future. //! library, and more depending on enabled compile-time features. More back-ends may be added as
//! features in the future.
//!
//! # Feature flags
//!
//! `tracing-mutex` uses feature flags to reduce the impact of this crate on both your compile time
//! and runtime overhead. Below are the available flags. Modules are annotated with the features
//! they require.
//!
//! - `backtraces`: Enables capturing backtraces of mutex dependencies, to make it easier to
//! determine what sequence of events would trigger a deadlock. This is enabled by default, but if
//! the performance overhead is unaccceptable, it can be disabled by disabling default features.
//!
//! - `lockapi`: Enables the wrapper lock for [`lock_api`][lock_api] locks
//!
//! - `parkinglot`: Enables wrapper types for [`parking_lot`][parking_lot] mutexes
//! //!
//! # Performance considerations //! # Performance considerations
//! //!
@@ -41,44 +56,50 @@
//! //!
//! These operations have been reasonably optimized, but the performance penalty may yet be too much //! These operations have been reasonably optimized, but the performance penalty may yet be too much
//! for production use. In those cases, it may be beneficial to instead use debug-only versions //! for production use. In those cases, it may be beneficial to instead use debug-only versions
//! (such as [`stdsync::DebugMutex`]) which evaluate to a tracing mutex when debug assertions are //! (such as [`stdsync::Mutex`]) which evaluate to a tracing mutex when debug assertions are
//! enabled, and to the underlying mutex when they're not. //! enabled, and to the underlying mutex when they're not.
//! //!
//! For ease of debugging, this crate will, by default, capture a backtrace when establishing a new
//! dependency between two mutexes. This has an additional overhead of over 60%. If this additional
//! debugging aid is not required, it can be disabled by disabling default features.
//!
//! [paper]: https://whileydave.com/publications/pk07_jea/ //! [paper]: https://whileydave.com/publications/pk07_jea/
//! [lock_api]: https://docs.rs/lock_api/0.4/lock_api/index.html
//! [parking_lot]: https://docs.rs/parking_lot/0.12.1/parking_lot/
#![cfg_attr(docsrs, feature(doc_cfg))]
use std::cell::RefCell; use std::cell::RefCell;
use std::cell::UnsafeCell;
use std::fmt; use std::fmt;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ops::Deref; use std::ops::Deref;
use std::ops::DerefMut; use std::ops::DerefMut;
use std::ptr;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
use std::sync::Mutex; use std::sync::Mutex;
use std::sync::Once; use std::sync::MutexGuard;
use std::sync::OnceLock;
use std::sync::PoisonError; use std::sync::PoisonError;
use lazy_static::lazy_static;
#[cfg(feature = "lockapi")] #[cfg(feature = "lockapi")]
#[cfg_attr(docsrs, doc(cfg(feature = "lockapi")))]
pub use lock_api; pub use lock_api;
#[cfg(feature = "parkinglot")] #[cfg(feature = "parkinglot")]
#[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
pub use parking_lot; pub use parking_lot;
use reporting::Dep;
use reporting::Reportable;
use crate::graph::DiGraph; use crate::graph::DiGraph;
mod graph; mod graph;
#[cfg(feature = "lockapi")] #[cfg(feature = "lockapi")]
#[cfg_attr(docsrs, doc(cfg(feature = "lockapi")))]
pub mod lockapi; pub mod lockapi;
#[cfg(feature = "lockapi")] #[cfg(feature = "parkinglot")]
#[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
pub mod parkinglot; pub mod parkinglot;
mod reporting;
pub mod stdsync; pub mod stdsync;
/// Counter for Mutex IDs. Atomic avoids the need for locking.
///
/// Should be part of the `MutexID` impl but static items are not yet a thing.
static ID_SEQUENCE: AtomicUsize = AtomicUsize::new(0);
thread_local! { thread_local! {
/// Stack to track which locks are held /// Stack to track which locks are held
/// ///
@@ -87,10 +108,6 @@ thread_local! {
static HELD_LOCKS: RefCell<Vec<usize>> = RefCell::new(Vec::new()); static HELD_LOCKS: RefCell<Vec<usize>> = RefCell::new(Vec::new());
} }
lazy_static! {
static ref DEPENDENCY_GRAPH: Mutex<DiGraph<usize>> = Default::default();
}
/// Dedicated ID type for Mutexes /// Dedicated ID type for Mutexes
/// ///
/// # Unstable /// # Unstable
@@ -109,6 +126,9 @@ impl MutexId {
/// This function may panic when there are no more mutex IDs available. The number of mutex ids /// This function may panic when there are no more mutex IDs available. The number of mutex ids
/// is `usize::MAX - 1` which should be plenty for most practical applications. /// is `usize::MAX - 1` which should be plenty for most practical applications.
pub fn new() -> Self { pub fn new() -> Self {
// Counter for Mutex IDs. Atomic avoids the need for locking.
static ID_SEQUENCE: AtomicUsize = AtomicUsize::new(0);
ID_SEQUENCE ID_SEQUENCE
.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |id| id.checked_add(1)) .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |id| id.checked_add(1))
.map(Self) .map(Self)
@@ -129,7 +149,10 @@ impl MutexId {
/// This method panics if the new dependency would introduce a cycle. /// This method panics if the new dependency would introduce a cycle.
pub fn get_borrowed(&self) -> BorrowedMutex { pub fn get_borrowed(&self) -> BorrowedMutex {
self.mark_held(); self.mark_held();
BorrowedMutex(self) BorrowedMutex {
id: self,
_not_send: PhantomData,
}
} }
/// Mark this lock as held for the purposes of dependency tracking. /// Mark this lock as held for the purposes of dependency tracking.
@@ -138,19 +161,18 @@ impl MutexId {
/// ///
/// This method panics if the new dependency would introduce a cycle. /// This method panics if the new dependency would introduce a cycle.
pub fn mark_held(&self) { pub fn mark_held(&self) {
let creates_cycle = HELD_LOCKS.with(|locks| { let opt_cycle = HELD_LOCKS.with(|locks| {
if let Some(&previous) = locks.borrow().last() { if let Some(&previous) = locks.borrow().last() {
let mut graph = get_dependency_graph(); let mut graph = get_dependency_graph();
!graph.add_edge(previous, self.value()) graph.add_edge(previous, self.value(), Dep::capture).err()
} else { } else {
false None
} }
}); });
if creates_cycle { if let Some(cycle) = opt_cycle {
// Panic without holding the lock to avoid needlessly poisoning it panic!("{}", Dep::panic_message(&cycle))
panic!("Mutex order graph should not have cycles");
} }
HELD_LOCKS.with(|locks| locks.borrow_mut().push(self.value())); HELD_LOCKS.with(|locks| locks.borrow_mut().push(self.value()));
@@ -199,17 +221,13 @@ impl Drop for MutexId {
/// ///
/// This type can be largely replaced once std::lazy gets stabilized. /// This type can be largely replaced once std::lazy gets stabilized.
struct LazyMutexId { struct LazyMutexId {
inner: UnsafeCell<MaybeUninit<MutexId>>, inner: OnceLock<MutexId>,
setter: Once,
_marker: PhantomData<MutexId>,
} }
impl LazyMutexId { impl LazyMutexId {
pub const fn new() -> Self { pub const fn new() -> Self {
Self { Self {
inner: UnsafeCell::new(MaybeUninit::uninit()), inner: OnceLock::new(),
setter: Once::new(),
_marker: PhantomData,
} }
} }
} }
@@ -226,51 +244,30 @@ impl Default for LazyMutexId {
} }
} }
/// Safety: the UnsafeCell is guaranteed to only be accessed mutably from a `Once`.
unsafe impl Sync for LazyMutexId {}
impl Deref for LazyMutexId { impl Deref for LazyMutexId {
type Target = MutexId; type Target = MutexId;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
self.setter.call_once(|| { self.inner.get_or_init(MutexId::new)
// Safety: this function is only called once, so only one mutable reference should exist
// at a time.
unsafe {
*self.inner.get() = MaybeUninit::new(MutexId::new());
}
});
// Safety: after the above Once runs, there are no longer any mutable references, so we can
// hand this out safely.
//
// Explanation of this monstrosity:
//
// - Get a pointer to the data from the UnsafeCell
// - Dereference that to get a reference to the underlying MaybeUninit
// - Use as_ptr on MaybeUninit to get a pointer to the initialized MutexID
// - Dereference the pointer to turn in into a reference as intended.
//
// This should get slightly nicer once `maybe_uninit_extra` is stabilized.
unsafe { &*((*self.inner.get()).as_ptr()) }
}
}
impl Drop for LazyMutexId {
fn drop(&mut self) {
if self.setter.is_completed() {
// We have a valid mutex ID and need to drop it
// Safety: we know that this pointer is valid because the initializer has successfully run.
let mutex_id = unsafe { ptr::read((*self.inner.get()).as_ptr()) };
drop(mutex_id);
}
} }
} }
/// Borrowed mutex ID
///
/// This type should be used as part of a mutex guard wrapper. It can be acquired through
/// [`MutexId::get_borrowed`] and will automatically mark the mutex as not borrowed when it is
/// dropped.
///
/// This type intentionally is [`!Send`](std::marker::Send) because the ownership tracking is based
/// on a thread-local stack which doesn't work if a guard gets released in a different thread from
/// where they're acquired.
#[derive(Debug)] #[derive(Debug)]
struct BorrowedMutex<'a>(&'a MutexId); struct BorrowedMutex<'a> {
/// Reference to the mutex we're borrowing from
id: &'a MutexId,
/// This value serves no purpose but to make the type [`!Send`](std::marker::Send)
_not_send: PhantomData<MutexGuard<'static, ()>>,
}
/// Drop a lock held by the current thread. /// Drop a lock held by the current thread.
/// ///
@@ -281,13 +278,16 @@ struct BorrowedMutex<'a>(&'a MutexId);
impl<'a> Drop for BorrowedMutex<'a> { impl<'a> Drop for BorrowedMutex<'a> {
fn drop(&mut self) { fn drop(&mut self) {
// Safety: the only way to get a BorrowedMutex is by locking the mutex. // Safety: the only way to get a BorrowedMutex is by locking the mutex.
unsafe { self.0.mark_released() }; unsafe { self.id.mark_released() };
} }
} }
/// Get a reference to the current dependency graph /// Get a reference to the current dependency graph
fn get_dependency_graph() -> impl DerefMut<Target = DiGraph<usize>> { fn get_dependency_graph() -> impl DerefMut<Target = DiGraph<usize, Dep>> {
static DEPENDENCY_GRAPH: OnceLock<Mutex<DiGraph<usize, Dep>>> = OnceLock::new();
DEPENDENCY_GRAPH DEPENDENCY_GRAPH
.get_or_init(Default::default)
.lock() .lock()
.unwrap_or_else(PoisonError::into_inner) .unwrap_or_else(PoisonError::into_inner)
} }
@@ -315,11 +315,11 @@ mod tests {
let c = LazyMutexId::new(); let c = LazyMutexId::new();
let mut graph = get_dependency_graph(); let mut graph = get_dependency_graph();
assert!(graph.add_edge(a.value(), b.value())); assert!(graph.add_edge(a.value(), b.value(), Dep::capture).is_ok());
assert!(graph.add_edge(b.value(), c.value())); assert!(graph.add_edge(b.value(), c.value(), Dep::capture).is_ok());
// Creating an edge c → a should fail as it introduces a cycle. // Creating an edge c → a should fail as it introduces a cycle.
assert!(!graph.add_edge(c.value(), a.value())); assert!(graph.add_edge(c.value(), a.value(), Dep::capture).is_err());
// Drop graph handle so we can drop vertices without deadlocking // Drop graph handle so we can drop vertices without deadlocking
drop(graph); drop(graph);
@@ -327,7 +327,9 @@ mod tests {
drop(b); drop(b);
// If b's destructor correctly ran correctly we can now add an edge from c to a. // If b's destructor correctly ran correctly we can now add an edge from c to a.
assert!(get_dependency_graph().add_edge(c.value(), a.value())); assert!(get_dependency_graph()
.add_edge(c.value(), a.value(), Dep::capture)
.is_ok());
} }
/// Test creating a cycle, then panicking. /// Test creating a cycle, then panicking.


@@ -1,19 +1,20 @@
//! Wrapper types and type aliases for tracing [`parking_lot`] mutexes. //! Wrapper types and type aliases for tracing [`parking_lot`] mutexes.
//! //!
//! This module provides type aliases that use the [`lockapi`][crate::lockapi] module to provide //! This module provides type aliases that use the [`lockapi`][crate::lockapi] module to provide
//! tracing variants of the `parking_lot` primitives. Each of the `TracingX` type aliases wraps an //! tracing variants of the `parking_lot` primitives. The [`tracing`] module contains type aliases
//! `X` in the `parkint_lot` api with dependency tracking, and a `DebugX` will refer to a `TracingX` //! that use dependency tracking, while the main `parking_lot` primitives are reexported as [`raw`].
//! when `debug_assertions` are enabled and to `X` when they're not. This can be used to aid //!
//! debugging in development while enjoying maximum performance in production. //! This main module imports from [`tracing`] when `debug_assertions` are enabled, and from [`raw`]
//! when they're not. Note that primitives for which no tracing wrapper exists are not imported into
//! the main module.
//! //!
//! # Usage //! # Usage
//! //!
//! ``` //! ```
//! # use std::sync::Arc; //! # use std::sync::Arc;
//! # use std::thread; //! # use std::thread;
//! # use lock_api::Mutex; //! use tracing_mutex::parkinglot::Mutex;
//! # use tracing_mutex::parkinglot::TracingMutex; //! let mutex = Arc::new(Mutex::new(0));
//! let mutex = Arc::new(TracingMutex::new(0));
//! //!
//! let handles: Vec<_> = (0..10).map(|_| { //! let handles: Vec<_> = (0..10).map(|_| {
//! let mutex = Arc::clone(&mutex); //! let mutex = Arc::clone(&mutex);
@@ -37,191 +38,130 @@
//! In addition, the mutex guards returned by the tracing wrappers are `!Send`, regardless of //! In addition, the mutex guards returned by the tracing wrappers are `!Send`, regardless of
//! whether `parking_lot` is configured to have `Send` mutex guards. This is a limitation of the //! whether `parking_lot` is configured to have `Send` mutex guards. This is a limitation of the
//! current bookkeeping system. //! current bookkeeping system.
use parking_lot::Once;
use parking_lot::OnceState;
use crate::lockapi::TracingWrapper; pub use parking_lot as raw;
use crate::LazyMutexId;
macro_rules! debug_variant { #[cfg(debug_assertions)]
($debug_name:ident, $tracing_name:ident, $normal_name:ty) => { pub use tracing::{
type $tracing_name = TracingWrapper<$normal_name>; FairMutex, FairMutexGuard, MappedFairMutexGuard, MappedMutexGuard, MappedReentrantMutexGuard,
MappedRwLockReadGuard, MappedRwLockWriteGuard, Mutex, MutexGuard, Once, OnceState,
ReentrantMutex, ReentrantMutexGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard,
RwLockWriteGuard,
};
#[cfg(debug_assertions)] #[cfg(not(debug_assertions))]
type $debug_name = TracingWrapper<$normal_name>; pub use parking_lot::{
#[cfg(not(debug_assertions))] FairMutex, FairMutexGuard, MappedFairMutexGuard, MappedMutexGuard, MappedReentrantMutexGuard,
type $debug_name = $normal_name; MappedRwLockReadGuard, MappedRwLockWriteGuard, Mutex, MutexGuard, Once, OnceState,
}; ReentrantMutex, ReentrantMutexGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard,
} RwLockWriteGuard,
};
debug_variant!( /// Dependency tracing wrappers for [`parking_lot`].
DebugRawFairMutex, pub mod tracing {
TracingRawFairMutex, pub use parking_lot::OnceState;
parking_lot::RawFairMutex
);
debug_variant!(DebugRawMutex, TracingRawMutex, parking_lot::RawMutex);
debug_variant!(DebugRawRwLock, TracingRawRwLock, parking_lot::RawRwLock);
/// Dependency tracking fair mutex. See: [`parking_lot::FairMutex`]. use crate::lockapi::TracingWrapper;
pub type TracingFairMutex<T> = lock_api::Mutex<TracingRawFairMutex, T>; use crate::LazyMutexId;
/// Mutex guard for [`TracingFairMutex`].
pub type TracingFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawFairMutex, T>;
/// RAII guard for `TracingFairMutexGuard::map`.
pub type TracingMappedFairMutexGuard<'a, T> =
lock_api::MappedMutexGuard<'a, TracingRawFairMutex, T>;
/// Debug-only dependency tracking fair mutex.
///
/// If debug assertions are enabled this resolves to [`TracingFairMutex`] and to
/// [`parking_lot::FairMutex`] otherwise.
pub type DebugFairMutex<T> = lock_api::Mutex<DebugRawFairMutex, T>;
/// Mutex guard for [`DebugFairMutex`].
pub type DebugFairMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawFairMutex, T>;
/// RAII guard for `DebugFairMutexGuard::map`.
pub type DebugMappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, DebugRawFairMutex, T>;
/// Dependency tracking mutex. See: [`parking_lot::Mutex`]. type RawFairMutex = TracingWrapper<parking_lot::RawFairMutex>;
-pub type TracingMutex<T> = lock_api::Mutex<TracingRawMutex, T>;
-/// Mutex guard for [`TracingMutex`].
-pub type TracingMutexGuard<'a, T> = lock_api::MutexGuard<'a, TracingRawMutex, T>;
-/// RAII guard for `TracingMutexGuard::map`.
-pub type TracingMappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, TracingRawMutex, T>;
-/// Debug-only dependency tracking mutex.
-///
-/// If debug assertions are enabled this resolves to [`TracingMutex`] and to [`parking_lot::Mutex`]
-/// otherwise.
-pub type DebugMutex<T> = lock_api::Mutex<DebugRawMutex, T>;
-/// Mutex guard for [`DebugMutex`].
-pub type DebugMutexGuard<'a, T> = lock_api::MutexGuard<'a, DebugRawMutex, T>;
-/// RAII guard for `TracingMutexGuard::map`.
-pub type DebugMappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, DebugRawMutex, T>;
-/// Dependency tracking reentrant mutex. See: [`parking_lot::ReentrantMutex`].
-///
-/// **Note:** due to the way dependencies are tracked, this mutex can only be acquired directly
-/// after itself. Acquiring any other mutex in between introduces a dependency cycle, and will
-/// therefore be rejected.
-pub type TracingReentrantMutex<T> =
-    lock_api::ReentrantMutex<TracingWrapper<parking_lot::RawMutex>, parking_lot::RawThreadId, T>;
-/// Mutex guard for [`TracingReentrantMutex`].
-pub type TracingReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<
-    'a,
-    TracingWrapper<parking_lot::RawMutex>,
-    parking_lot::RawThreadId,
-    T,
->;
-/// RAII guard for `TracingReentrantMutexGuard::map`.
-pub type TracingMappedReentrantMutexGuard<'a, T> =
-    lock_api::MappedReentrantMutexGuard<'a, TracingRawMutex, parking_lot::RawThreadId, T>;
-/// Debug-only dependency tracking reentrant mutex.
-///
-/// If debug assertions are enabled this resolves to [`TracingReentrantMutex`] and to
-/// [`parking_lot::ReentrantMutex`] otherwise.
-pub type DebugReentrantMutex<T> =
-    lock_api::ReentrantMutex<DebugRawMutex, parking_lot::RawThreadId, T>;
-/// Mutex guard for [`DebugReentrantMutex`].
-pub type DebugReentrantMutexGuard<'a, T> =
-    lock_api::ReentrantMutexGuard<'a, DebugRawMutex, parking_lot::RawThreadId, T>;
-/// RAII guard for `DebugReentrantMutexGuard::map`.
-pub type DebugMappedReentrantMutexGuard<'a, T> =
-    lock_api::MappedReentrantMutexGuard<'a, DebugRawMutex, parking_lot::RawThreadId, T>;
-/// Dependency tracking RwLock. See: [`parking_lot::RwLock`].
-pub type TracingRwLock<T> = lock_api::RwLock<TracingRawRwLock, T>;
-/// Read guard for [`TracingRwLock`].
-pub type TracingRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, TracingRawRwLock, T>;
-/// Upgradable Read guard for [`TracingRwLock`].
-pub type TracingRwLockUpgradableReadGuard<'a, T> =
-    lock_api::RwLockUpgradableReadGuard<'a, TracingRawRwLock, T>;
-/// Write guard for [`TracingRwLock`].
-pub type TracingRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, TracingRawRwLock, T>;
-/// RAII guard for `TracingRwLockReadGuard::map`.
-pub type TracingMappedRwLockReadGuard<'a, T> =
-    lock_api::MappedRwLockReadGuard<'a, TracingRawRwLock, T>;
-/// RAII guard for `TracingRwLockWriteGuard::map`.
-pub type TracingMappedRwLockWriteGuard<'a, T> =
-    lock_api::MappedRwLockWriteGuard<'a, TracingRawRwLock, T>;
-/// Debug-only dependency tracking RwLock.
-///
-/// If debug assertions are enabled this resolved to [`TracingRwLock`] and to
-/// [`parking_lot::RwLock`] otherwise.
-pub type DebugRwLock<T> = lock_api::RwLock<DebugRawRwLock, T>;
-/// Read guard for [`TracingRwLock`].
-pub type DebugRwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, DebugRawRwLock, T>;
-/// Upgradable Read guard for [`TracingRwLock`].
-pub type DebugRwLockUpgradableReadGuard<'a, T> =
-    lock_api::RwLockUpgradableReadGuard<'a, DebugRawRwLock, T>;
-/// Write guard for [`TracingRwLock`].
-pub type DebugRwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, DebugRawRwLock, T>;
-/// RAII guard for `DebugRwLockReadGuard::map`.
-pub type DebugMappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, DebugRawRwLock, T>;
-/// RAII guard for `DebugRwLockWriteGuard::map`.
-pub type DebugMappedRwLockWriteGuard<'a, T> =
-    lock_api::MappedRwLockWriteGuard<'a, DebugRawRwLock, T>;
+type RawMutex = TracingWrapper<parking_lot::RawMutex>;
+type RawRwLock = TracingWrapper<parking_lot::RawRwLock>;
+/// Dependency tracking fair mutex. See: [`parking_lot::FairMutex`].
+pub type FairMutex<T> = lock_api::Mutex<RawFairMutex, T>;
+/// Mutex guard for [`FairMutex`].
+pub type FairMutexGuard<'a, T> = lock_api::MutexGuard<'a, RawFairMutex, T>;
+/// RAII guard for [`FairMutexGuard::map`].
+pub type MappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawFairMutex, T>;
+/// Dependency tracking mutex. See: [`parking_lot::Mutex`].
+pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;
+/// Mutex guard for [`Mutex`].
+pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;
+/// RAII guard for [`MutexGuard::map`].
+pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;
+/// Dependency tracking reentrant mutex. See: [`parking_lot::ReentrantMutex`].
+///
+/// **Note:** due to the way dependencies are tracked, this mutex can only be acquired directly
+/// after itself. Acquiring any other mutex in between introduces a dependency cycle, and will
+/// therefore be rejected.
+pub type ReentrantMutex<T> = lock_api::ReentrantMutex<RawMutex, parking_lot::RawThreadId, T>;
+/// Mutex guard for [`ReentrantMutex`].
+pub type ReentrantMutexGuard<'a, T> =
+    lock_api::ReentrantMutexGuard<'a, RawMutex, parking_lot::RawThreadId, T>;
+/// RAII guard for `ReentrantMutexGuard::map`.
+pub type MappedReentrantMutexGuard<'a, T> =
+    lock_api::MappedReentrantMutexGuard<'a, RawMutex, parking_lot::RawThreadId, T>;
+/// Dependency tracking RwLock. See: [`parking_lot::RwLock`].
+pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>;
+/// Read guard for [`RwLock`].
+pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>;
+/// Upgradable Read guard for [`RwLock`].
+pub type RwLockUpgradableReadGuard<'a, T> =
+    lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;
+/// Write guard for [`RwLock`].
+pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>;
+/// RAII guard for `RwLockReadGuard::map`.
+pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>;
+/// RAII guard for `RwLockWriteGuard::map`.
+pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>;
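The note on the reentrant mutex above is easiest to see in code. Below is a minimal sketch, not part of this diff; the module path `tracing_mutex::parkinglot::tracing` is assumed based on the `use super::tracing` import in the tests further down.

```rust
// Hypothetical sketch: the reentrant mutex may only be re-acquired directly
// after itself. Taking another tracked lock in between closes a cycle in the
// dependency graph and the final acquisition panics.
use tracing_mutex::parkinglot::tracing::{Mutex, ReentrantMutex};

fn main() {
    let reentrant = ReentrantMutex::new(());
    let other = Mutex::new(());

    let _a = reentrant.lock();
    let _b = reentrant.lock(); // fine: acquired directly after itself
    drop((_a, _b));

    let _a = reentrant.lock();
    let _other = other.lock(); // records the edge reentrant -> other
    let _c = reentrant.lock(); // records other -> reentrant: cycle, panics
}
```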
 /// A dependency-tracking wrapper for [`parking_lot::Once`].
 #[derive(Debug, Default)]
-pub struct TracingOnce {
-    inner: Once,
+pub struct Once {
+    inner: parking_lot::Once,
     id: LazyMutexId,
 }
-impl TracingOnce {
-    /// Create a new `TracingOnce` value.
+impl Once {
+    /// Create a new `Once` value.
     pub const fn new() -> Self {
         Self {
-            inner: Once::new(),
+            inner: parking_lot::Once::new(),
             id: LazyMutexId::new(),
         }
     }
     /// Returns the current state of this `Once`.
     pub fn state(&self) -> OnceState {
         self.inner.state()
     }
-    ///
-    /// This call is considered as "locking this `TracingOnce`" and it participates in dependency
+    /// This call is considered as "locking this `Once`" and it participates in dependency
     /// tracking as such.
     ///
     /// # Panics
     ///
     /// This method will panic if `f` panics, poisoning this `Once`. In addition, this function
     /// panics when the lock acquisition order is determined to be inconsistent.
     pub fn call_once(&self, f: impl FnOnce()) {
         let _borrow = self.id.get_borrowed();
         self.inner.call_once(f);
     }
-    /// Performs the given initialization routeine once and only once.
+    /// Performs the given initialization routine once and only once.
     ///
-    /// This method is identical to [`TracingOnce::call_once`] except it ignores poisining.
+    /// This method is identical to [`Once::call_once`] except it ignores poisoning.
     pub fn call_once_force(&self, f: impl FnOnce(OnceState)) {
         let _borrow = self.id.get_borrowed();
         self.inner.call_once_force(f);
     }
 }
-/// Debug-only `Once`.
-///
-/// If debug assertions are enabled this resolves to [`TracingOnce`] and to [`parking_lot::Once`]
-/// otherwise.
-#[cfg(debug_assertions)]
-pub type DebugOnce = TracingOnce;
-#[cfg(not(debug_assertions))]
-pub type DebugOnce = Once;
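For the `Once` wrapper, the tracking works as the doc comment describes: the `call_once` closure runs while the `Once` counts as held, so locks taken inside it are ordered after the `Once` in the dependency graph. A small usage sketch under the same assumed module path:

```rust
use tracing_mutex::parkinglot::tracing::{Mutex, Once};

fn main() {
    let config = Mutex::new(None::<String>);
    let init = Once::new();

    init.call_once(|| {
        // Locking `config` inside the closure records it as a dependency of `init`.
        *config.lock() = Some("loaded".to_owned());
    });

    assert!(init.state().done());
}
```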
 #[cfg(test)]
 mod tests {
     use std::sync::Arc;
     use std::thread;
-    use super::*;
+    use super::tracing;
     #[test]
     fn test_mutex_usage() {
-        let mutex = Arc::new(TracingMutex::new(()));
+        let mutex = Arc::new(tracing::Mutex::new(()));
         let local_lock = mutex.lock();
         drop(local_lock);
@@ -236,9 +176,9 @@ mod tests {
     #[should_panic]
     fn test_mutex_conflict() {
         let mutexes = [
-            TracingMutex::new(()),
-            TracingMutex::new(()),
-            TracingMutex::new(()),
+            tracing::Mutex::new(()),
+            tracing::Mutex::new(()),
+            tracing::Mutex::new(()),
         ];
         for i in 0..3 {
@@ -249,7 +189,7 @@ mod tests {
     #[test]
     fn test_rwlock_usage() {
-        let lock = Arc::new(TracingRwLock::new(()));
+        let lock = Arc::new(tracing::RwLock::new(()));
         let lock2 = Arc::clone(&lock);
         let _read_lock = lock.read();
@@ -264,19 +204,19 @@ mod tests {
     #[test]
     fn test_rwlock_upgradable_read_usage() {
-        let lock = TracingRwLock::new(());
+        let lock = tracing::RwLock::new(());
         // Should be able to acquire an upgradable read lock.
-        let upgradable_guard: TracingRwLockUpgradableReadGuard<'_, _> = lock.upgradable_read();
+        let upgradable_guard: tracing::RwLockUpgradableReadGuard<'_, _> = lock.upgradable_read();
         // Should be able to upgrade the guard.
-        let _write_guard: TracingRwLockWriteGuard<'_, _> =
-            TracingRwLockUpgradableReadGuard::upgrade(upgradable_guard);
+        let _write_guard: tracing::RwLockWriteGuard<'_, _> =
+            tracing::RwLockUpgradableReadGuard::upgrade(upgradable_guard);
     }
     #[test]
     fn test_once_usage() {
-        let once = Arc::new(TracingOnce::new());
+        let once = Arc::new(tracing::Once::new());
         let once_clone = once.clone();
         assert!(!once_clone.state().done());
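The body of `test_mutex_conflict` is elided by the hunk above; the kind of conflict it exercises looks roughly like the following hypothetical sketch, which is not the literal test body.

```rust
use tracing_mutex::parkinglot::tracing::Mutex;

fn main() {
    let mutexes = [Mutex::new(()), Mutex::new(()), Mutex::new(())];
    for i in 0..3 {
        // Locking i and then (i + 1) % 3 records the edges 0 -> 1, 1 -> 2, 2 -> 0;
        // the last acquisition closes the cycle and panics.
        let _first = mutexes[i].lock();
        let _second = mutexes[(i + 1) % 3].lock();
    }
}
```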

src/reporting.rs (new file, 64 lines added)
@@ -0,0 +1,64 @@
//! Cycle reporting primitives
//!
//! This module exposes [`Dep`], which resolves to either something that tracks dependencies or to
//! something that doesn't. It should only be assumed to implement the [`Reportable`] trait.
use std::backtrace::Backtrace;
use std::borrow::Cow;
use std::fmt::Write;
use std::sync::Arc;
#[cfg(feature = "backtraces")]
pub type Dep = MutexDep<Arc<Backtrace>>;
#[cfg(not(feature = "backtraces"))]
pub type Dep = MutexDep<()>;
// Base message to be reported when cycle is detected
const BASE_MESSAGE: &str = "Found cycle in mutex dependency graph:";
pub trait Reportable: Clone {
/// Capture the current state
fn capture() -> Self;
/// Format a trace of state for human readable consumption.
fn panic_message(trace: &[Self]) -> Cow<'static, str>;
}
#[derive(Clone)]
pub struct MutexDep<T>(T);
/// Use a unit as tracing data: no tracing.
///
/// This should have no runtime overhead for capturing traces and should therefore be cheap enough
/// for most purposes.
impl Reportable for MutexDep<()> {
fn capture() -> Self {
Self(())
}
fn panic_message(_trace: &[Self]) -> Cow<'static, str> {
Cow::Borrowed(BASE_MESSAGE)
}
}
/// Use a full backtrace as tracing data
///
/// Capture the entire backtrace which may be expensive. This implementation does not force capture
/// in the event that backtraces are disabled at runtime, so the exact overhead can still be
/// controlled a little.
///
/// N.B. the [`Backtrace`] needs to be wrapped in an Arc as backtraces are not [`Clone`].
impl Reportable for MutexDep<Arc<Backtrace>> {
fn capture() -> Self {
Self(Arc::new(Backtrace::capture()))
}
fn panic_message(trace: &[Self]) -> Cow<'static, str> {
let mut message = format!("{BASE_MESSAGE}\n");
for entry in trace {
let _ = writeln!(message, "{}", entry.0);
}
message.into()
}
}
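One design note from the module above is worth illustrating: `std::backtrace::Backtrace` is not `Clone`, which is why the captured backtrace is wrapped in an `Arc`. The sketch below is standalone; the `Capture` type is a hypothetical stand-in for `MutexDep<Arc<Backtrace>>`, not crate API.

```rust
use std::backtrace::Backtrace;
use std::sync::Arc;

// Hypothetical stand-in for `MutexDep<Arc<Backtrace>>` above.
#[derive(Clone)]
struct Capture(Arc<Backtrace>);

fn main() {
    let captured = Capture(Arc::new(Backtrace::capture()));
    let copy = captured.clone(); // cheap: clones the Arc, not the whole backtrace
    // Whether frames are actually resolved still follows RUST_BACKTRACE / RUST_LIB_BACKTRACE.
    println!("{}", copy.0);
}
```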

File diff suppressed because it is too large