Mirror of https://github.com/bertptrs/tracing-mutex.git, synced 2025-12-27 21:40:32 +01:00

Compare commits: 8feedb09d2...v0.3.0 (15 commits)
Commits in this range:

- 43df59ac1c
- 1fe44d0a05
- c9083c8bc1
- d8c559fd3f
- a8e8af6351
- 068303d81d
- 6be3e05cab
- 909e934572
- 0ae544a07a
- 4148d509bf
- fc1593b76f
- 8f19921e9e
- 00420d6807
- 49b15bb6bd
- 29c9daf53e
.github/workflows/ci.yml (vendored, 38 changes)

@@ -15,54 +15,34 @@ jobs:
     strategy:
       matrix:
         rust:
-          - "1.63" # minimum stable rust version
+          - "1.70" # minimum stable rust version
           - stable
           - beta
           - nightly

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

-      - uses: actions-rs/toolchain@v1
+      - uses: dtolnay/rust-toolchain@v1
         with:
-          profile: minimal
           toolchain: ${{ matrix.rust }}
-          override: true
           components: rustfmt, clippy

-      - uses: actions-rs/cargo@v1
-        with:
-          command: build
-          # --all-targets ensures that we also build the benchmarks and tests already.
-          args: --all-features --all-targets
-
-      - uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --all-features
-
-      - uses: actions-rs/cargo@v1
-        with:
-          command: fmt
-          args: --all -- --check
-
-      - uses: actions-rs/cargo@v1
-        with:
-          command: clippy
-          args: --all-features --all-targets -- -D warnings
+      - run: cargo build --all-features --all-targets
+      - run: cargo test --all-features
+      - run: cargo fmt --all -- --check
+      - run: cargo clippy --all-features --all-targets -- -D warnings

   docs:
     name: Documentation build
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3

-      - uses: actions-rs/toolchain@v1
+      - uses: dtolnay/rust-toolchain@v1
         with:
-          profile: minimal
           toolchain: nightly
-          override: true

       - name: Build documentation
         env:
CHANGELOG.md (18 changes)

@@ -6,15 +6,26 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

 ## [Unreleased]

+## [0.3.0] - 2023-09-09
+
 ### Added

-- The minimum supported Rust version is now defined as 1.63. Previously it was undefined.
+- The minimum supported Rust version is now defined as 1.70. Previously it was undefined.
+- Wrappers for `std::sync` primitives can now be `const` constructed.
+- Add support for `std::sync::OnceLock`
+- Added backtraces of mutex allocations to the cycle report. Capturing backtraces does incur some
+  overhead, this can be mitigated by disabling the `backtraces` feature which is enabled by default.

 ### Breaking

 - Update [`parking_lot`][parking_lot] dependency to `0.12`.
-- Restructured the crate to reduce typename verbosity. For details, see: #25.
+- Restructured the crate to reduce typename verbosity. Wrapper names now match the name of the
+  primitive they wrap. Specific always/debug tracing versions have now moved to separate modules.
+  For example, `tracing_mutex::stdsync::TracingMutex` is now
+  `tracing_mutex::stdsync::tracing::Mutex`, and `tracing_mutex::stdsync::DebugMutex` is now called
+  `tracing_mutex::stdsync::Mutex`. This hopefully reduces the visual noise while reading code that
+  uses this in practice. Unwrapped primitives are reexported under `tracing_mutex::stdsync::raw` for
+  convenience.

 ### Fixed

@@ -82,7 +93,8 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

 Initial release.

-[Unreleased]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.1...HEAD
+[Unreleased]: https://github.com/bertptrs/tracing-mutex/compare/v0.3.0...HEAD
+[0.3.0]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.1...v0.3.0
 [0.2.1]: https://github.com/bertptrs/tracing-mutex/compare/v0.2.0...v0.2.1
 [0.2.0]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.2...v0.2.0
 [0.1.2]: https://github.com/bertptrs/tracing-mutex/compare/v0.1.1...v0.1.2
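To make the renaming concrete, here is a minimal sketch of user code against the 0.3.0 paths (the module paths are the ones named in the changelog entry above; the counter usage is illustrative):

    // Debug-only wrapper: traces lock order when debug assertions are on,
    // plain std mutex otherwise. Formerly `tracing_mutex::stdsync::DebugMutex`.
    use tracing_mutex::stdsync::Mutex;
    // Always-tracing variant, formerly `tracing_mutex::stdsync::TracingMutex`.
    use tracing_mutex::stdsync::tracing::Mutex as TracedMutex;
    // Unwrapped std primitive, re-exported for convenience.
    use tracing_mutex::stdsync::raw::Mutex as RawMutex;

    fn main() {
        // All three expose the familiar `std::sync::Mutex` API.
        let counter = Mutex::new(0);
        *counter.lock().unwrap() += 1;

        let traced = TracedMutex::new(0);
        *traced.lock().unwrap() += 1;

        let raw = RawMutex::new(0);
        *raw.lock().unwrap() += 1;
    }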
Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "tracing-mutex"
-version = "0.2.1"
+version = "0.3.0"
 authors = ["Bert Peters <bert@bertptrs.nl>"]
 edition = "2021"
 license = "MIT OR Apache-2.0"
@@ -10,7 +10,7 @@ keywords = ["mutex", "rwlock", "once", "thread"]
 description = "Ensure deadlock-free mutexes by allocating in order, or else."
 readme = "README.md"
 repository = "https://github.com/bertptrs/tracing-mutex"
-rust-version = "1.63"
+rust-version = "1.70"

 [package.metadata.docs.rs]
 # Build docs for all features so the documentation is more complete
@@ -19,12 +19,11 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]

 [dependencies]
-lazy_static = "1"
 lock_api = { version = "0.4", optional = true }
 parking_lot = { version = "0.12", optional = true }

 [dev-dependencies]
-criterion = "0.3"
+criterion = "0.5"
 rand = "0.8"

 [[bench]]
@@ -32,6 +31,8 @@ name = "mutex"
 harness = false

 [features]
+default = ["backtraces"]
+backtraces = []
 # Feature names do not match crate names pending namespaced features.
 lockapi = ["lock_api"]
 parkinglot = ["parking_lot", "lockapi"]
README.md

@@ -61,7 +61,7 @@ performance penalty in your production environment, this library also offers deb
 when debug assertions are enabled, and to `Mutex` when they are not. Similar helper types are
 available for other synchronization primitives.

-The minimum supported Rust version is 1.63. Increasing this is not considered a breaking change, but
+The minimum supported Rust version is 1.70. Increasing this is not considered a breaking change, but
 will be avoided within semver-compatible releases if possible.

 ### Features
bors.toml

@@ -1,5 +1,5 @@
 status = [
-    'Rust project (1.63)',
+    'Rust project (1.70)',
     'Rust project (stable)',
     'Rust project (beta)',
     'Documentation build',
examples/mutex_cycle.rs (new file, 26 lines)

@@ -0,0 +1,26 @@
+//! Show what a crash looks like
+//!
+//! This shows what a traceback of a cycle detection looks like. It is expected to crash.
+use tracing_mutex::stdsync::Mutex;
+
+fn main() {
+    let a = Mutex::new(());
+    let b = Mutex::new(());
+    let c = Mutex::new(());
+
+    // Create an edge from a to b
+    {
+        let _a = a.lock();
+        let _b = b.lock();
+    }
+
+    // Create an edge from b to c
+    {
+        let _b = b.lock();
+        let _c = c.lock();
+    }
+
+    // Now crash by trying to add an edge from c to a
+    let _c = c.lock();
+    let _a = a.lock(); // This line will crash
+}
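The same scenario can also be asserted in a test. A minimal sketch (test name mine), matching the panic message the stdsync tests further down now expect:

    use tracing_mutex::stdsync::tracing::Mutex;

    #[test]
    #[should_panic(expected = "Found cycle in mutex dependency graph")]
    fn lock_order_cycle_panics() {
        let a = Mutex::new(());
        let b = Mutex::new(());

        // Record the dependency a -> b.
        {
            let _a = a.lock();
            let _b = b.lock();
        }

        // b -> a closes the cycle; marking the lock held panics with the report.
        let _b = b.lock();
        let _a = a.lock();
    }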
src/graph.rs (134 changes)

@@ -1,4 +1,5 @@
 use std::cell::Cell;
+use std::collections::hash_map::Entry;
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::hash::Hash;
@@ -19,23 +20,24 @@ type Order = usize;
 /// visibly changed.
 ///
 /// [paper]: https://whileydave.com/publications/pk07_jea/
-#[derive(Default, Debug)]
-pub struct DiGraph<V>
+#[derive(Debug)]
+pub struct DiGraph<V, E>
 where
     V: Eq + Hash + Copy,
 {
-    nodes: HashMap<V, Node<V>>,
-    /// Next topological sort order
-    next_ord: Order,
+    nodes: HashMap<V, Node<V, E>>,
+    // Instead of reordering the orders in the graph whenever a node is deleted, we maintain a list
+    // of unused ids that can be handed out later again.
+    unused_order: Vec<Order>,
 }

 #[derive(Debug)]
-struct Node<V>
+struct Node<V, E>
 where
     V: Eq + Hash + Clone,
 {
     in_edges: HashSet<V>,
-    out_edges: HashSet<V>,
+    out_edges: HashMap<V, E>,
     // The "Ord" field is a Cell to ensure we can update it in an immutable context.
     // `std::collections::HashMap` doesn't let you have multiple mutable references to elements, but
     // this way we can use immutable references and still update `ord`. This saves quite a few
@@ -43,7 +45,7 @@ where
     ord: Cell<Order>,
 }

-impl<V> DiGraph<V>
+impl<V, E> DiGraph<V, E>
 where
     V: Eq + Hash + Copy,
 {
@@ -54,12 +56,18 @@ where
     /// the node in the topological order.
     ///
     /// New nodes are appended to the end of the topological order when added.
-    fn add_node(&mut self, n: V) -> (&mut HashSet<V>, &mut HashSet<V>, Order) {
-        let next_ord = &mut self.next_ord;
+    fn add_node(&mut self, n: V) -> (&mut HashSet<V>, &mut HashMap<V, E>, Order) {
+        // need to compute next id before the call to entry() to avoid duplicate borrow of nodes
+        let fallback_id = self.nodes.len();

         let node = self.nodes.entry(n).or_insert_with(|| {
-            let order = *next_ord;
-            *next_ord = next_ord.checked_add(1).expect("Topological order overflow");
+            let order = if let Some(id) = self.unused_order.pop() {
+                // Reuse discarded ordering entry
+                id
+            } else {
+                // Allocate new order id
+                fallback_id
+            };

             Node {
                 ord: Cell::new(order),
@@ -77,9 +85,12 @@ where
             Some(Node {
                 out_edges,
                 in_edges,
-                ..
+                ord,
             }) => {
-                out_edges.into_iter().for_each(|m| {
+                // Return ordering to the pool of unused ones
+                self.unused_order.push(ord.get());
+
+                out_edges.into_keys().for_each(|m| {
                     self.nodes.get_mut(&m).unwrap().in_edges.remove(&n);
                 });
@@ -96,18 +107,29 @@ where
     ///
     /// Nodes, both from and to, are created as needed when creating new edges. If the new edge
     /// would introduce a cycle, the edge is rejected and `false` is returned.
-    pub(crate) fn add_edge(&mut self, x: V, y: V) -> bool {
+    ///
+    /// # Errors
+    ///
+    /// If the edge would introduce the cycle, the underlying graph is not modified and a list of
+    /// all the edge data in the would-be cycle is returned instead.
+    pub(crate) fn add_edge(&mut self, x: V, y: V, e: impl FnOnce() -> E) -> Result<(), Vec<E>>
+    where
+        E: Clone,
+    {
         if x == y {
             // self-edges are always considered cycles
-            return false;
+            return Err(Vec::new());
         }

         let (_, out_edges, ub) = self.add_node(x);

-        if !out_edges.insert(y) {
-            // Edge already exists, nothing to be done
-            return true;
-        }
+        match out_edges.entry(y) {
+            Entry::Occupied(_) => {
+                // Edge already exists, nothing to be done
+                return Ok(());
+            }
+            Entry::Vacant(entry) => entry.insert(e()),
+        };

         let (in_edges, _, lb) = self.add_node(y);

@@ -119,7 +141,7 @@ where
         let mut delta_f = Vec::new();
         let mut delta_b = Vec::new();

-        if !self.dfs_f(&self.nodes[&y], ub, &mut visited, &mut delta_f) {
+        if let Err(cycle) = self.dfs_f(&self.nodes[&y], ub, &mut visited, &mut delta_f) {
             // This edge introduces a cycle, so we want to reject it and remove it from the
             // graph again to keep the "does not contain cycles" invariant.
@@ -129,7 +151,7 @@ where
             self.nodes.get_mut(&x).map(|node| node.out_edges.remove(&y));

             // No edge was added
-            return false;
+            return Err(cycle);
         }

         // No need to check as we should've found the cycle on the forward pass
@@ -141,44 +163,49 @@ where
             self.reorder(delta_f, delta_b);
         }

-        true
+        Ok(())
     }

     /// Forwards depth-first-search
     fn dfs_f<'a>(
         &'a self,
-        n: &'a Node<V>,
+        n: &'a Node<V, E>,
         ub: Order,
         visited: &mut HashSet<V>,
-        delta_f: &mut Vec<&'a Node<V>>,
-    ) -> bool {
+        delta_f: &mut Vec<&'a Node<V, E>>,
+    ) -> Result<(), Vec<E>>
+    where
+        E: Clone,
+    {
         delta_f.push(n);

-        n.out_edges.iter().all(|w| {
+        for (w, e) in &n.out_edges {
             let node = &self.nodes[w];
             let ord = node.ord.get();

             if ord == ub {
                 // Found a cycle
-                false
+                return Err(vec![e.clone()]);
             } else if !visited.contains(w) && ord < ub {
                 // Need to check recursively
                 visited.insert(*w);
-                self.dfs_f(node, ub, visited, delta_f)
-            } else {
-                // Already seen this one or not interesting
-                true
+                if let Err(mut chain) = self.dfs_f(node, ub, visited, delta_f) {
+                    chain.push(e.clone());
+                    return Err(chain);
+                }
             }
-        })
+        }
+
+        Ok(())
     }

     /// Backwards depth-first-search
     fn dfs_b<'a>(
         &'a self,
-        n: &'a Node<V>,
+        n: &'a Node<V, E>,
         lb: Order,
         visited: &mut HashSet<V>,
-        delta_b: &mut Vec<&'a Node<V>>,
+        delta_b: &mut Vec<&'a Node<V, E>>,
     ) {
         delta_b.push(n);

@@ -192,7 +219,7 @@ where
         }
     }

-    fn reorder(&self, mut delta_f: Vec<&Node<V>>, mut delta_b: Vec<&Node<V>>) {
+    fn reorder(&self, mut delta_f: Vec<&Node<V, E>>, mut delta_b: Vec<&Node<V, E>>) {
         self.sort(&mut delta_f);
         self.sort(&mut delta_b);

@@ -213,12 +240,25 @@ where
         }
     }

-    fn sort(&self, ids: &mut [&Node<V>]) {
+    fn sort(&self, ids: &mut [&Node<V, E>]) {
         // Can use unstable sort because mutex ids should not be equal
         ids.sort_unstable_by_key(|v| &v.ord);
     }
 }

+// Manual `Default` impl as derive causes unnecessarily strong bounds.
+impl<V, E> Default for DiGraph<V, E>
+where
+    V: Eq + Hash + Copy,
+{
+    fn default() -> Self {
+        Self {
+            nodes: Default::default(),
+            unused_order: Default::default(),
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use rand::seq::SliceRandom;
@@ -226,12 +266,14 @@ mod tests {

     use super::*;

+    fn nop() {}
+
     #[test]
     fn test_no_self_cycle() {
         // Regression test for https://github.com/bertptrs/tracing-mutex/issues/7
         let mut graph = DiGraph::default();

-        assert!(!graph.add_edge(1, 1));
+        assert!(graph.add_edge(1, 1, nop).is_err());
     }

     #[test]
@@ -239,16 +281,16 @@ mod tests {
         let mut graph = DiGraph::default();

         // Add some safe edges
-        assert!(graph.add_edge(0, 1));
-        assert!(graph.add_edge(1, 2));
-        assert!(graph.add_edge(2, 3));
-        assert!(graph.add_edge(4, 2));
+        assert!(graph.add_edge(0, 1, nop).is_ok());
+        assert!(graph.add_edge(1, 2, nop).is_ok());
+        assert!(graph.add_edge(2, 3, nop).is_ok());
+        assert!(graph.add_edge(4, 2, nop).is_ok());

         // Try to add an edge that introduces a cycle
-        assert!(!graph.add_edge(3, 1));
+        assert!(graph.add_edge(3, 1, nop).is_err());

         // Add an edge that should reorder 0 to be after 4
-        assert!(graph.add_edge(4, 0));
+        assert!(graph.add_edge(4, 0, nop).is_ok());
     }

     /// Fuzz the DiGraph implementation by adding a bunch of valid edges.
@@ -256,7 +298,7 @@ mod tests {
     /// This test generates all possible forward edges in a 100-node graph consisting of natural
     /// numbers, shuffles them, then adds them to the graph. This will always be a valid directed,
     /// acyclic graph because there is a trivial order (the natural numbers) but because the edges
-    /// are added in a random order the DiGraph will still occassionally need to reorder nodes.
+    /// are added in a random order the DiGraph will still occasionally need to reorder nodes.
     #[test]
     fn fuzz_digraph() {
         // Note: this fuzzer is quadratic in the number of nodes, so this cannot be too large or it
@@ -277,7 +319,7 @@ mod tests {
         let mut graph = DiGraph::default();

         for (x, y) in edges {
-            assert!(graph.add_edge(x, y));
+            assert!(graph.add_edge(x, y, nop).is_ok());
         }
     }
 }
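The `e: impl FnOnce() -> E` parameter means edge metadata (in practice a backtrace) is only constructed when the edge is actually new; repeated locks along a known edge pay nothing. A standalone sketch of that `Entry`-based pattern, independent of the crate's internal `DiGraph`:

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;

    /// Insert edge data for `to`, constructing it lazily, and report whether
    /// the edge was new. Mirrors the `Entry` dance in `DiGraph::add_edge`.
    fn insert_edge<E>(out_edges: &mut HashMap<u32, E>, to: u32, make: impl FnOnce() -> E) -> bool {
        match out_edges.entry(to) {
            // Edge already known: `make` is dropped without ever being called.
            Entry::Occupied(_) => false,
            Entry::Vacant(entry) => {
                entry.insert(make());
                true
            }
        }
    }

    fn main() {
        let mut edges = HashMap::new();
        assert!(insert_edge(&mut edges, 1, || String::from("captured once")));
        // A second insertion along the same edge never runs the closure.
        assert!(!insert_edge(&mut edges, 1, || unreachable!()));
    }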
src/lib.rs (114 changes)

@@ -18,8 +18,23 @@
 //! # Structure
 //!
 //! Each module in this crate exposes wrappers for a specific base-mutex with dependency tracking
-//! added. For now, that is limited to [`stdsync`] which provides wrappers for the base locks in the
-//! standard library. More back-ends may be added as features in the future.
+//! added. This includes [`stdsync`] which provides wrappers for the base locks in the standard
+//! library, and more depending on enabled compile-time features. More back-ends may be added as
+//! features in the future.
+//!
+//! # Feature flags
+//!
+//! `tracing-mutex` uses feature flags to reduce the impact of this crate on both your compile time
+//! and runtime overhead. Below are the available flags. Modules are annotated with the features
+//! they require.
+//!
+//! - `backtraces`: Enables capturing backtraces of mutex dependencies, to make it easier to
+//!   determine what sequence of events would trigger a deadlock. This is enabled by default, but if
+//!   the performance overhead is unacceptable, it can be disabled by disabling default features.
+//!
+//! - `lockapi`: Enables the wrapper lock for [`lock_api`][lock_api] locks
+//!
+//! - `parkinglot`: Enables wrapper types for [`parking_lot`][parking_lot] mutexes
 //!
 //! # Performance considerations
 //!
@@ -44,29 +59,34 @@
 //! (such as [`stdsync::Mutex`]) which evaluate to a tracing mutex when debug assertions are
 //! enabled, and to the underlying mutex when they're not.
 //!
+//! For ease of debugging, this crate will, by default, capture a backtrace when establishing a new
+//! dependency between two mutexes. This has an additional overhead of over 60%. If this additional
+//! debugging aid is not required, it can be disabled by disabling default features.
+//!
 //! [paper]: https://whileydave.com/publications/pk07_jea/
 //! [lock_api]: https://docs.rs/lock_api/0.4/lock_api/index.html
 //! [parking_lot]: https://docs.rs/parking_lot/0.12.1/parking_lot/
 #![cfg_attr(docsrs, feature(doc_cfg))]
 use std::cell::RefCell;
-use std::cell::UnsafeCell;
 use std::fmt;
-use std::marker::PhantomData;
-use std::mem::MaybeUninit;
 use std::ops::Deref;
 use std::ops::DerefMut;
 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering;
 use std::sync::Mutex;
 use std::sync::MutexGuard;
-use std::sync::Once;
+use std::sync::OnceLock;
 use std::sync::PoisonError;

-use lazy_static::lazy_static;
 #[cfg(feature = "lockapi")]
 #[cfg_attr(docsrs, doc(cfg(feature = "lockapi")))]
 pub use lock_api;
 #[cfg(feature = "parkinglot")]
 #[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
 pub use parking_lot;
+use reporting::Dep;
+use reporting::Reportable;

 use crate::graph::DiGraph;

@@ -77,13 +97,9 @@ pub mod lockapi;
 #[cfg(feature = "parkinglot")]
 #[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
 pub mod parkinglot;
+mod reporting;
 pub mod stdsync;

-/// Counter for Mutex IDs. Atomic avoids the need for locking.
-///
-/// Should be part of the `MutexID` impl but static items are not yet a thing.
-static ID_SEQUENCE: AtomicUsize = AtomicUsize::new(0);
-
 thread_local! {
     /// Stack to track which locks are held
     ///
@@ -92,10 +108,6 @@ thread_local! {
     static HELD_LOCKS: RefCell<Vec<usize>> = RefCell::new(Vec::new());
 }

-lazy_static! {
-    static ref DEPENDENCY_GRAPH: Mutex<DiGraph<usize>> = Default::default();
-}
-
 /// Dedicated ID type for Mutexes
 ///
 /// # Unstable
@@ -114,6 +126,9 @@ impl MutexId {
     /// This function may panic when there are no more mutex IDs available. The number of mutex ids
     /// is `usize::MAX - 1` which should be plenty for most practical applications.
     pub fn new() -> Self {
+        // Counter for Mutex IDs. Atomic avoids the need for locking.
+        static ID_SEQUENCE: AtomicUsize = AtomicUsize::new(0);
+
         ID_SEQUENCE
             .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |id| id.checked_add(1))
             .map(Self)
@@ -146,19 +161,18 @@ impl MutexId {
     ///
     /// This method panics if the new dependency would introduce a cycle.
     pub fn mark_held(&self) {
-        let creates_cycle = HELD_LOCKS.with(|locks| {
+        let opt_cycle = HELD_LOCKS.with(|locks| {
             if let Some(&previous) = locks.borrow().last() {
                 let mut graph = get_dependency_graph();

-                !graph.add_edge(previous, self.value())
+                graph.add_edge(previous, self.value(), Dep::capture).err()
             } else {
-                false
+                None
             }
         });

-        if creates_cycle {
-            // Panic without holding the lock to avoid needlessly poisoning it
-            panic!("Mutex order graph should not have cycles");
+        if let Some(cycle) = opt_cycle {
+            panic!("{}", Dep::panic_message(&cycle))
         }

         HELD_LOCKS.with(|locks| locks.borrow_mut().push(self.value()));
@@ -207,17 +221,13 @@ impl Drop for MutexId {
 ///
 /// This type can be largely replaced once std::lazy gets stabilized.
 struct LazyMutexId {
-    inner: UnsafeCell<MaybeUninit<MutexId>>,
-    setter: Once,
-    _marker: PhantomData<MutexId>,
+    inner: OnceLock<MutexId>,
 }

 impl LazyMutexId {
     pub const fn new() -> Self {
         Self {
-            inner: UnsafeCell::new(MaybeUninit::uninit()),
-            setter: Once::new(),
-            _marker: PhantomData,
+            inner: OnceLock::new(),
         }
     }
 }
@@ -234,44 +244,11 @@ impl Default for LazyMutexId {
     }
 }

-/// Safety: the UnsafeCell is guaranteed to only be accessed mutably from a `Once`.
-unsafe impl Sync for LazyMutexId {}
-
 impl Deref for LazyMutexId {
     type Target = MutexId;

     fn deref(&self) -> &Self::Target {
-        self.setter.call_once(|| {
-            // Safety: this function is only called once, so only one mutable reference should exist
-            // at a time.
-            unsafe {
-                *self.inner.get() = MaybeUninit::new(MutexId::new());
-            }
-        });
-
-        // Safety: after the above Once runs, there are no longer any mutable references, so we can
-        // hand this out safely.
-        //
-        // Explanation of this monstrosity:
-        //
-        // - Get a pointer to the data from the UnsafeCell
-        // - Dereference that to get a reference to the underlying MaybeUninit
-        // - Use as_ptr on MaybeUninit to get a pointer to the initialized MutexID
-        // - Dereference the pointer to turn it into a reference as intended.
-        //
-        // This should get slightly nicer once `maybe_uninit_extra` is stabilized.
-        unsafe { &*((*self.inner.get()).as_ptr()) }
-    }
-}
-
-impl Drop for LazyMutexId {
-    fn drop(&mut self) {
-        if self.setter.is_completed() {
-            // We have a valid mutex ID and need to drop it
-
-            // Safety: we know that this pointer is valid because the initializer has successfully run.
-            unsafe { (*self.inner.get()).assume_init_drop() };
-        }
+        self.inner.get_or_init(MutexId::new)
     }
 }

@@ -306,8 +283,11 @@ impl<'a> Drop for BorrowedMutex<'a> {
 }

 /// Get a reference to the current dependency graph
-fn get_dependency_graph() -> impl DerefMut<Target = DiGraph<usize>> {
+fn get_dependency_graph() -> impl DerefMut<Target = DiGraph<usize, Dep>> {
+    static DEPENDENCY_GRAPH: OnceLock<Mutex<DiGraph<usize, Dep>>> = OnceLock::new();
+
     DEPENDENCY_GRAPH
+        .get_or_init(Default::default)
         .lock()
         .unwrap_or_else(PoisonError::into_inner)
 }
@@ -335,11 +315,11 @@ mod tests {
     let c = LazyMutexId::new();

     let mut graph = get_dependency_graph();
-    assert!(graph.add_edge(a.value(), b.value()));
-    assert!(graph.add_edge(b.value(), c.value()));
+    assert!(graph.add_edge(a.value(), b.value(), Dep::capture).is_ok());
+    assert!(graph.add_edge(b.value(), c.value(), Dep::capture).is_ok());

     // Creating an edge c → a should fail as it introduces a cycle.
-    assert!(!graph.add_edge(c.value(), a.value()));
+    assert!(graph.add_edge(c.value(), a.value(), Dep::capture).is_err());

     // Drop graph handle so we can drop vertices without deadlocking
     drop(graph);
@@ -347,7 +327,9 @@
     drop(b);

     // If b's destructor ran correctly we can now add an edge from c to a.
-    assert!(get_dependency_graph().add_edge(c.value(), a.value()));
+    assert!(get_dependency_graph()
+        .add_edge(c.value(), a.value(), Dep::capture)
+        .is_ok());
 }

 /// Test creating a cycle, then panicking.
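The `OnceLock` rewrite of `LazyMutexId` removes all of the unsafe bookkeeping: `OnceLock` is `Sync` when its contents are, initializes at most once even under contention, and drops its contents automatically, so the manual `unsafe impl Sync`, the `Once` gating, and the hand-written `Drop` all disappear. A minimal sketch of the same pattern (the names and `u64` payload are placeholders; `OnceLock` was stabilized in Rust 1.70, which is presumably why the MSRV moves there):

    use std::sync::OnceLock;

    struct LazyId {
        inner: OnceLock<u64>,
    }

    impl LazyId {
        // Still `const`, so it can back a `static`.
        const fn new() -> Self {
            Self { inner: OnceLock::new() }
        }

        fn get(&self) -> &u64 {
            // Stand-in for `self.inner.get_or_init(MutexId::new)`: the closure
            // runs at most once, and shared references are handed out after.
            self.inner.get_or_init(|| 42)
        }
    }

    fn main() {
        static ID: LazyId = LazyId::new();
        assert_eq!(*ID.get(), 42);
    }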
src/reporting.rs (new file, 64 lines)

@@ -0,0 +1,64 @@
+//! Cycle reporting primitives
+//!
+//! This module exposes [`Dep`], which resolves to either something that tracks dependencies or to
+//! something that doesn't. It should only be assumed to implement the [`Reportable`] trait.
+use std::backtrace::Backtrace;
+use std::borrow::Cow;
+use std::fmt::Write;
+use std::sync::Arc;
+
+#[cfg(feature = "backtraces")]
+pub type Dep = MutexDep<Arc<Backtrace>>;
+#[cfg(not(feature = "backtraces"))]
+pub type Dep = MutexDep<()>;
+
+// Base message to be reported when cycle is detected
+const BASE_MESSAGE: &str = "Found cycle in mutex dependency graph:";
+
+pub trait Reportable: Clone {
+    /// Capture the current state
+    fn capture() -> Self;
+
+    /// Format a trace of state for human readable consumption.
+    fn panic_message(trace: &[Self]) -> Cow<'static, str>;
+}
+
+#[derive(Clone)]
+pub struct MutexDep<T>(T);
+
+/// Use a unit as tracing data: no tracing.
+///
+/// This should have no runtime overhead for capturing traces and should therefore be cheap enough
+/// for most purposes.
+impl Reportable for MutexDep<()> {
+    fn capture() -> Self {
+        Self(())
+    }
+
+    fn panic_message(_trace: &[Self]) -> Cow<'static, str> {
+        Cow::Borrowed(BASE_MESSAGE)
+    }
+}
+
+/// Use a full backtrace as tracing data
+///
+/// Capture the entire backtrace which may be expensive. This implementation does not force capture
+/// in the event that backtraces are disabled at runtime, so the exact overhead can still be
+/// controlled a little.
+///
+/// N.B. the [`Backtrace`] needs to be wrapped in an Arc as backtraces are not [`Clone`].
+impl Reportable for MutexDep<Arc<Backtrace>> {
+    fn capture() -> Self {
+        Self(Arc::new(Backtrace::capture()))
+    }
+
+    fn panic_message(trace: &[Self]) -> Cow<'static, str> {
+        let mut message = format!("{BASE_MESSAGE}\n");
+
+        for entry in trace {
+            let _ = writeln!(message, "{}", entry.0);
+        }
+
+        message.into()
+    }
+}
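The remark about not forcing capture refers to `Backtrace::capture`, which consults the `RUST_BACKTRACE`/`RUST_LIB_BACKTRACE` environment variables at runtime, so even a build with the `backtraces` feature enabled can keep capture cheap. A small sketch of that runtime switch:

    use std::backtrace::{Backtrace, BacktraceStatus};

    fn main() {
        // With the `backtraces` feature compiled in, this is what `Dep::capture`
        // ends up calling. Whether frames are actually recorded is decided at
        // runtime by RUST_BACKTRACE / RUST_LIB_BACKTRACE.
        let trace = Backtrace::capture();
        match trace.status() {
            BacktraceStatus::Captured => println!("{trace}"),
            _ => println!("backtrace capture disabled at runtime"),
        }
    }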
src/stdsync.rs (150 changes)

@@ -21,10 +21,14 @@
 pub use std::sync as raw;

 #[cfg(not(debug_assertions))]
-pub use std::sync::{Condvar, Mutex, MutexGuard, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub use std::sync::{
+    Condvar, Mutex, MutexGuard, Once, OnceLock, RwLock, RwLockReadGuard, RwLockWriteGuard,
+};

 #[cfg(debug_assertions)]
-pub use tracing::{Condvar, Mutex, MutexGuard, Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub use tracing::{
+    Condvar, Mutex, MutexGuard, Once, OnceLock, RwLock, RwLockReadGuard, RwLockWriteGuard,
+};

 /// Dependency tracing versions of [`std::sync`].
 pub mod tracing {
@@ -427,8 +431,8 @@ pub mod tracing {

     /// Wrapper around [`std::sync::Once`].
     ///
-    /// Refer to the [crate-level][`crate`] documentation for the differences between this struct and
-    /// the one it wraps.
+    /// Refer to the [crate-level][`crate`] documentation for the differences between this struct
+    /// and the one it wraps.
     #[derive(Debug)]
     pub struct Once {
         inner: sync::Once,
@@ -479,6 +483,142 @@ pub mod tracing {
         }
     }

+    /// Wrapper for [`std::sync::OnceLock`]
+    ///
+    /// The exact locking behaviour of [`std::sync::OnceLock`] is currently undefined, but may
+    /// deadlock in the event of reentrant initialization attempts. This wrapper participates in
+    /// cycle detection as normal and will therefore panic in the event of reentrancy.
+    ///
+    /// Most of this primitive's methods do not involve locking and as such are simply passed
+    /// through to the inner implementation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use tracing_mutex::stdsync::tracing::OnceLock;
+    ///
+    /// static LOCK: OnceLock<i32> = OnceLock::new();
+    /// assert!(LOCK.get().is_none());
+    ///
+    /// std::thread::spawn(|| {
+    ///     let value: &i32 = LOCK.get_or_init(|| 42);
+    ///     assert_eq!(value, &42);
+    /// }).join().unwrap();
+    ///
+    /// let value: Option<&i32> = LOCK.get();
+    /// assert_eq!(value, Some(&42));
+    /// ```
+    #[derive(Debug)]
+    pub struct OnceLock<T> {
+        id: LazyMutexId,
+        inner: sync::OnceLock<T>,
+    }
+
+    // N.B. this impl inlines everything that directly calls the inner implementation as there
+    // should be 0 overhead to doing so.
+    impl<T> OnceLock<T> {
+        /// Creates a new empty cell
+        pub const fn new() -> Self {
+            Self {
+                id: LazyMutexId::new(),
+                inner: sync::OnceLock::new(),
+            }
+        }
+
+        /// Gets a reference to the underlying value.
+        ///
+        /// This method does not attempt to lock and therefore does not participate in cycle
+        /// detection.
+        #[inline]
+        pub fn get(&self) -> Option<&T> {
+            self.inner.get()
+        }
+
+        /// Gets a mutable reference to the underlying value.
+        ///
+        /// This method does not attempt to lock and therefore does not participate in cycle
+        /// detection.
+        #[inline]
+        pub fn get_mut(&mut self) -> Option<&mut T> {
+            self.inner.get_mut()
+        }
+
+        /// Sets the contents of this cell to the underlying value
+        ///
+        /// As this method may block until initialization is complete, it participates in cycle
+        /// detection.
+        pub fn set(&self, value: T) -> Result<(), T> {
+            let _guard = self.id.get_borrowed();

+            self.inner.set(value)
+        }
+
+        /// Gets the contents of the cell, initializing it with `f` if the cell was empty.
+        ///
+        /// This method participates in cycle detection. Reentrancy is considered a cycle.
+        pub fn get_or_init<F>(&self, f: F) -> &T
+        where
+            F: FnOnce() -> T,
+        {
+            let _guard = self.id.get_borrowed();
+            self.inner.get_or_init(f)
+        }
+
+        /// Takes the value out of this `OnceLock`, moving it back to an uninitialized state.
+        ///
+        /// This method does not attempt to lock and therefore does not participate in cycle
+        /// detection.
+        #[inline]
+        pub fn take(&mut self) -> Option<T> {
+            self.inner.take()
+        }
+
+        /// Consumes the `OnceLock`, returning the wrapped value. Returns None if the cell was
+        /// empty.
+        ///
+        /// This method does not attempt to lock and therefore does not participate in cycle
+        /// detection.
+        #[inline]
+        pub fn into_inner(mut self) -> Option<T> {
+            self.take()
+        }
+    }
+
+    impl<T> Default for OnceLock<T> {
+        #[inline]
+        fn default() -> Self {
+            Self::new()
+        }
+    }
+
+    impl<T: PartialEq> PartialEq for OnceLock<T> {
+        #[inline]
+        fn eq(&self, other: &Self) -> bool {
+            self.inner == other.inner
+        }
+    }
+
+    impl<T: Eq> Eq for OnceLock<T> {}
+
+    impl<T: Clone> Clone for OnceLock<T> {
+        fn clone(&self) -> Self {
+            Self {
+                id: LazyMutexId::new(),
+                inner: self.inner.clone(),
+            }
+        }
+    }
+
+    impl<T> From<T> for OnceLock<T> {
+        #[inline]
+        fn from(value: T) -> Self {
+            Self {
+                id: LazyMutexId::new(),
+                inner: sync::OnceLock::from(value),
+            }
+        }
+    }
+
     #[cfg(test)]
     mod tests {
         use std::sync::Arc;
@@ -556,7 +696,7 @@ pub mod tracing {
         }

         #[test]
-        #[should_panic(expected = "Mutex order graph should not have cycles")]
+        #[should_panic(expected = "Found cycle in mutex dependency graph")]
         fn test_detect_cycle() {
             let a = Mutex::new(());
             let b = Mutex::new(());
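Because the wrapper takes its lock guard before delegating, a reentrant `get_or_init` shows up as a self-edge in the dependency graph and panics instead of deadlocking, as the doc comment above promises. A hedged sketch of that behaviour:

    use tracing_mutex::stdsync::tracing::OnceLock;

    fn main() {
        static CELL: OnceLock<i32> = OnceLock::new();

        // Normal initialization works exactly as with std's OnceLock.
        assert_eq!(*CELL.get_or_init(|| 1), 1);
        assert_eq!(CELL.get(), Some(&1));

        // Reentrant initialization, by contrast, would panic with the cycle
        // report rather than deadlock (left commented out on purpose):
        // CELL.get_or_init(|| *CELL.get_or_init(|| 2));
    }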