32: Capture backtraces for mutex dependencies r=bertptrs a=bertptrs

Builds on top of #28.

This PR adds backtrace data to the dependency graph, so you can figure out what series of events might have introduced the cycle in dependencies. Only the first backtrace for each dependency is captured; if the same pair of mutexes is locked in the same order again later, the original backtrace is kept.
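As a rough illustration, the locking pattern such a report is meant to explain boils down to something like the following (a condensed sketch of the `examples/mutex_cycle.rs` example included further down):

```rust
use tracing_mutex::stdsync::Mutex;

fn main() {
    let a = Mutex::new(());
    let b = Mutex::new(());

    // The dependency a -> b is recorded, together with a backtrace of this spot.
    {
        let _a = a.lock();
        let _b = b.lock();
    }

    // Locking in the opposite order closes the cycle b -> a; the panic can now
    // point back to where a -> b was first created.
    let _b = b.lock();
    let _a = a.lock(); // panics with a report like the one below
}
```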

These changes do have a performance penalty, with a worst-case degradation of 20-50% over previous results. That worst case assumes every dependency between mutexes is new, so realistic workloads are unlikely to be hit as severely. The exact cost also depends on the backtrace level captured (off, 1, or full) and on how many dependencies are involved.

Below is an example of what this can look like, generated with `examples/mutex_cycle.rs`. The formatting is decidedly suboptimal, but backtraces cannot be formatted very well in stable Rust at the moment.

```
thread 'main' panicked at 'Found cycle in mutex dependency graph:
   0: tracing_mutex::MutexDep::capture
             at ./src/lib.rs:278:23
   1: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
   2: tracing_mutex::graph::DiGraph<V,E>::add_edge
             at ./src/graph.rs:131:50
   3: tracing_mutex::MutexId::mark_held::{{closure}}
             at ./src/lib.rs:146:17
   4: std::thread::local::LocalKey<T>::try_with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:270:16
   5: std::thread::local::LocalKey<T>::with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:246:9
   6: tracing_mutex::MutexId::mark_held
             at ./src/lib.rs:142:25
   7: tracing_mutex::MutexId::get_borrowed
             at ./src/lib.rs:129:9
   8: tracing_mutex::stdsync::tracing::Mutex<T>::lock
             at ./src/stdsync.rs:110:25
   9: mutex_cycle::main
             at ./examples/mutex_cycle.rs:20:18
  10: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
  11: std::sys_common::backtrace::__rust_begin_short_backtrace
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/sys_common/backtrace.rs:135:18
  12: std::rt::lang_start::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:166:18
  13: core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &F>::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:284:13
  14: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  15: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  16: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  17: std::rt::lang_start_internal::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:48
  18: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  19: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  20: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  21: std::rt::lang_start_internal
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:20
  22: std::rt::lang_start
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:165:17
  23: main
  24: <unknown>
  25: __libc_start_main
  26: _start

   0: tracing_mutex::MutexDep::capture
             at ./src/lib.rs:278:23
   1: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
   2: tracing_mutex::graph::DiGraph<V,E>::add_edge
             at ./src/graph.rs:131:50
   3: tracing_mutex::MutexId::mark_held::{{closure}}
             at ./src/lib.rs:146:17
   4: std::thread::local::LocalKey<T>::try_with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:270:16
   5: std::thread::local::LocalKey<T>::with
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/thread/local.rs:246:9
   6: tracing_mutex::MutexId::mark_held
             at ./src/lib.rs:142:25
   7: tracing_mutex::MutexId::get_borrowed
             at ./src/lib.rs:129:9
   8: tracing_mutex::stdsync::tracing::Mutex<T>::lock
             at ./src/stdsync.rs:110:25
   9: mutex_cycle::main
             at ./examples/mutex_cycle.rs:14:18
  10: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
  11: std::sys_common::backtrace::__rust_begin_short_backtrace
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/sys_common/backtrace.rs:135:18
  12: std::rt::lang_start::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:166:18
  13: core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &F>::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:284:13
  14: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  15: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  16: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  17: std::rt::lang_start_internal::{{closure}}
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:48
  18: std::panicking::try::do_call
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:500:40
  19: std::panicking::try
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:464:19
  20: std::panic::catch_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panic.rs:142:14
  21: std::rt::lang_start_internal
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:148:20
  22: std::rt::lang_start
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/rt.rs:165:17
  23: main
  24: <unknown>
  25: __libc_start_main
  26: _start

', src/lib.rs:163:13
stack backtrace:
   0: rust_begin_unwind
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/std/src/panicking.rs:593:5
   1: core::panicking::panic_fmt
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/panicking.rs:67:14
   2: tracing_mutex::MutexId::mark_held
             at ./src/lib.rs:163:13
   3: tracing_mutex::MutexId::get_borrowed
             at ./src/lib.rs:129:9
   4: tracing_mutex::stdsync::tracing::Mutex<T>::lock
             at ./src/stdsync.rs:110:25
   5: mutex_cycle::main
             at ./examples/mutex_cycle.rs:25:14
   6: core::ops::function::FnOnce::call_once
             at /rustc/eb26296b556cef10fb713a38f3d16b9886080f26/library/core/src/ops/function.rs:250:5
```

Importantly, the error shows the dependencies that were already part of the graph, not the one that was just added, since the latter is already visible from the panic's own backtrace.

Co-authored-by: Bert Peters <bert@bertptrs.nl>
bors[bot] · 2023-09-09 09:24:46 +00:00 · committed by GitHub
7 changed files with 198 additions and 59 deletions

CHANGELOG.md

@@ -11,6 +11,7 @@ adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 - The minimum supported Rust version is now defined as 1.70. Previously it was undefined.
 - Wrappers for `std::sync` primitives can now be `const` constructed.
 - Add support for `std::sync::OnceLock`
+- Added backtraces of mutex allocations to the cycle report.
 ### Breaking

Cargo.toml

@@ -31,6 +31,8 @@ name = "mutex"
 harness = false
 [features]
+default = ["backtraces"]
+backtraces = []
 # Feature names do not match crate names pending namespaced features.
 lockapi = ["lock_api"]
 parkinglot = ["parking_lot", "lockapi"]
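For downstream users this presumably means backtrace collection can be disabled at compile time through the usual Cargo feature mechanism; a hypothetical consumer manifest (version requirement is a placeholder) might look like:

```toml
[dependencies]
# Opt out of the default "backtraces" feature to fall back to the
# zero-overhead unit edge data.
tracing-mutex = { version = "*", default-features = false }
```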

examples/mutex_cycle.rs (new file, 26 lines)

@@ -0,0 +1,26 @@
//! Show what a crash looks like
//!
//! This shows what a traceback of a cycle detection looks like. It is expected to crash.
use tracing_mutex::stdsync::Mutex;

fn main() {
    let a = Mutex::new(());
    let b = Mutex::new(());
    let c = Mutex::new(());

    // Create an edge from a to b
    {
        let _a = a.lock();
        let _b = b.lock();
    }

    // Create an edge from b to c
    {
        let _b = b.lock();
        let _c = c.lock();
    }

    // Now crash by trying to add an edge from c to a
    let _c = c.lock();
    let _a = a.lock(); // This line will crash
}

src/graph.rs

@@ -1,4 +1,5 @@
 use std::cell::Cell;
+use std::collections::hash_map::Entry;
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::hash::Hash;
@@ -19,23 +20,24 @@ type Order = usize;
 /// visibly changed.
 ///
 /// [paper]: https://whileydave.com/publications/pk07_jea/
-#[derive(Default, Debug)]
-pub struct DiGraph<V>
+#[derive(Debug)]
+pub struct DiGraph<V, E>
 where
     V: Eq + Hash + Copy,
 {
-    nodes: HashMap<V, Node<V>>,
-    /// Next topological sort order
-    next_ord: Order,
+    nodes: HashMap<V, Node<V, E>>,
+    // Instead of reordering the orders in the graph whenever a node is deleted, we maintain a list
+    // of unused ids that can be handed out later again.
+    unused_order: Vec<Order>,
 }

 #[derive(Debug)]
-struct Node<V>
+struct Node<V, E>
 where
     V: Eq + Hash + Clone,
 {
     in_edges: HashSet<V>,
-    out_edges: HashSet<V>,
+    out_edges: HashMap<V, E>,
     // The "Ord" field is a Cell to ensure we can update it in an immutable context.
     // `std::collections::HashMap` doesn't let you have multiple mutable references to elements, but
     // this way we can use immutable references and still update `ord`. This saves quite a few
@@ -43,7 +45,7 @@ where
     ord: Cell<Order>,
 }

-impl<V> DiGraph<V>
+impl<V, E> DiGraph<V, E>
 where
     V: Eq + Hash + Copy,
 {
@@ -54,12 +56,18 @@ where
     /// the node in the topological order.
     ///
     /// New nodes are appended to the end of the topological order when added.
-    fn add_node(&mut self, n: V) -> (&mut HashSet<V>, &mut HashSet<V>, Order) {
-        let next_ord = &mut self.next_ord;
+    fn add_node(&mut self, n: V) -> (&mut HashSet<V>, &mut HashMap<V, E>, Order) {
+        // need to compute next id before the call to entry() to avoid duplicate borrow of nodes
+        let fallback_id = self.nodes.len();

         let node = self.nodes.entry(n).or_insert_with(|| {
-            let order = *next_ord;
-            *next_ord = next_ord.checked_add(1).expect("Topological order overflow");
+            let order = if let Some(id) = self.unused_order.pop() {
+                // Reuse discarded ordering entry
+                id
+            } else {
+                // Allocate new order id
+                fallback_id
+            };

             Node {
                 ord: Cell::new(order),
@@ -77,9 +85,12 @@ where
             Some(Node {
                 out_edges,
                 in_edges,
-                ..
+                ord,
             }) => {
-                out_edges.into_iter().for_each(|m| {
+                // Return ordering to the pool of unused ones
+                self.unused_order.push(ord.get());
+
+                out_edges.into_keys().for_each(|m| {
                     self.nodes.get_mut(&m).unwrap().in_edges.remove(&n);
                 });
@@ -96,18 +107,29 @@ where
     ///
     /// Nodes, both from and to, are created as needed when creating new edges. If the new edge
     /// would introduce a cycle, the edge is rejected and `false` is returned.
-    pub(crate) fn add_edge(&mut self, x: V, y: V) -> bool {
+    ///
+    /// # Errors
+    ///
+    /// If the edge would introduce the cycle, the underlying graph is not modified and a list of
+    /// all the edge data in the would-be cycle is returned instead.
+    pub(crate) fn add_edge(&mut self, x: V, y: V, e: impl FnOnce() -> E) -> Result<(), Vec<E>>
+    where
+        E: Clone,
+    {
         if x == y {
             // self-edges are always considered cycles
-            return false;
+            return Err(Vec::new());
         }

         let (_, out_edges, ub) = self.add_node(x);

-        if !out_edges.insert(y) {
-            // Edge already exists, nothing to be done
-            return true;
-        }
+        match out_edges.entry(y) {
+            Entry::Occupied(_) => {
+                // Edge already exists, nothing to be done
+                return Ok(());
+            }
+            Entry::Vacant(entry) => entry.insert(e()),
+        };

         let (in_edges, _, lb) = self.add_node(y);
@@ -119,7 +141,7 @@ where
         let mut delta_f = Vec::new();
         let mut delta_b = Vec::new();

-        if !self.dfs_f(&self.nodes[&y], ub, &mut visited, &mut delta_f) {
+        if let Err(cycle) = self.dfs_f(&self.nodes[&y], ub, &mut visited, &mut delta_f) {
             // This edge introduces a cycle, so we want to reject it and remove it from the
             // graph again to keep the "does not contain cycles" invariant.
@@ -129,7 +151,7 @@ where
             self.nodes.get_mut(&x).map(|node| node.out_edges.remove(&y));

             // No edge was added
-            return false;
+            return Err(cycle);
         }

         // No need to check as we should've found the cycle on the forward pass
@@ -141,44 +163,49 @@ where
             self.reorder(delta_f, delta_b);
         }

-        true
+        Ok(())
     }

     /// Forwards depth-first-search
     fn dfs_f<'a>(
         &'a self,
-        n: &'a Node<V>,
+        n: &'a Node<V, E>,
         ub: Order,
         visited: &mut HashSet<V>,
-        delta_f: &mut Vec<&'a Node<V>>,
-    ) -> bool {
+        delta_f: &mut Vec<&'a Node<V, E>>,
+    ) -> Result<(), Vec<E>>
+    where
+        E: Clone,
+    {
         delta_f.push(n);

-        n.out_edges.iter().all(|w| {
+        for (w, e) in &n.out_edges {
             let node = &self.nodes[w];
             let ord = node.ord.get();

             if ord == ub {
                 // Found a cycle
-                false
+                return Err(vec![e.clone()]);
             } else if !visited.contains(w) && ord < ub {
                 // Need to check recursively
                 visited.insert(*w);
-                self.dfs_f(node, ub, visited, delta_f)
-            } else {
-                // Already seen this one or not interesting
-                true
+
+                if let Err(mut chain) = self.dfs_f(node, ub, visited, delta_f) {
+                    chain.push(e.clone());
+                    return Err(chain);
+                }
             }
-        })
+        }
+
+        Ok(())
     }

     /// Backwards depth-first-search
     fn dfs_b<'a>(
         &'a self,
-        n: &'a Node<V>,
+        n: &'a Node<V, E>,
         lb: Order,
         visited: &mut HashSet<V>,
-        delta_b: &mut Vec<&'a Node<V>>,
+        delta_b: &mut Vec<&'a Node<V, E>>,
     ) {
         delta_b.push(n);
@@ -192,7 +219,7 @@ where
         }
     }

-    fn reorder(&self, mut delta_f: Vec<&Node<V>>, mut delta_b: Vec<&Node<V>>) {
+    fn reorder(&self, mut delta_f: Vec<&Node<V, E>>, mut delta_b: Vec<&Node<V, E>>) {
         self.sort(&mut delta_f);
         self.sort(&mut delta_b);
@@ -213,12 +240,25 @@ where
         }
     }

-    fn sort(&self, ids: &mut [&Node<V>]) {
+    fn sort(&self, ids: &mut [&Node<V, E>]) {
         // Can use unstable sort because mutex ids should not be equal
         ids.sort_unstable_by_key(|v| &v.ord);
     }
 }

+// Manual `Default` impl as derive causes unnecessarily strong bounds.
+impl<V, E> Default for DiGraph<V, E>
+where
+    V: Eq + Hash + Copy,
+{
+    fn default() -> Self {
+        Self {
+            nodes: Default::default(),
+            unused_order: Default::default(),
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use rand::seq::SliceRandom;
@@ -226,12 +266,14 @@ mod tests {
     use super::*;

+    fn nop() {}
+
     #[test]
     fn test_no_self_cycle() {
         // Regression test for https://github.com/bertptrs/tracing-mutex/issues/7
         let mut graph = DiGraph::default();

-        assert!(!graph.add_edge(1, 1));
+        assert!(graph.add_edge(1, 1, nop).is_err());
     }

     #[test]
@@ -239,16 +281,16 @@ mod tests {
         let mut graph = DiGraph::default();

         // Add some safe edges
-        assert!(graph.add_edge(0, 1));
-        assert!(graph.add_edge(1, 2));
-        assert!(graph.add_edge(2, 3));
-        assert!(graph.add_edge(4, 2));
+        assert!(graph.add_edge(0, 1, nop).is_ok());
+        assert!(graph.add_edge(1, 2, nop).is_ok());
+        assert!(graph.add_edge(2, 3, nop).is_ok());
+        assert!(graph.add_edge(4, 2, nop).is_ok());

         // Try to add an edge that introduces a cycle
-        assert!(!graph.add_edge(3, 1));
+        assert!(graph.add_edge(3, 1, nop).is_err());

         // Add an edge that should reorder 0 to be after 4
-        assert!(graph.add_edge(4, 0));
+        assert!(graph.add_edge(4, 0, nop).is_ok());
     }

     /// Fuzz the DiGraph implementation by adding a bunch of valid edges.
@@ -256,7 +298,7 @@ mod tests {
     /// This test generates all possible forward edges in a 100-node graph consisting of natural
     /// numbers, shuffles them, then adds them to the graph. This will always be a valid directed,
     /// acyclic graph because there is a trivial order (the natural numbers) but because the edges
-    /// are added in a random order the DiGraph will still occassionally need to reorder nodes.
+    /// are added in a random order the DiGraph will still occasionally need to reorder nodes.
     #[test]
     fn fuzz_digraph() {
         // Note: this fuzzer is quadratic in the number of nodes, so this cannot be too large or it
@@ -277,7 +319,7 @@ mod tests {
         let mut graph = DiGraph::default();

         for (x, y) in edges {
-            assert!(graph.add_edge(x, y));
+            assert!(graph.add_edge(x, y, nop).is_ok());
         }
     }
 }
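To make the new `add_edge` contract concrete, here is a small sketch in the spirit of the unit tests above. `DiGraph` is internal to the crate, so this is illustrative only, and a plain `&'static str` stands in for the real backtrace-carrying `Dep` type:

```rust
// Illustrative only: imagine this living inside the crate's own test module.
fn demo() {
    let mut graph = DiGraph::default();

    // Edge data is produced lazily by the closure, so nothing is captured for
    // edges that already exist in the graph.
    assert!(graph.add_edge(1, 2, || "locked 2 while holding 1").is_ok());
    assert!(graph.add_edge(2, 3, || "locked 3 while holding 2").is_ok());

    // Closing the cycle is rejected; the error carries the stored data for the
    // edges already in the graph (1 -> 2 and 2 -> 3), which is what ends up in
    // the panic message.
    let cycle = graph.add_edge(3, 1, || "locked 1 while holding 3").unwrap_err();
    assert_eq!(cycle.len(), 2);
}
```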

src/lib.rs

@@ -64,6 +64,8 @@ pub use lock_api;
 #[cfg(feature = "parkinglot")]
 #[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
 pub use parking_lot;
+use reporting::Dep;
+use reporting::Reportable;

 use crate::graph::DiGraph;
@@ -74,6 +76,7 @@ pub mod lockapi;
 #[cfg(feature = "parkinglot")]
 #[cfg_attr(docsrs, doc(cfg(feature = "parkinglot")))]
 pub mod parkinglot;
+mod reporting;
 pub mod stdsync;

 thread_local! {
@@ -137,19 +140,18 @@ impl MutexId {
     ///
     /// This method panics if the new dependency would introduce a cycle.
     pub fn mark_held(&self) {
-        let creates_cycle = HELD_LOCKS.with(|locks| {
+        let opt_cycle = HELD_LOCKS.with(|locks| {
             if let Some(&previous) = locks.borrow().last() {
                 let mut graph = get_dependency_graph();
-                !graph.add_edge(previous, self.value())
+                graph.add_edge(previous, self.value(), Dep::capture).err()
             } else {
-                false
+                None
             }
         });

-        if creates_cycle {
-            // Panic without holding the lock to avoid needlessly poisoning it
-            panic!("Mutex order graph should not have cycles");
+        if let Some(cycle) = opt_cycle {
+            panic!("{}", Dep::panic_message(&cycle))
         }

         HELD_LOCKS.with(|locks| locks.borrow_mut().push(self.value()));
@@ -260,8 +262,8 @@ impl<'a> Drop for BorrowedMutex<'a> {
 }

 /// Get a reference to the current dependency graph
-fn get_dependency_graph() -> impl DerefMut<Target = DiGraph<usize>> {
-    static DEPENDENCY_GRAPH: OnceLock<Mutex<DiGraph<usize>>> = OnceLock::new();
+fn get_dependency_graph() -> impl DerefMut<Target = DiGraph<usize, Dep>> {
+    static DEPENDENCY_GRAPH: OnceLock<Mutex<DiGraph<usize, Dep>>> = OnceLock::new();

     DEPENDENCY_GRAPH
         .get_or_init(Default::default)
@@ -292,11 +294,11 @@ mod tests {
         let c = LazyMutexId::new();

         let mut graph = get_dependency_graph();
-        assert!(graph.add_edge(a.value(), b.value()));
-        assert!(graph.add_edge(b.value(), c.value()));
+        assert!(graph.add_edge(a.value(), b.value(), Dep::capture).is_ok());
+        assert!(graph.add_edge(b.value(), c.value(), Dep::capture).is_ok());

         // Creating an edge c → a should fail as it introduces a cycle.
-        assert!(!graph.add_edge(c.value(), a.value()));
+        assert!(graph.add_edge(c.value(), a.value(), Dep::capture).is_err());

         // Drop graph handle so we can drop vertices without deadlocking
         drop(graph);
@@ -304,7 +306,9 @@ mod tests {
         drop(b);

         // If b's destructor correctly ran correctly we can now add an edge from c to a.
-        assert!(get_dependency_graph().add_edge(c.value(), a.value()));
+        assert!(get_dependency_graph()
+            .add_edge(c.value(), a.value(), Dep::capture)
+            .is_ok());
     }

     /// Test creating a cycle, then panicking.

src/reporting.rs (new file, 64 lines)

@@ -0,0 +1,64 @@
//! Cycle reporting primitives
//!
//! This module exposes [`Dep`], which resolves to either something that tracks dependencies or to
//! something that doesn't. It should only be assumed to implement the [`Reportable`] trait.
use std::backtrace::Backtrace;
use std::borrow::Cow;
use std::fmt::Write;
use std::sync::Arc;

#[cfg(feature = "backtraces")]
pub type Dep = MutexDep<Arc<Backtrace>>;
#[cfg(not(feature = "backtraces"))]
pub type Dep = MutexDep<()>;

// Base message to be reported when cycle is detected
const BASE_MESSAGE: &str = "Found cycle in mutex dependency graph:";

pub trait Reportable: Clone {
    /// Capture the current state
    fn capture() -> Self;

    /// Format a trace of state for human readable consumption.
    fn panic_message(trace: &[Self]) -> Cow<'static, str>;
}

#[derive(Clone)]
pub struct MutexDep<T>(T);

/// Use a unit as tracing data: no tracing.
///
/// This should have no runtime overhead for capturing traces and should therefore be cheap enough
/// for most purposes.
impl Reportable for MutexDep<()> {
    fn capture() -> Self {
        Self(())
    }

    fn panic_message(_trace: &[Self]) -> Cow<'static, str> {
        Cow::Borrowed(BASE_MESSAGE)
    }
}

/// Use a full backtrace as tracing data
///
/// Capture the entire backtrace which may be expensive. This implementation does not force capture
/// in the event that backtraces are disabled at runtime, so the exact overhead can still be
/// controlled a little.
///
/// N.B. the [`Backtrace`] needs to be wrapped in an Arc as backtraces are not [`Clone`].
impl Reportable for MutexDep<Arc<Backtrace>> {
    fn capture() -> Self {
        Self(Arc::new(Backtrace::capture()))
    }

    fn panic_message(trace: &[Self]) -> Cow<'static, str> {
        let mut message = format!("{BASE_MESSAGE}\n");

        for entry in trace {
            let _ = writeln!(message, "{}", entry.0);
        }

        message.into()
    }
}
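Since `Dep` only has to implement `Reportable`, trade-offs other than "no data" and "full backtrace" are conceivable. Purely as a hypothetical illustration (not part of this PR), a cheaper implementation could record just the name of the thread that first created each dependency; the trait and wrapper are restated here so the sketch stands on its own:

```rust
use std::borrow::Cow;

// Restated from src/reporting.rs above so this compiles in isolation.
pub trait Reportable: Clone {
    fn capture() -> Self;
    fn panic_message(trace: &[Self]) -> Cow<'static, str>;
}

#[derive(Clone)]
pub struct MutexDep<T>(T);

// Hypothetical: tag each dependency with the thread that first created it.
// Far cheaper than a backtrace, but still a useful hint in the cycle report.
impl Reportable for MutexDep<String> {
    fn capture() -> Self {
        let name = std::thread::current()
            .name()
            .unwrap_or("<unnamed>")
            .to_owned();
        Self(name)
    }

    fn panic_message(trace: &[Self]) -> Cow<'static, str> {
        let mut message = String::from("Found cycle in mutex dependency graph:\n");
        for entry in trace {
            message.push_str("  dependency first created on thread: ");
            message.push_str(&entry.0);
            message.push('\n');
        }
        message.into()
    }
}
```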

src/stdsync.rs

@@ -696,7 +696,7 @@ pub mod tracing {
     }

     #[test]
-    #[should_panic(expected = "Mutex order graph should not have cycles")]
+    #[should_panic(expected = "Found cycle in mutex dependency graph")]
     fn test_detect_cycle() {
         let a = Mutex::new(());
         let b = Mutex::new(());