Rework dependency-tracking to be poison-free

New dependency edges that would introduce a cycle are now simply rejected,
leaving the overall graph unaffected. This simplifies the visible API and also
removes the need to restore the graph afterwards.
This commit is contained in:
2021-05-16 14:16:51 +02:00
parent 440693ab1e
commit 50e99fd07a
4 changed files with 42 additions and 134 deletions

View File

@@ -54,5 +54,6 @@ available for other synchronization primitives.
## Future improvements ## Future improvements
- Improve performance in lock tracing - Improve performance in lock tracing
- Optional logging to make debugging easier
- Better and configurable error handling when detecting cyclic dependencies - Better and configurable error handling when detecting cyclic dependencies
- Support for other locking libraries, such as `parking_lot` - Support for other locking libraries, such as `parking_lot`

View File

@@ -14,8 +14,8 @@ type Order = usize;
/// original paper. /// original paper.
/// ///
/// This digraph tracks its own topological order and updates it when new edges are added to the /// This digraph tracks its own topological order and updates it when new edges are added to the
/// graph. After a cycle has been introduced, the order is no longer kept up to date as it doesn't /// graph. If an edge is added that would create a cycle, that edge is rejected and the graph is not
/// exist, but new edges are still tracked. Nodes are added implicitly when they're used in edges. /// visibly changed.
/// ///
/// [paper]: https://whileydave.com/publications/pk07_jea/ /// [paper]: https://whileydave.com/publications/pk07_jea/
#[derive(Clone, Default, Debug)] #[derive(Clone, Default, Debug)]
@@ -27,9 +27,6 @@ where
out_edges: HashMap<V, HashSet<V>>, out_edges: HashMap<V, HashSet<V>>,
/// Next topological sort order /// Next topological sort order
next_ord: Order, next_ord: Order,
/// Poison flag, set if a cycle is detected when adding a new edge and
/// unset when removing a node successfully removed the cycle.
contains_cycle: bool,
/// Topological sort order. Order is not guaranteed to be contiguous /// Topological sort order. Order is not guaranteed to be contiguous
ord: HashMap<V, Order>, ord: HashMap<V, Order>,
} }
@@ -71,45 +68,49 @@ where
self.out_edges.get_mut(&other).unwrap().remove(&n); self.out_edges.get_mut(&other).unwrap().remove(&n);
} }
if self.contains_cycle {
// Need to build a valid topological order
self.recompute_topological_order();
}
true true
} }
} }
} }
/// Add an edge to the graph /// Attempt to add an edge to the graph
/// ///
/// Nodes, both from and to, are created as needed when creating new edges. /// Nodes, both from and to, are created as needed when creating new edges. If the new edge
/// would introduce a cycle, the edge is rejected and `false` is returned.
pub(crate) fn add_edge(&mut self, x: V, y: V) -> bool { pub(crate) fn add_edge(&mut self, x: V, y: V) -> bool {
if x == y { if x == y {
// self-edges are not considered cycles // self-edges are not considered cycles
return false; return true;
} }
let (_, out_edges, ub) = self.add_node(x); let (_, out_edges, ub) = self.add_node(x);
if !out_edges.insert(y) { if !out_edges.insert(y) {
// Edge already exists, nothing to be done // Edge already exists, nothing to be done
return false; return true;
} }
let (in_edges, _, lb) = self.add_node(y); let (in_edges, _, lb) = self.add_node(y);
in_edges.insert(x); in_edges.insert(x);
if !self.contains_cycle && lb < ub { if lb < ub {
// This edge might introduce a cycle, need to recompute the topological sort // This edge might introduce a cycle, need to recompute the topological sort
let mut visited = HashSet::new(); let mut visited = HashSet::new();
let mut delta_f = Vec::new(); let mut delta_f = Vec::new();
let mut delta_b = Vec::new(); let mut delta_b = Vec::new();
if !self.dfs_f(y, ub, &mut visited, &mut delta_f) { if !self.dfs_f(y, ub, &mut visited, &mut delta_f) {
self.contains_cycle = true; // This edge introduces a cycle, so we want to reject it and remove it from the
return true; // graph again to keep the "does not contain cycles" invariant.
// We use `map` instead of `unwrap()` to avoid a panic path, but we know that these
// entries are present as we just added them above.
self.in_edges.get_mut(&y).map(|nodes| nodes.remove(&x));
self.out_edges.get_mut(&x).map(|nodes| nodes.remove(&y));
// No edge was added
return false;
} }
// No need to check as we should've found the cycle on the forward pass // No need to check as we should've found the cycle on the forward pass
@@ -187,78 +188,6 @@ where
// Can use unstable sort because mutex ids should not be equal // Can use unstable sort because mutex ids should not be equal
ids.sort_unstable_by_key(|v| self.ord[v]); ids.sort_unstable_by_key(|v| self.ord[v]);
} }
pub fn has_cycles(&self) -> bool {
self.contains_cycle
}
/// Attempt to recompute a valid topological order.
///
/// This method uses DFS to find leaf nodes, building the reverse topological order
fn recompute_topological_order(&mut self) {
// This function should only be called when the graph contains a cycle.
debug_assert!(self.contains_cycle);
let mut permanent_marks = HashSet::with_capacity(self.out_edges.len());
let mut temporary_marks = HashSet::new();
let mut rev_order = Vec::with_capacity(self.out_edges.len());
for node in self.out_edges.keys() {
if permanent_marks.contains(node) {
continue;
}
if !self.visit(
*node,
&mut permanent_marks,
&mut temporary_marks,
&mut rev_order,
) {
// Cycle found, no order possible
return;
}
}
// We didn't find a cycle, so we can reset
self.contains_cycle = false;
// Newly allocated order is contiguous 0..rev_order.len()
self.next_ord = rev_order.len();
self.ord.clear();
self.ord
.extend(rev_order.into_iter().rev().enumerate().map(|(k, v)| (v, k)))
}
/// Helper function for `Self::recompute_topological_order`.
fn visit(
&self,
v: V,
permanent_marks: &mut HashSet<V>,
temporary_marks: &mut HashSet<V>,
rev_order: &mut Vec<V>,
) -> bool {
if permanent_marks.contains(&v) {
return true;
}
if !temporary_marks.insert(v) {
return false;
}
if !self.out_edges[&v]
.iter()
.all(|&w| self.visit(w, permanent_marks, temporary_marks, rev_order))
{
return false;
}
temporary_marks.remove(&v);
permanent_marks.insert(v);
rev_order.push(v);
true
}
} }
#[cfg(test)] #[cfg(test)]
@@ -270,20 +199,15 @@ mod tests {
let mut graph = DiGraph::default(); let mut graph = DiGraph::default();
// Add some safe edges // Add some safe edges
graph.add_edge(0, 1); assert!(graph.add_edge(0, 1));
graph.add_edge(1, 2); assert!(graph.add_edge(1, 2));
graph.add_edge(2, 3); assert!(graph.add_edge(2, 3));
graph.add_edge(4, 2); assert!(graph.add_edge(4, 2));
// Should not have a cycle yet // Try to add an edge that introduces a cycle
assert!(!graph.has_cycles()); assert!(!graph.add_edge(3, 1));
// Introduce cycle 3 → 1 → 2 → 3 // Add an edge that should reorder 0 to be after 4
graph.add_edge(3, 1); assert!(graph.add_edge(4, 0));
assert!(graph.has_cycles());
// Removing 3 should remove that cycle
assert!(graph.remove_node(3));
assert!(!graph.has_cycles())
} }
} }

View File

@@ -10,9 +10,10 @@
//! //!
//! The primary method by which this crate signals an invalid lock acquisition order is by //! The primary method by which this crate signals an invalid lock acquisition order is by
//! panicking. When a cycle is created in the dependency graph when acquiring a lock, the thread //! panicking. When a cycle is created in the dependency graph when acquiring a lock, the thread
//! will instead panic. This panic will not poison the underlying mutex. Each following acquired //! will instead panic. This panic will not poison the underlying mutex.
//! that introduces a **new** dependency will also panic, until enough mutexes are deallocated to //!
//! break the cycle in the graph. //! This conflicting dependency is not added to the graph, so future attempts at locking should
//! succeed as normal.
//! //!
//! # Structure //! # Structure
//! //!
@@ -35,9 +36,8 @@
//! //!
//! - **Allocating a lock** performs an atomic update to a shared counter. //! - **Allocating a lock** performs an atomic update to a shared counter.
//! //!
//! - **Deallocating a mutex** temporarily locks the global dependency graph to remove the lock from //! - **Deallocating a mutex** temporarily locks the global dependency graph to remove the lock
//! it. If the graph contained a cycle, a complete scan of the (now pruned) graph is done to //! entry from it.
//! determine if this is still the case.
//! //!
//! These operations have been reasonably optimized, but the performance penalty may yet be too much //! These operations have been reasonably optimized, but the performance penalty may yet be too much
//! for production use. In those cases, it may be beneficial to instead use debug-only versions //! for production use. In those cases, it may be beneficial to instead use debug-only versions
@@ -127,7 +127,7 @@ impl MutexId {
if let Some(&previous) = locks.borrow().last() { if let Some(&previous) = locks.borrow().last() {
let mut graph = get_dependency_graph(); let mut graph = get_dependency_graph();
graph.add_edge(previous, self.value()) && graph.has_cycles() !graph.add_edge(previous, self.value())
} else { } else {
false false
} }
@@ -273,11 +273,6 @@ fn get_dependency_graph() -> impl DerefMut<Target = DiGraph<usize>> {
mod tests { mod tests {
use super::*; use super::*;
lazy_static! {
/// Mutex to isolate tests manipulating the global mutex graph
pub(crate) static ref GRAPH_MUTEX: Mutex<()> = Mutex::new(());
}
#[test] #[test]
fn test_next_mutex_id() { fn test_next_mutex_id() {
let initial = MutexId::new(); let initial = MutexId::new();
@@ -289,26 +284,23 @@ mod tests {
#[test] #[test]
fn test_lazy_mutex_id() { fn test_lazy_mutex_id() {
let _graph_lock = GRAPH_MUTEX.lock();
let a = LazyMutexId::new(); let a = LazyMutexId::new();
let b = LazyMutexId::new(); let b = LazyMutexId::new();
let c = LazyMutexId::new(); let c = LazyMutexId::new();
let mut graph = get_dependency_graph(); let mut graph = get_dependency_graph();
graph.add_edge(a.value(), b.value()); assert!(graph.add_edge(a.value(), b.value()));
graph.add_edge(b.value(), c.value()); assert!(graph.add_edge(b.value(), c.value()));
assert!(!graph.has_cycles()); // Creating an edge c → a should fail as it introduces a cycle.
assert!(!graph.add_edge(c.value(), a.value()));
graph.add_edge(c.value(), a.value());
assert!(graph.has_cycles());
// Drop graph handle so we can drop vertices without deadlocking // Drop graph handle so we can drop vertices without deadlocking
drop(graph); drop(graph);
drop(c);
// If c's destructor correctly ran the graph shouldn't contain cycles anymore drop(b);
assert!(!get_dependency_graph().has_cycles());
// If b's destructor ran correctly, we can now add an edge from c to a.
assert!(get_dependency_graph().add_edge(c.value(), a.value()));
} }
} }

View File

@@ -380,12 +380,9 @@ mod tests {
use std::thread; use std::thread;
use super::*; use super::*;
use crate::tests::GRAPH_MUTEX;
#[test] #[test]
fn test_mutex_usage() { fn test_mutex_usage() {
let _graph_lock = GRAPH_MUTEX.lock();
let mutex = Arc::new(TracingMutex::new(())); let mutex = Arc::new(TracingMutex::new(()));
let mutex_clone = mutex.clone(); let mutex_clone = mutex.clone();
@@ -403,8 +400,6 @@ mod tests {
#[test] #[test]
fn test_rwlock_usage() { fn test_rwlock_usage() {
let _graph_lock = GRAPH_MUTEX.lock();
let rwlock = Arc::new(TracingRwLock::new(())); let rwlock = Arc::new(TracingRwLock::new(()));
let rwlock_clone = rwlock.clone(); let rwlock_clone = rwlock.clone();
@@ -425,8 +420,6 @@ mod tests {
#[test] #[test]
fn test_once_usage() { fn test_once_usage() {
let _graph_lock = GRAPH_MUTEX.lock();
let once = Arc::new(TracingOnce::new()); let once = Arc::new(TracingOnce::new());
let once_clone = once.clone(); let once_clone = once.clone();
@@ -448,8 +441,6 @@ mod tests {
#[test] #[test]
#[should_panic(expected = "Mutex order graph should not have cycles")] #[should_panic(expected = "Mutex order graph should not have cycles")]
fn test_detect_cycle() { fn test_detect_cycle() {
let _graph_lock = GRAPH_MUTEX.lock();
let a = TracingMutex::new(()); let a = TracingMutex::new(());
let b = TracingMutex::new(()); let b = TracingMutex::new(());