Mirror of https://github.com/bertptrs/tracing-mutex.git, synced 2025-12-25 12:40:31 +01:00.
Refactor MutexID to be self tracking
This avoids the need to implement Drop on every wrapped mutex, and removes the need for unsafe code in this crate.
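The pattern, in a minimal self-contained sketch: an ID that deregisters itself in its own `Drop` means any wrapper that owns a `MutexId` gets cleanup from plain field drop order, with no per-wrapper `Drop` impl. The `registry` function and the eager insert in `main` below are illustrative stand-ins, not the crate's API; the real code keeps a dependency graph and only adds a node once an edge involving it is recorded.

```rust
use std::collections::HashSet;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Mutex, OnceLock};

// Illustrative stand-in for the crate's global dependency graph.
fn registry() -> &'static Mutex<HashSet<usize>> {
    static REGISTRY: OnceLock<Mutex<HashSet<usize>>> = OnceLock::new();
    REGISTRY.get_or_init(|| Mutex::new(HashSet::new()))
}

/// A self-tracking ID: it cleans up after itself when dropped, so a wrapper
/// that owns one gets deregistration "for free" from field drop order.
struct MutexId(usize);

impl MutexId {
    fn new() -> Self {
        static COUNTER: AtomicUsize = AtomicUsize::new(0);
        MutexId(COUNTER.fetch_add(1, Ordering::Relaxed))
    }

    fn value(&self) -> usize {
        self.0
    }
}

impl Drop for MutexId {
    fn drop(&mut self) {
        // In the real crate this is get_depedency_graph().remove_node(self.value()).
        registry().lock().unwrap().remove(&self.value());
    }
}

fn main() {
    let id = MutexId::new();
    registry().lock().unwrap().insert(id.value());
    assert!(registry().lock().unwrap().contains(&id.value()));

    let value = id.value();
    drop(id); // MutexId::drop removes the entry; no wrapper Drop, no unsafe.
    assert!(!registry().lock().unwrap().contains(&value));
}
```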
src/graph.rs (67 changed lines)

@@ -1,7 +1,6 @@
 use std::collections::HashMap;
 use std::collections::HashSet;
-
-use crate::MutexId;
+use std::hash::Hash;
 
 type Order = usize;
 
@@ -20,19 +19,25 @@ type Order = usize;
 ///
 /// [paper]: https://whileydave.com/publications/pk07_jea/
 #[derive(Clone, Default, Debug)]
-pub struct DiGraph {
-    in_edges: HashMap<MutexId, HashSet<MutexId>>,
-    out_edges: HashMap<MutexId, HashSet<MutexId>>,
+pub struct DiGraph<V>
+where
+    V: Eq + Hash + Copy,
+{
+    in_edges: HashMap<V, HashSet<V>>,
+    out_edges: HashMap<V, HashSet<V>>,
     /// Next topological sort order
     next_ord: Order,
     /// Poison flag, set if a cycle is detected when adding a new edge and
     /// unset when removing a node successfully removed the cycle.
     contains_cycle: bool,
     /// Topological sort order. Order is not guaranteed to be contiguous
-    ord: HashMap<MutexId, Order>,
+    ord: HashMap<V, Order>,
 }
 
-impl DiGraph {
+impl<V> DiGraph<V>
+where
+    V: Eq + Hash + Copy,
+{
     /// Add a new node to the graph.
     ///
     /// If the node already existed, this function does not add it and uses the existing node data.
@@ -40,7 +45,7 @@ impl DiGraph {
     /// the node in the topological order.
     ///
     /// New nodes are appended to the end of the topological order when added.
-    fn add_node(&mut self, n: MutexId) -> (&mut HashSet<MutexId>, &mut HashSet<MutexId>, Order) {
+    fn add_node(&mut self, n: V) -> (&mut HashSet<V>, &mut HashSet<V>, Order) {
         let next_ord = &mut self.next_ord;
         let in_edges = self.in_edges.entry(n).or_default();
         let out_edges = self.out_edges.entry(n).or_default();
@@ -54,7 +59,7 @@ impl DiGraph {
         (in_edges, out_edges, order)
     }
 
-    pub(crate) fn remove_node(&mut self, n: MutexId) -> bool {
+    pub(crate) fn remove_node(&mut self, n: V) -> bool {
         match self.out_edges.remove(&n) {
             None => false,
             Some(out_edges) => {
@@ -79,7 +84,7 @@ impl DiGraph {
     /// Add an edge to the graph
     ///
    /// Nodes, both from and to, are created as needed when creating new edges.
-    pub(crate) fn add_edge(&mut self, x: MutexId, y: MutexId) -> bool {
+    pub(crate) fn add_edge(&mut self, x: V, y: V) -> bool {
         if x == y {
             // self-edges are not considered cycles
             return false;
@@ -120,13 +125,7 @@ impl DiGraph {
     }
 
     /// Forwards depth-first-search
-    fn dfs_f(
-        &self,
-        n: MutexId,
-        ub: Order,
-        visited: &mut HashSet<MutexId>,
-        delta_f: &mut Vec<MutexId>,
-    ) -> bool {
+    fn dfs_f(&self, n: V, ub: Order, visited: &mut HashSet<V>, delta_f: &mut Vec<V>) -> bool {
         visited.insert(n);
         delta_f.push(n);
 
@@ -147,13 +146,7 @@ impl DiGraph {
     }
 
     /// Backwards depth-first-search
-    fn dfs_b(
-        &self,
-        n: MutexId,
-        lb: Order,
-        visited: &mut HashSet<MutexId>,
-        delta_b: &mut Vec<MutexId>,
-    ) {
+    fn dfs_b(&self, n: V, lb: Order, visited: &mut HashSet<V>, delta_b: &mut Vec<V>) {
         visited.insert(n);
         delta_b.push(n);
 
@@ -164,7 +157,7 @@ impl DiGraph {
         }
     }
 
-    fn reorder(&mut self, mut delta_f: Vec<MutexId>, mut delta_b: Vec<MutexId>) {
+    fn reorder(&mut self, mut delta_f: Vec<V>, mut delta_b: Vec<V>) {
         self.sort(&mut delta_f);
         self.sort(&mut delta_b);
 
@@ -190,7 +183,7 @@ impl DiGraph {
         }
     }
 
-    fn sort(&self, ids: &mut [MutexId]) {
+    fn sort(&self, ids: &mut [V]) {
         // Can use unstable sort because mutex ids should not be equal
         ids.sort_unstable_by_key(|v| self.ord[v]);
     }
@@ -239,10 +232,10 @@ impl DiGraph {
     /// Helper function for `Self::recompute_topological_order`.
     fn visit(
         &self,
-        v: MutexId,
-        permanent_marks: &mut HashSet<MutexId>,
-        temporary_marks: &mut HashSet<MutexId>,
-        rev_order: &mut Vec<MutexId>,
+        v: V,
+        permanent_marks: &mut HashSet<V>,
+        temporary_marks: &mut HashSet<V>,
+        rev_order: &mut Vec<V>,
     ) -> bool {
         if permanent_marks.contains(&v) {
             return true;
@@ -271,28 +264,26 @@ impl DiGraph {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::MutexId;
 
     #[test]
     fn test_digraph() {
-        let id: Vec<MutexId> = (0..5).map(|_| MutexId::new()).collect();
         let mut graph = DiGraph::default();
 
         // Add some safe edges
-        graph.add_edge(id[0], id[1]);
-        graph.add_edge(id[1], id[2]);
-        graph.add_edge(id[2], id[3]);
-        graph.add_edge(id[4], id[2]);
+        graph.add_edge(0, 1);
+        graph.add_edge(1, 2);
+        graph.add_edge(2, 3);
+        graph.add_edge(4, 2);
 
         // Should not have a cycle yet
         assert!(!graph.has_cycles());
 
         // Introduce cycle 3 → 1 → 2 → 3
-        graph.add_edge(id[3], id[1]);
+        graph.add_edge(3, 1);
         assert!(graph.has_cycles());
 
         // Removing 3 should remove that cycle
-        assert!(graph.remove_node(id[3]));
+        assert!(graph.remove_node(3));
         assert!(!graph.has_cycles())
     }
 }
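With the vertex type now generic over `V: Eq + Hash + Copy`, the graph can be driven with plain `usize` keys, which is exactly what `lib.rs` does below. A small sketch restating the updated unit test in lock-order terms (it would have to live inside the crate, since `DiGraph` and its methods are `pub(crate)`):

```rust
#[cfg(test)]
mod lock_order_demo {
    // Assumes this module sits next to DiGraph in src/graph.rs.
    use super::*;

    #[test]
    fn edges_model_lock_acquisition_order() {
        let mut graph = DiGraph::default();

        // Edge (a, b) records "a was held while b was acquired".
        graph.add_edge(0usize, 1);
        graph.add_edge(1, 2);
        assert!(!graph.has_cycles());

        // Closing the loop 2 -> 0 means a deadlock-capable order exists.
        graph.add_edge(2, 0);
        assert!(graph.has_cycles());

        // Removing a node (as MutexId::drop now does via remove_node)
        // can clear the cycle again.
        graph.remove_node(2);
        assert!(!graph.has_cycles());
    }
}
```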
src/lib.rs (29 changed lines)

@@ -70,11 +70,11 @@ thread_local! {
     ///
     /// Assuming that locks are roughly released in the reverse order in which they were acquired,
     /// a stack should be more efficient to keep track of the current state than a set would be.
-    static HELD_LOCKS: RefCell<Vec<MutexId>> = RefCell::new(Vec::new());
+    static HELD_LOCKS: RefCell<Vec<usize>> = RefCell::new(Vec::new());
 }
 
 lazy_static! {
-    static ref DEPENDENCY_GRAPH: Mutex<DiGraph> = Default::default();
+    static ref DEPENDENCY_GRAPH: Mutex<DiGraph<usize>> = Default::default();
 }
 
 /// Dedicated ID type for Mutexes
@@ -86,7 +86,6 @@ lazy_static! {
 ///
 /// One possible alteration is to make this type not `Copy` but `Drop`, and handle deregistering
 /// the lock from there.
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
 struct MutexId(usize);
 
 impl MutexId {
@@ -105,6 +104,10 @@ impl MutexId {
             .expect("Mutex ID wraparound happened, results unreliable")
     }
 
+    pub fn value(&self) -> usize {
+        self.0
+    }
+
     /// Get a borrowed guard for this lock.
     ///
     /// This method adds this Mutex ID to the dependency graph as needed, and adds the
@@ -113,12 +116,12 @@ impl MutexId {
     /// # Panics
     ///
     /// This method panics if the new dependency would introduce a cycle.
-    pub fn get_borrowed(self) -> BorrowedMutex {
+    pub fn get_borrowed(&self) -> BorrowedMutex {
         let creates_cycle = HELD_LOCKS.with(|locks| {
             if let Some(&previous) = locks.borrow().last() {
                 let mut graph = get_depedency_graph();
 
-                graph.add_edge(previous, self) && graph.has_cycles()
+                graph.add_edge(previous, self.value()) && graph.has_cycles()
             } else {
                 false
             }
@@ -129,7 +132,7 @@ impl MutexId {
             panic!("Mutex order graph should not have cycles");
         }
 
-        HELD_LOCKS.with(|locks| locks.borrow_mut().push(self));
+        HELD_LOCKS.with(|locks| locks.borrow_mut().push(self.value()));
         BorrowedMutex(self)
     }
 }
@@ -140,8 +143,14 @@ impl fmt::Debug for MutexId {
     }
 }
 
+impl Drop for MutexId {
+    fn drop(&mut self) {
+        get_depedency_graph().remove_node(self.value());
+    }
+}
+
 #[derive(Debug)]
-struct BorrowedMutex(MutexId);
+struct BorrowedMutex<'a>(&'a MutexId);
 
 /// Drop a lock held by the current thread.
 ///
@@ -149,7 +158,7 @@ struct BorrowedMutex(MutexId);
 ///
 /// This function panics if the lock did not appear to be handled by this thread. If that happens,
 /// that is an indication of a serious design flaw in this library.
-impl Drop for BorrowedMutex {
+impl<'a> Drop for BorrowedMutex<'a> {
     fn drop(&mut self) {
         let id = self.0;
 
@@ -157,7 +166,7 @@ impl Drop for BorrowedMutex {
             let mut locks = locks.borrow_mut();
 
             for (i, &lock) in locks.iter().enumerate().rev() {
-                if lock == id {
+                if lock == id.value() {
                     locks.remove(i);
                     return;
                 }
@@ -170,7 +179,7 @@ impl Drop for BorrowedMutex {
 }
 
 /// Get a reference to the current dependency graph
-fn get_depedency_graph() -> impl DerefMut<Target = DiGraph> {
+fn get_depedency_graph() -> impl DerefMut<Target = DiGraph<usize>> {
     DEPENDENCY_GRAPH
         .lock()
         .unwrap_or_else(PoisonError::into_inner)
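The observable behaviour is unchanged by this refactor: acquiring a tracked mutex while another is held records an edge from the held lock's ID to the new one, and an edge that closes a cycle triggers the panic above instead of allowing a deadlock-capable order. A usage sketch of that behaviour (the `tracing_mutex::stdsync::TracingMutex` path matches the crate's public API today; the exact module layout at this commit is assumed):

```rust
use tracing_mutex::stdsync::TracingMutex;

fn main() {
    let first = TracingMutex::new(());
    let second = TracingMutex::new(());

    // Establish the order first -> second; an edge between their IDs is recorded.
    {
        let _a = first.lock().unwrap();
        let _b = second.lock().unwrap();
    }

    // Locking in the opposite order adds the reverse edge, which closes a cycle
    // in the dependency graph, so this panics with
    // "Mutex order graph should not have cycles" instead of risking a deadlock.
    let _b = second.lock().unwrap();
    let _a = first.lock().unwrap();
}
```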
@@ -14,10 +14,8 @@
 //! rwlock.read().unwrap();
 //! ```
 use std::fmt;
-use std::mem;
 use std::ops::Deref;
 use std::ops::DerefMut;
-use std::ptr;
 use std::sync::LockResult;
 use std::sync::Mutex;
 use std::sync::MutexGuard;
@@ -28,7 +26,6 @@ use std::sync::RwLockWriteGuard;
 use std::sync::TryLockError;
 use std::sync::TryLockResult;
 
-use crate::get_depedency_graph;
 use crate::BorrowedMutex;
 use crate::MutexId;
 
@@ -69,7 +66,7 @@ pub struct TracingMutex<T> {
 #[derive(Debug)]
 pub struct TracingMutexGuard<'a, T> {
     inner: MutexGuard<'a, T>,
-    mutex: BorrowedMutex,
+    mutex: BorrowedMutex<'a>,
 }
 
 fn map_lockresult<T, I, F>(result: LockResult<I>, mapper: F) -> LockResult<T>
@@ -138,17 +135,7 @@ impl<T> TracingMutex<T> {
     }
 
     pub fn into_inner(self) -> LockResult<T> {
-        self.deregister();
-
-        // Safety: we forget the original immediately after
-        let inner = unsafe { ptr::read(&self.inner) };
-        mem::forget(self);
-
-        inner.into_inner()
-    }
-
-    fn deregister(&self) {
-        get_depedency_graph().remove_node(self.id);
+        self.inner.into_inner()
     }
 }
 
@@ -164,12 +151,6 @@ impl<T> From<T> for TracingMutex<T> {
     }
 }
 
-impl<T> Drop for TracingMutex<T> {
-    fn drop(&mut self) {
-        self.deregister();
-    }
-}
-
 impl<'a, T> Deref for TracingMutexGuard<'a, T> {
     type Target = MutexGuard<'a, T>;
 
@@ -201,15 +182,15 @@ pub struct TracingRwLock<T> {
 ///
 /// Please refer to [`TracingReadGuard`] and [`TracingWriteGuard`] for usable types.
 #[derive(Debug)]
-pub struct TracingRwLockGuard<L> {
+pub struct TracingRwLockGuard<'a, L> {
     inner: L,
-    mutex: BorrowedMutex,
+    mutex: BorrowedMutex<'a>,
 }
 
 /// Wrapper around [`std::sync::RwLockReadGuard`].
-pub type TracingReadGuard<'a, T> = TracingRwLockGuard<RwLockReadGuard<'a, T>>;
+pub type TracingReadGuard<'a, T> = TracingRwLockGuard<'a, RwLockReadGuard<'a, T>>;
 /// Wrapper around [`std::sync::RwLockWriteGuard`].
-pub type TracingWriteGuard<'a, T> = TracingRwLockGuard<RwLockWriteGuard<'a, T>>;
+pub type TracingWriteGuard<'a, T> = TracingRwLockGuard<'a, RwLockWriteGuard<'a, T>>;
 
 impl<T> TracingRwLock<T> {
     pub fn new(t: T) -> Self {
@@ -256,24 +237,7 @@ impl<T> TracingRwLock<T> {
     }
 
     pub fn into_inner(self) -> LockResult<T> {
-        self.deregister();
-
-        // Grab our contents and then forget ourselves
-        // Safety: we immediately forget the mutex after copying
-        let inner = unsafe { ptr::read(&self.inner) };
-        mem::forget(self);
-
-        inner.into_inner()
-    }
-
-    fn deregister(&self) {
-        get_depedency_graph().remove_node(self.id);
-    }
-}
-
-impl<T> Drop for TracingRwLock<T> {
-    fn drop(&mut self) {
-        self.deregister();
+        self.inner.into_inner()
     }
 }
 
@@ -286,7 +250,7 @@ where
     }
 }
 
-impl<L, T> Deref for TracingRwLockGuard<L>
+impl<'a, L, T> Deref for TracingRwLockGuard<'a, L>
 where
     L: Deref<Target = T>,
 {
@@ -297,7 +261,7 @@ where
     }
 }
 
-impl<T, L> DerefMut for TracingRwLockGuard<L>
+impl<'a, T, L> DerefMut for TracingRwLockGuard<'a, L>
 where
     L: Deref<Target = T> + DerefMut,
 {
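All of the wrapper changes above follow from the same shift: with deregistration living in `MutexId::drop`, the wrappers need no `deregister` helper, no `Drop` impl, and no `ptr::read`/`mem::forget` in `into_inner`. A minimal sketch of why the unsafe code becomes unnecessary (the `Id` and `Wrapper` types are illustrative, not the crate's): because the wrapper itself no longer implements `Drop`, its inner value can simply be moved out, and the remaining ID field is dropped normally afterwards.

```rust
use std::sync::Mutex;

struct Id(usize);

impl Drop for Id {
    fn drop(&mut self) {
        // Stand-in for get_depedency_graph().remove_node(self.value()).
        println!("deregistering {}", self.0);
    }
}

struct Wrapper<T> {
    id: Id,
    inner: Mutex<T>,
}

impl<T> Wrapper<T> {
    // Because Wrapper has no Drop impl, `self.inner` can be moved out
    // directly (a partial move); the leftover `id` field is dropped
    // afterwards and runs its cleanup. No ptr::read + mem::forget needed.
    fn into_inner(self) -> T {
        self.inner.into_inner().unwrap()
    }
}

fn main() {
    let wrapper = Wrapper {
        id: Id(1),
        inner: Mutex::new(42),
    };
    println!("wrapping id {}", wrapper.id.0);

    let value = wrapper.into_inner(); // prints "deregistering 1" via Id::drop
    assert_eq!(value, 42);
}
```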