Use dedicated type for Mutex IDs

This should prevent tiny mistakes in handling the value.
commit 5f2e0e99a8
parent e42d862f19
date   2021-03-19 21:18:50 +01:00

3 changed files with 89 additions and 45 deletions
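As an aside on the commit message: this is the class of mix-up a dedicated ID type rules out. A hypothetical sketch for illustration, not code from this commit (`register_lock_raw` is invented for contrast; `register_lock` is the real function changed below):

```rust
// With a bare `usize`, any integer is accepted, including values that were
// never issued as mutex IDs.
fn register_lock_raw(lock: usize) { /* ... */ }

// With a newtype, values can only come from a designated constructor, so the
// compiler rejects arbitrary integers at the call site.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct MutexID(usize);

fn register_lock(lock: MutexID) { /* ... */ }

fn main() {
    register_lock_raw(42); // Compiles: any usize passes as an "ID"
    // register_lock(42); // Does not compile: expected `MutexID`, found integer
    register_lock(MutexID(7)); // Only possible where the private field is visible
}
```

Because the wrapped `usize` is a private field, only the crate itself can mint `MutexID` values, so stale or made-up IDs cannot enter the bookkeeping by accident.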


@@ -1,21 +1,23 @@
 use std::collections::HashMap;
 use std::collections::HashSet;
 
+use crate::MutexID;
+
 #[derive(Clone, Default, Debug)]
 pub struct DiGraph {
-    in_edges: HashMap<usize, Vec<usize>>,
-    out_edges: HashMap<usize, Vec<usize>>,
+    in_edges: HashMap<MutexID, Vec<MutexID>>,
+    out_edges: HashMap<MutexID, Vec<MutexID>>,
 }
 
 impl DiGraph {
-    fn add_node(&mut self, node: usize) -> (&mut Vec<usize>, &mut Vec<usize>) {
+    fn add_node(&mut self, node: MutexID) -> (&mut Vec<MutexID>, &mut Vec<MutexID>) {
         let in_edges = self.in_edges.entry(node).or_default();
         let out_edges = self.out_edges.entry(node).or_default();
 
         (in_edges, out_edges)
     }
 
-    pub fn remove_node(&mut self, node: usize) -> bool {
+    pub(crate) fn remove_node(&mut self, node: MutexID) -> bool {
         match self.out_edges.remove(&node) {
             None => false,
             Some(out_edges) => {
@@ -38,7 +40,10 @@ impl DiGraph {
         }
     }
 
-    pub fn add_edge(&mut self, from: usize, to: usize) -> bool {
+    /// Add an edge to the graph
+    ///
+    /// Nodes, both from and to, are created as needed when creating new edges.
+    pub(crate) fn add_edge(&mut self, from: MutexID, to: MutexID) -> bool {
         if from == to {
             return false;
         }
@@ -65,7 +70,12 @@ impl DiGraph {
             .any(|node| !self.visit(node, &mut marks, &mut temp))
     }
 
-    fn visit(&self, node: usize, marks: &mut HashSet<usize>, temp: &mut HashSet<usize>) -> bool {
+    fn visit(
+        &self,
+        node: MutexID,
+        marks: &mut HashSet<MutexID>,
+        temp: &mut HashSet<MutexID>,
+    ) -> bool {
         if marks.contains(&node) {
             return true;
         }
@@ -92,25 +102,29 @@ impl DiGraph {
 #[cfg(test)]
 mod tests {
-    use super::DiGraph;
+    use super::*;
+    use crate::MutexID;
 
     #[test]
     fn test_digraph() {
+        let id: Vec<MutexID> = (0..5).map(|_| MutexID::new()).collect();
         let mut graph = DiGraph::default();
 
-        graph.add_edge(1, 2);
-        graph.add_edge(2, 3);
-        graph.add_edge(3, 4);
-        graph.add_edge(5, 2);
+        // Add some safe edges
+        graph.add_edge(id[0], id[1]);
+        graph.add_edge(id[1], id[2]);
+        graph.add_edge(id[2], id[3]);
+        graph.add_edge(id[4], id[2]);
 
+        // Should not have a cycle yet
         assert!(!graph.has_cycles());
 
-        graph.add_edge(4, 2);
+        // Introduce cycle 3 → 1 → 2 → 3
+        graph.add_edge(id[3], id[1]);
         assert!(graph.has_cycles());
 
-        assert!(graph.remove_node(4));
+        // Removing 3 should remove that cycle
        assert!(graph.remove_node(id[3]));
         assert!(!graph.has_cycles())
     }
 }
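For readers without the full file: `visit` above is the recursive step of a depth-first cycle check. Below is a self-contained sketch of the presumed algorithm, reconstructed from the fragments visible in this diff; it uses standalone functions over plain `usize` nodes, whereas the real code lives in `DiGraph`:

```rust
use std::collections::{HashMap, HashSet};

// Returns true if any node can reach itself through the directed edges.
fn has_cycle(out_edges: &HashMap<usize, Vec<usize>>) -> bool {
    let mut marks = HashSet::new();
    let mut temp = HashSet::new();
    out_edges
        .keys()
        .any(|&node| !visit(node, out_edges, &mut marks, &mut temp))
}

// Returns true when no cycle is reachable from `node`, mirroring the
// boolean convention in the diff.
fn visit(
    node: usize,
    out_edges: &HashMap<usize, Vec<usize>>,
    marks: &mut HashSet<usize>,
    temp: &mut HashSet<usize>,
) -> bool {
    if marks.contains(&node) {
        return true; // Already fully explored, cannot be part of a new cycle
    }
    if !temp.insert(node) {
        return false; // Node is already on the current DFS path: back edge
    }
    for &next in out_edges.get(&node).into_iter().flatten() {
        if !visit(next, out_edges, marks, temp) {
            return false;
        }
    }
    temp.remove(&node);
    marks.insert(node);
    true
}

fn main() {
    let mut graph = HashMap::new();
    graph.insert(1, vec![2]);
    graph.insert(2, vec![3]);
    assert!(!has_cycle(&graph));

    graph.insert(3, vec![1]); // close the loop 1 -> 2 -> 3 -> 1
    assert!(has_cycle(&graph));
}
```

`temp` tracks the nodes on the current DFS path, so re-entering one means a back edge and therefore a cycle; `marks` caches fully explored nodes so each node is visited at most once.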


@@ -13,6 +13,8 @@ mod graph;
 pub mod stdsync;
 
 /// Counter for Mutex IDs. Atomic avoids the need for locking.
+///
+/// Should be part of the `MutexID` impl but static items are not yet a thing.
 static ID_SEQUENCE: AtomicUsize = AtomicUsize::new(0);
 
 thread_local! {
@@ -20,17 +22,40 @@ thread_local! {
     ///
     /// Assuming that locks are roughly released in the reverse order in which they were acquired,
     /// a stack should be more efficient to keep track of the current state than a set would be.
-    static HELD_LOCKS: RefCell<Vec<usize>> = RefCell::new(Vec::new());
+    static HELD_LOCKS: RefCell<Vec<MutexID>> = RefCell::new(Vec::new());
 }
 
 lazy_static! {
     static ref DEPENDENCY_GRAPH: Mutex<DiGraph> = Default::default();
 }
 
-fn next_mutex_id() -> usize {
-    ID_SEQUENCE
-        .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |id| id.checked_add(1))
-        .expect("Mutex ID wraparound happened, results unreliable")
+/// Dedicated ID type for Mutexes
+///
+/// # Unstable
+///
+/// This type is currently private to prevent usage while the exact implementation is figured out,
+/// but it will likely be public in the future.
+///
+/// One possible alteration is to make this type not `Copy` but `Drop`, and handle deregistering
+/// the lock from there.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+struct MutexID(usize);
+
+impl MutexID {
+    /// Get a new, unique, mutex ID.
+    ///
+    /// This ID is guaranteed to be unique within the runtime of the program.
+    ///
+    /// # Panics
+    ///
+    /// This function may panic when there are no more mutex IDs available. The number of mutex ids
+    /// is `usize::MAX - 1` which should be plenty for most practical applications.
+    pub fn new() -> Self {
+        ID_SEQUENCE
+            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |id| id.checked_add(1))
+            .map(|id| Self(id))
+            .expect("Mutex ID wraparound happened, results unreliable")
+    }
 }
 
 /// Get a reference to the current dependency graph
@@ -41,11 +66,17 @@ fn get_depedency_graph() -> impl DerefMut<Target = DiGraph> {
 }
 
 /// Register that a lock is currently held
-fn register_lock(lock: usize) {
+fn register_lock(lock: MutexID) {
     HELD_LOCKS.with(|locks| locks.borrow_mut().push(lock))
 }
 
-fn drop_lock(id: usize) {
+/// Drop a lock held by the current thread.
+///
+/// # Panics
+///
+/// This function panics if the lock did not appear to be handled by this thread. If that happens,
+/// that is an indication of a serious design flaw in this library.
+fn drop_lock(id: MutexID) {
     HELD_LOCKS.with(|locks| {
         let mut locks = locks.borrow_mut();
@@ -56,7 +87,7 @@ fn drop_lock(id: usize) {
             }
         }
 
-        panic!("Tried to drop lock for mutex {} but it wasn't held", id)
+        panic!("Tried to drop lock for mutex {:?} but it wasn't held", id)
     });
 }
@@ -64,16 +95,23 @@ fn drop_lock(id: usize) {
 ///
 /// If the dependency is new, check for cycles in the dependency graph. If not, there shouldn't be
 /// any cycles so we don't need to check.
-fn register_dependency(lock: usize) {
-    HELD_LOCKS.with(|locks| {
+///
+/// # Panics
+///
+/// This function panics if the new dependency would introduce a cycle.
+fn register_dependency(lock: MutexID) {
+    if HELD_LOCKS.with(|locks| {
         if let Some(&previous) = locks.borrow().last() {
             let mut graph = get_depedency_graph();
-            if graph.add_edge(previous, lock) && graph.has_cycles() {
-                panic!("Mutex order graph should not have cycles");
-            }
+            graph.add_edge(previous, lock) && graph.has_cycles()
+        } else {
+            false
         }
-    })
+    }) {
+        // Panic without holding the lock to avoid needlessly poisoning it
+        panic!("Mutex order graph should not have cycles");
+    }
 }
 
 #[cfg(test)]
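A note on the new `// Panic without holding the lock` comment: panicking while a `std::sync::Mutex` guard is alive poisons that mutex for all later users, which is why `register_dependency` now drops the `DEPENDENCY_GRAPH` guard before panicking. A minimal standalone demonstration of the poisoning behaviour:

```rust
use std::sync::Mutex;
use std::thread;

fn main() {
    let mutex = Mutex::new(0);

    // A thread panics while holding the guard; unwinding drops the guard
    // and marks the mutex as poisoned.
    let _ = thread::scope(|s| {
        s.spawn(|| {
            let _guard = mutex.lock().unwrap();
            panic!("boom");
        })
        .join()
    });

    // Every later `lock()` now returns `Err(PoisonError)`.
    assert!(mutex.is_poisoned());
}
```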
@@ -87,10 +125,10 @@ mod tests {
     #[test]
     fn test_next_mutex_id() {
-        let initial = next_mutex_id();
-        let next = next_mutex_id();
+        let initial = MutexID::new();
+        let next = MutexID::new();
 
         // Can't assert N + 1 because multiple threads running tests
-        assert!(initial < next);
+        assert!(initial.0 < next.0);
     }
 }
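The test above can only assert `initial.0 < next.0` because parallel test threads also allocate IDs. Uniqueness still holds globally thanks to the atomic counter; here is a standalone sketch of the same scheme (names mirror the diff, illustration only):

```rust
use std::collections::HashSet;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

static ID_SEQUENCE: AtomicUsize = AtomicUsize::new(0);

fn next_id() -> usize {
    // `checked_add` makes the closure return `None` on overflow, which turns
    // into an `Err` from `fetch_update` instead of silently wrapping to 0.
    ID_SEQUENCE
        .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |id| id.checked_add(1))
        .expect("ID wraparound happened")
}

fn main() {
    let handles: Vec<_> = (0..4)
        .map(|_| thread::spawn(|| (0..1_000).map(|_| next_id()).collect::<Vec<_>>()))
        .collect();

    // No two threads ever observe the same ID.
    let mut seen = HashSet::new();
    for handle in handles {
        for id in handle.join().unwrap() {
            assert!(seen.insert(id), "duplicate mutex ID");
        }
    }
}
```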


@@ -30,15 +30,15 @@ use std::sync::TryLockResult;
 
 use crate::drop_lock;
 use crate::get_depedency_graph;
-use crate::next_mutex_id;
 use crate::register_dependency;
 use crate::register_lock;
+use crate::MutexID;
 
 /// Wrapper for `std::sync::Mutex`
 #[derive(Debug)]
 pub struct TracingMutex<T> {
     inner: Mutex<T>,
-    id: usize,
+    id: MutexID,
 }
 
 /// Wrapper for `std::sync::MutexGuard`
@@ -75,7 +75,7 @@ impl<T> TracingMutex<T> {
     pub fn new(t: T) -> Self {
         Self {
             inner: Mutex::new(t),
-            id: next_mutex_id(),
+            id: MutexID::new(),
         }
     }
@@ -107,10 +107,6 @@ impl<T> TracingMutex<T> {
         map_trylockresult(result, mapper)
     }
 
-    pub fn get_id(&self) -> usize {
-        self.id
-    }
-
     pub fn is_poisoned(&self) -> bool {
         self.inner.is_poisoned()
     }
@@ -182,7 +178,7 @@ impl<'a, T> Drop for TracingMutexGuard<'a, T> {
 #[derive(Debug)]
 pub struct TracingRwLock<T> {
     inner: RwLock<T>,
-    id: usize,
+    id: MutexID,
 }
 
 /// Hybrid wrapper for both `std::sync::RwLockReadGuard` and `std::sync::RwLockWriteGuard`.
@@ -203,14 +199,10 @@ impl<T> TracingRwLock<T> {
     pub fn new(t: T) -> Self {
         Self {
             inner: RwLock::new(t),
-            id: next_mutex_id(),
+            id: MutexID::new(),
         }
     }
 
-    pub fn get_id(&self) -> usize {
-        self.id
-    }
-
     #[track_caller]
     pub fn read(&self) -> LockResult<TracingReadGuard<T>> {
         register_dependency(self.id);
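Taken together, the three files give the crate its lock-order check. Below is a sketch of the intended end-to-end behaviour, assuming the crate is published as `tracing_mutex` and that `TracingMutex` exposes a `lock()` analogous to `std::sync::Mutex::lock()` (neither detail is shown in this diff):

```rust
use tracing_mutex::stdsync::TracingMutex;

fn main() {
    let a = TracingMutex::new(0);
    let b = TracingMutex::new(0);

    {
        // First ordering: records the edge a -> b in the dependency graph.
        let _guard_a = a.lock().unwrap();
        let _guard_b = b.lock().unwrap();
    }

    // Opposite ordering: register_dependency would add b -> a, closing the
    // cycle a -> b -> a, so this panics instead of deadlocking some day.
    let _guard_b = b.lock().unwrap();
    let _guard_a = a.lock().unwrap(); // panics: "Mutex order graph should not have cycles"
}
```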