Diffstat (limited to 'vendor/sharded-slab/src')
-rw-r--r--  vendor/sharded-slab/src/iter.rs                |  14
-rw-r--r--  vendor/sharded-slab/src/lib.rs                 |  26
-rw-r--r--  vendor/sharded-slab/src/page/mod.rs            |   4
-rw-r--r--  vendor/sharded-slab/src/page/slot.rs           |  18
-rw-r--r--  vendor/sharded-slab/src/shard.rs               |  14
-rw-r--r--  vendor/sharded-slab/src/tests/custom_config.rs |  78
-rw-r--r--  vendor/sharded-slab/src/tests/loom_slab.rs     |   4
-rw-r--r--  vendor/sharded-slab/src/tests/mod.rs           |  4
-rw-r--r--  vendor/sharded-slab/src/tests/properties.rs    | 244
-rw-r--r--  vendor/sharded-slab/src/tid.rs                 |  20
10 files changed, 395 insertions, 31 deletions
diff --git a/vendor/sharded-slab/src/iter.rs b/vendor/sharded-slab/src/iter.rs
index 54189aa57..e70bebe16 100644
--- a/vendor/sharded-slab/src/iter.rs
+++ b/vendor/sharded-slab/src/iter.rs
@@ -1,15 +1,19 @@
-use crate::{page, shard};
-use std::slice;
+use std::{iter::FusedIterator, slice};
+use crate::{cfg, page, shard};
+
+/// An exclusive fused iterator over the items in a [`Slab`](crate::Slab).
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[derive(Debug)]
-pub struct UniqueIter<'a, T, C: crate::cfg::Config> {
+pub struct UniqueIter<'a, T, C: cfg::Config> {
pub(super) shards: shard::IterMut<'a, Option<T>, C>,
pub(super) pages: slice::Iter<'a, page::Shared<Option<T>, C>>,
pub(super) slots: Option<page::Iter<'a, T, C>>,
}
-impl<'a, T, C: crate::cfg::Config> Iterator for UniqueIter<'a, T, C> {
+impl<'a, T, C: cfg::Config> Iterator for UniqueIter<'a, T, C> {
type Item = &'a T;
+
fn next(&mut self) -> Option<Self::Item> {
test_println!("UniqueIter::next");
loop {
@@ -37,3 +41,5 @@ impl<'a, T, C: crate::cfg::Config> Iterator for UniqueIter<'a, T, C> {
}
}
}
+
+impl<T, C: cfg::Config> FusedIterator for UniqueIter<'_, T, C> {}
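Note on the iter.rs change: besides the import cleanup and the `#[must_use]` attribute, the new `FusedIterator` impl promises that an exhausted `UniqueIter` stays exhausted. A minimal sketch of what that buys callers, assuming `sharded-slab` is available as a dependency (the item values are arbitrary):

    use sharded_slab::Slab;

    fn main() {
        let mut slab = Slab::new();
        slab.insert("a").unwrap();

        let mut iter = slab.unique_iter();
        assert_eq!(iter.next(), Some(&"a"));
        assert_eq!(iter.next(), None);
        // Fused: once exhausted, the iterator keeps returning `None`,
        // so `.fuse()` is now a free no-op for this type.
        assert_eq!(iter.next(), None);
    }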
diff --git a/vendor/sharded-slab/src/lib.rs b/vendor/sharded-slab/src/lib.rs
index e57cf50e2..ea9b66de3 100644
--- a/vendor/sharded-slab/src/lib.rs
+++ b/vendor/sharded-slab/src/lib.rs
@@ -215,8 +215,11 @@ mod page;
mod shard;
mod tid;
-pub use cfg::{Config, DefaultConfig};
-pub use clear::Clear;
+pub use self::{
+ cfg::{Config, DefaultConfig},
+ clear::Clear,
+ iter::UniqueIter,
+};
#[doc(inline)]
pub use pool::Pool;
@@ -735,15 +738,26 @@ impl<T, C: cfg::Config> Slab<T, C> {
}
/// Returns an iterator over all the items in the slab.
+ ///
+ /// Because this iterator exclusively borrows the slab (i.e. it holds an
+ /// `&mut Slab<T>`), elements will not be added or removed while the
+ /// iteration is in progress.
pub fn unique_iter(&mut self) -> iter::UniqueIter<'_, T, C> {
let mut shards = self.shards.iter_mut();
- let shard = shards.next().expect("must be at least 1 shard");
- let mut pages = shard.iter();
- let slots = pages.next().and_then(page::Shared::iter);
+
+ let (pages, slots) = match shards.next() {
+ Some(shard) => {
+ let mut pages = shard.iter();
+ let slots = pages.next().and_then(page::Shared::iter);
+ (pages, slots)
+ }
+ None => ([].iter(), None),
+ };
+
iter::UniqueIter {
shards,
- slots,
pages,
+ slots,
}
}
}
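Note on the lib.rs change: `unique_iter()` used to `expect()` at least one shard, which could panic on a slab whose shard list is still empty (no thread has inserted yet); the rewritten body treats that case as an empty iterator. A usage sketch, again assuming `sharded-slab` as a dependency:

    use sharded_slab::Slab;

    fn main() {
        let mut slab: Slab<u64> = Slab::new();
        // No thread has touched the slab, so no shard may exist yet;
        // with this fix, iterating yields nothing instead of panicking.
        assert_eq!(slab.unique_iter().count(), 0);

        let key = slab.insert(42).unwrap();
        assert_eq!(slab.unique_iter().copied().collect::<Vec<_>>(), vec![42]);
        assert!(slab.remove(key));
    }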
diff --git a/vendor/sharded-slab/src/page/mod.rs b/vendor/sharded-slab/src/page/mod.rs
index 0499fb535..a82c247d2 100644
--- a/vendor/sharded-slab/src/page/mod.rs
+++ b/vendor/sharded-slab/src/page/mod.rs
@@ -247,7 +247,7 @@ where
}
pub(crate) fn iter(&self) -> Option<Iter<'a, T, C>> {
- let slab = self.slab.with(|slab| unsafe { (&*slab).as_ref() });
+ let slab = self.slab.with(|slab| unsafe { (*slab).as_ref() });
slab.map(|slab| {
slab.iter()
.filter_map(Shared::make_ref as fn(&'a Slot<Option<T>, C>) -> Option<&'a T>)
@@ -402,7 +402,7 @@ impl<C: cfg::Config> Ord for Addr<C> {
impl<C: cfg::Config> Clone for Addr<C> {
fn clone(&self) -> Self {
- Self::from_usize(self.addr)
+ *self
}
}
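Note on the `Clone` changes (here and in the matching hunks in slot.rs and tid.rs below): these index types are `Copy`, so `*self` is the idiomatic body and, unlike reconstructing from a field, cannot fall out of sync if fields change. An illustrative sketch with a hypothetical stand-in type, not the crate's real definition:

    // `Addr` stands in for the crate's packed index types; the field is hypothetical.
    #[derive(Debug, PartialEq, Eq, Copy)]
    struct Addr {
        addr: usize,
    }

    impl Clone for Addr {
        // Valid because `Addr` is `Copy`; the old `Self::from_usize(self.addr)`
        // style had to be kept in sync with every field by hand.
        fn clone(&self) -> Self {
            *self
        }
    }

    fn main() {
        let a = Addr { addr: 42 };
        assert_eq!(a.clone(), a);
    }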
diff --git a/vendor/sharded-slab/src/page/slot.rs b/vendor/sharded-slab/src/page/slot.rs
index 3387d5388..bb3f9ac13 100644
--- a/vendor/sharded-slab/src/page/slot.rs
+++ b/vendor/sharded-slab/src/page/slot.rs
@@ -134,7 +134,7 @@ where
let new_refs = refs.incr()?;
match self.lifecycle.compare_exchange(
lifecycle,
- new_refs.pack(current_gen.pack(state.pack(0))),
+ new_refs.pack(lifecycle),
Ordering::AcqRel,
Ordering::Acquire,
) {
@@ -242,7 +242,7 @@ where
let mut spin_exp = 0;
let next_gen = gen.advance();
loop {
- let current_gen = Generation::from_packed(lifecycle);
+ let current_gen = LifecycleGen::from_packed(lifecycle).0;
test_println!("-> release_with; lifecycle={:#x}; expected_gen={:?}; current_gen={:?}; next_gen={:?};",
lifecycle,
gen,
@@ -261,7 +261,7 @@ where
match self.lifecycle.compare_exchange(
lifecycle,
- next_gen.pack(lifecycle),
+ LifecycleGen(next_gen).pack(lifecycle),
Ordering::AcqRel,
Ordering::Acquire,
) {
@@ -499,8 +499,9 @@ impl<T, C: cfg::Config> Slot<T, C> {
// Are we the last guard, and is the slot marked for removal?
let dropping = refs.value == 1 && state == State::Marked;
let new_lifecycle = if dropping {
- // If so, we want to advance the state to "removing"
- gen.pack(State::Removing as usize)
+ // If so, we want to advance the state to "removing".
+ // Also, reset the ref count to 0.
+ LifecycleGen(gen).pack(State::Removing as usize)
} else {
// Otherwise, just subtract 1 from the ref count.
refs.decr().pack(lifecycle)
@@ -583,7 +584,7 @@ impl<C: cfg::Config> Ord for Generation<C> {
impl<C: cfg::Config> Clone for Generation<C> {
fn clone(&self) -> Self {
- Self::new(self.value)
+ *self
}
}
@@ -747,7 +748,7 @@ impl<C: cfg::Config> Ord for RefCount<C> {
impl<C: cfg::Config> Clone for RefCount<C> {
fn clone(&self) -> Self {
- Self::from_usize(self.value)
+ *self
}
}
@@ -875,7 +876,8 @@ impl<T, C: cfg::Config> InitGuard<T, C> {
debug_assert!(state == State::Marked || thread::panicking(), "state was not MARKED; someone else has removed the slot while we have exclusive access!\nactual={:?}", state);
debug_assert!(refs.value == 0 || thread::panicking(), "ref count was not 0; someone else has referenced the slot while we have exclusive access!\nactual={:?}", refs);
- let new_lifecycle = self.generation().pack(State::Removing as usize);
+
+ let new_lifecycle = LifecycleGen(self.generation()).pack(State::Removing as usize);
match slot.lifecycle.compare_exchange(
curr_lifecycle,
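Note on the slot.rs changes: the fixes share one shape. `new_refs.pack(lifecycle)` packs the updated ref count over the current lifecycle word instead of rebuilding the word from parts, and wrapping the generation in `LifecycleGen` before packing places it at the lifecycle word's generation offset rather than `Generation`'s own offset (which the diff suggests mattered once `RESERVED_BITS` shifted the layout). An illustrative sketch of the pack-over idiom with hypothetical bit offsets, not the crate's real layout:

    // Hypothetical layout: state in bits 0-1, generation in bits 2-7, refs above.
    const STATE_BITS: u32 = 2;
    const GEN_BITS: u32 = 6;

    const STATE_MASK: usize = (1 << STATE_BITS) - 1;
    const GEN_MASK: usize = ((1 << GEN_BITS) - 1) << STATE_BITS;
    const REFS_SHIFT: u32 = STATE_BITS + GEN_BITS;

    // Pack a new ref count *over* the current word, preserving the state and
    // generation bits, mirroring `new_refs.pack(lifecycle)` in the diff.
    fn pack_refs(lifecycle: usize, refs: usize) -> usize {
        (lifecycle & (STATE_MASK | GEN_MASK)) | (refs << REFS_SHIFT)
    }

    fn main() {
        // refs = 5, gen = 6, state = 2 (all values arbitrary).
        let lifecycle: usize = (5 << REFS_SHIFT) | (6 << STATE_BITS) | 2;
        let updated = pack_refs(lifecycle, 6);
        // Only the ref count changed; rebuilding the word from individual
        // fields is the bug class fixed above: it silently zeroes the rest.
        assert_eq!(updated & (STATE_MASK | GEN_MASK), lifecycle & (STATE_MASK | GEN_MASK));
        assert_eq!(updated >> REFS_SHIFT, 6);
    }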
diff --git a/vendor/sharded-slab/src/shard.rs b/vendor/sharded-slab/src/shard.rs
index 0d054d7e5..b77a9fcb0 100644
--- a/vendor/sharded-slab/src/shard.rs
+++ b/vendor/sharded-slab/src/shard.rs
@@ -77,7 +77,7 @@ where
let (addr, page_index) = page::indices::<C>(idx);
test_println!("-> {:?}", addr);
- if page_index > self.shared.len() {
+ if page_index >= self.shared.len() {
return None;
}
@@ -132,7 +132,7 @@ where
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
- if page_index > self.shared.len() {
+ if page_index >= self.shared.len() {
return false;
}
@@ -143,7 +143,7 @@ where
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
- if page_index > self.shared.len() {
+ if page_index >= self.shared.len() {
return false;
}
@@ -183,7 +183,7 @@ where
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
- if page_index > self.shared.len() {
+ if page_index >= self.shared.len() {
return false;
}
@@ -194,7 +194,7 @@ where
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
- if page_index > self.shared.len() {
+ if page_index >= self.shared.len() {
return false;
}
@@ -221,7 +221,7 @@ where
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
- if page_index > self.shared.len() {
+ if page_index >= self.shared.len() {
return false;
}
@@ -232,7 +232,7 @@ where
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
- if page_index > self.shared.len() {
+ if page_index >= self.shared.len() {
return false;
}
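Note on the shard.rs changes: every hunk fixes the same off-by-one bounds check. `page_index` is a zero-based index into `self.shared`, so an index equal to the length is already out of range; the old `>` let exactly that value through. A minimal standalone sketch of the corrected guard:

    fn get_page<'a, T>(pages: &'a [T], page_index: usize) -> Option<&'a T> {
        if page_index >= pages.len() {
            return None; // `>` would wrongly let `page_index == pages.len()` through
        }
        Some(&pages[page_index])
    }

    fn main() {
        let pages = ["page0", "page1"];
        assert_eq!(get_page(&pages, 1), Some(&"page1"));
        assert_eq!(get_page(&pages, 2), None); // the length itself is out of bounds
    }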
diff --git a/vendor/sharded-slab/src/tests/custom_config.rs b/vendor/sharded-slab/src/tests/custom_config.rs
new file mode 100644
index 000000000..77f725935
--- /dev/null
+++ b/vendor/sharded-slab/src/tests/custom_config.rs
@@ -0,0 +1,78 @@
+//! Ensures that a custom config behaves like the default config until its limits are reached.
+//! Prevents regression after #80.
+
+use crate::{cfg::CfgPrivate, Config, Slab};
+
+struct CustomConfig;
+
+#[cfg(target_pointer_width = "64")]
+impl Config for CustomConfig {
+ const INITIAL_PAGE_SIZE: usize = 32;
+ const MAX_PAGES: usize = 15;
+ const MAX_THREADS: usize = 256;
+ const RESERVED_BITS: usize = 24;
+}
+
+#[cfg(not(target_pointer_width = "64"))]
+impl Config for CustomConfig {
+ const INITIAL_PAGE_SIZE: usize = 16;
+ const MAX_PAGES: usize = 6;
+ const MAX_THREADS: usize = 128;
+ const RESERVED_BITS: usize = 12;
+}
+
+// We should repeat actions several times to detect invalid lifecycle changes.
+const ITERS: u64 = 5;
+
+#[track_caller]
+fn slab_eq(mut lhs: Slab<u64, impl Config>, mut rhs: Slab<u64, impl Config>) {
+ let mut lhs_vec = lhs.unique_iter().collect::<Vec<_>>();
+ lhs_vec.sort_unstable();
+ let mut rhs_vec = rhs.unique_iter().collect::<Vec<_>>();
+ rhs_vec.sort_unstable();
+ assert_eq!(lhs_vec, rhs_vec);
+}
+
+/// Calls `insert(); remove()` multiple times to detect invalid releasing.
+/// Initially, it revealed bugs in the `Slot::release_with()` implementation.
+#[test]
+fn insert_remove() {
+ eprintln!("bits={}; config={:#?}", usize::BITS, CustomConfig::debug());
+
+ let default_slab = Slab::<u64, _>::new();
+ let custom_slab = Slab::<u64, _>::new_with_config::<CustomConfig>();
+
+ for i in 0..=ITERS {
+ let idx = default_slab.insert(i).unwrap();
+ assert!(default_slab.remove(idx));
+
+ let idx = custom_slab.insert(i).unwrap();
+ assert!(custom_slab.remove(idx));
+ }
+
+ slab_eq(custom_slab, default_slab);
+}
+
+/// Calls `get()` multiple times to detect invalid ref counting.
+/// Initially, it revealed bugs in the `Slot::get()` implementation.
+#[test]
+fn double_get() {
+ eprintln!("bits={}; config={:#?}", usize::BITS, CustomConfig::debug());
+
+ let default_slab = Slab::<u64, _>::new();
+ let custom_slab = Slab::<u64, _>::new_with_config::<CustomConfig>();
+
+ for i in 0..=ITERS {
+ let idx = default_slab.insert(i).unwrap();
+ assert!(default_slab.get(idx).is_some());
+ assert!(default_slab.get(idx).is_some());
+ assert!(default_slab.remove(idx));
+
+ let idx = custom_slab.insert(i).unwrap();
+ assert!(custom_slab.get(idx).is_some());
+ assert!(custom_slab.get(idx).is_some());
+ assert!(custom_slab.remove(idx));
+ }
+
+ slab_eq(custom_slab, default_slab);
+}
diff --git a/vendor/sharded-slab/src/tests/loom_slab.rs b/vendor/sharded-slab/src/tests/loom_slab.rs
index 58422f9a0..1cfeb8463 100644
--- a/vendor/sharded-slab/src/tests/loom_slab.rs
+++ b/vendor/sharded-slab/src/tests/loom_slab.rs
@@ -381,7 +381,7 @@ fn remove_remote_during_insert() {
#[test]
fn unique_iter() {
run_model("unique_iter", || {
- let mut slab = std::sync::Arc::new(Slab::new());
+ let mut slab = Arc::new(Slab::new());
let s = slab.clone();
let t1 = thread::spawn(move || {
@@ -398,7 +398,7 @@ fn unique_iter() {
t1.join().expect("thread 1 should not panic");
t2.join().expect("thread 2 should not panic");
- let slab = std::sync::Arc::get_mut(&mut slab).expect("other arcs should be dropped");
+ let slab = Arc::get_mut(&mut slab).expect("other arcs should be dropped");
let items: Vec<_> = slab.unique_iter().map(|&i| i).collect();
assert!(items.contains(&1), "items: {:?}", items);
assert!(items.contains(&2), "items: {:?}", items);
diff --git a/vendor/sharded-slab/src/tests/mod.rs b/vendor/sharded-slab/src/tests/mod.rs
index be153b573..4bc9fb389 100644
--- a/vendor/sharded-slab/src/tests/mod.rs
+++ b/vendor/sharded-slab/src/tests/mod.rs
@@ -65,7 +65,11 @@ pub(crate) mod util {
}
}
+#[cfg(not(loom))]
+mod custom_config;
#[cfg(loom)]
mod loom_pool;
#[cfg(loom)]
mod loom_slab;
+#[cfg(not(loom))]
+mod properties;
diff --git a/vendor/sharded-slab/src/tests/properties.rs b/vendor/sharded-slab/src/tests/properties.rs
new file mode 100644
index 000000000..8f14085a9
--- /dev/null
+++ b/vendor/sharded-slab/src/tests/properties.rs
@@ -0,0 +1,244 @@
+//! This module contains property-based tests against the public API:
+//! * The API never panics.
+//! * Active entries cannot be overwritten until removed.
+//! * The slab doesn't produce overlapping keys.
+//! * The slab doesn't leave "lost" keys.
+//! * `get()`, `get_owned()`, and `contains()` are consistent.
+//! * `RESERVED_BITS` are actually not used.
+//!
+//! The test is supposed to be deterministic, so it doesn't spawn real threads
+//! and uses `tid::with()` to override the TID for the current thread.
+
+use std::{ops::Range, sync::Arc};
+
+use indexmap::IndexMap;
+use proptest::prelude::*;
+
+use crate::{tid, Config, DefaultConfig, Slab};
+
+const THREADS: Range<usize> = 1..4;
+const ACTIONS: Range<usize> = 1..1000;
+
+#[derive(Debug, Clone)]
+struct Action {
+ tid: usize,
+ kind: ActionKind,
+}
+
+#[derive(Debug, Clone)]
+enum ActionKind {
+ Insert,
+ VacantEntry,
+ RemoveRandom(usize), // key
+ RemoveExistent(usize), // seed
+ TakeRandom(usize), // key
+ TakeExistent(usize), // seed
+ GetRandom(usize), // key
+ GetExistent(usize), // seed
+}
+
+prop_compose! {
+ fn action_strategy()(tid in THREADS, kind in action_kind_strategy()) -> Action {
+ Action { tid, kind }
+ }
+}
+
+fn action_kind_strategy() -> impl Strategy<Value = ActionKind> {
+ prop_oneof![
+ 1 => Just(ActionKind::Insert),
+ 1 => Just(ActionKind::VacantEntry),
+ 1 => prop::num::usize::ANY.prop_map(ActionKind::RemoveRandom),
+ 1 => prop::num::usize::ANY.prop_map(ActionKind::RemoveExistent),
+ 1 => prop::num::usize::ANY.prop_map(ActionKind::TakeRandom),
+ 1 => prop::num::usize::ANY.prop_map(ActionKind::TakeExistent),
+ // Produce `GetRandom` and `GetExistent` more often.
+ 5 => prop::num::usize::ANY.prop_map(ActionKind::GetRandom),
+ 5 => prop::num::usize::ANY.prop_map(ActionKind::GetExistent),
+ ]
+}
+
+/// Stores active entries (added and not yet removed).
+#[derive(Default)]
+struct Active {
+ // Use `IndexMap` to preserve determinism.
+ map: IndexMap<usize, u32>,
+ prev_value: u32,
+}
+
+impl Active {
+ fn next_value(&mut self) -> u32 {
+ self.prev_value += 1;
+ self.prev_value
+ }
+
+ fn get(&self, key: usize) -> Option<u32> {
+ self.map.get(&key).copied()
+ }
+
+ fn get_any(&self, seed: usize) -> Option<(usize, u32)> {
+ if self.map.is_empty() {
+ return None;
+ }
+
+ let index = seed % self.map.len();
+ self.map.get_index(index).map(|(k, v)| (*k, *v))
+ }
+
+ fn insert(&mut self, key: usize, value: u32) {
+ assert_eq!(
+ self.map.insert(key, value),
+ None,
+ "keys of active entries must be unique"
+ );
+ }
+
+ fn remove(&mut self, key: usize) -> Option<u32> {
+ self.map.swap_remove(&key)
+ }
+
+ fn remove_any(&mut self, seed: usize) -> Option<(usize, u32)> {
+ if self.map.is_empty() {
+ return None;
+ }
+
+ let index = seed % self.map.len();
+ self.map.swap_remove_index(index)
+ }
+
+ fn drain(&mut self) -> impl Iterator<Item = (usize, u32)> + '_ {
+ self.map.drain(..)
+ }
+}
+
+fn used_bits<C: Config>(key: usize) -> usize {
+ assert_eq!(
+ C::RESERVED_BITS + Slab::<u32, C>::USED_BITS,
+ std::mem::size_of::<usize>() * 8
+ );
+ key & ((!0) >> C::RESERVED_BITS)
+}
+
+fn apply_action<C: Config>(
+ slab: &Arc<Slab<u32, C>>,
+ active: &mut Active,
+ action: ActionKind,
+) -> Result<(), TestCaseError> {
+ match action {
+ ActionKind::Insert => {
+ let value = active.next_value();
+ let key = slab.insert(value).expect("unexpectedly exhausted slab");
+ prop_assert_eq!(used_bits::<C>(key), key);
+ active.insert(key, value);
+ }
+ ActionKind::VacantEntry => {
+ let value = active.next_value();
+ let entry = slab.vacant_entry().expect("unexpectedly exhausted slab");
+ let key = entry.key();
+ prop_assert_eq!(used_bits::<C>(key), key);
+ entry.insert(value);
+ active.insert(key, value);
+ }
+ ActionKind::RemoveRandom(key) => {
+ let used_key = used_bits::<C>(key);
+ prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
+ prop_assert_eq!(slab.remove(key), active.remove(used_key).is_some());
+ }
+ ActionKind::RemoveExistent(seed) => {
+ if let Some((key, _value)) = active.remove_any(seed) {
+ prop_assert!(slab.contains(key));
+ prop_assert!(slab.remove(key));
+ }
+ }
+ ActionKind::TakeRandom(key) => {
+ let used_key = used_bits::<C>(key);
+ prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
+ prop_assert_eq!(slab.take(key), active.remove(used_key));
+ }
+ ActionKind::TakeExistent(seed) => {
+ if let Some((key, value)) = active.remove_any(seed) {
+ prop_assert!(slab.contains(key));
+ prop_assert_eq!(slab.take(key), Some(value));
+ }
+ }
+ ActionKind::GetRandom(key) => {
+ let used_key = used_bits::<C>(key);
+ prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
+ prop_assert_eq!(slab.get(key).map(|e| *e), active.get(used_key));
+ prop_assert_eq!(
+ slab.clone().get_owned(key).map(|e| *e),
+ active.get(used_key)
+ );
+ }
+ ActionKind::GetExistent(seed) => {
+ if let Some((key, value)) = active.get_any(seed) {
+ prop_assert!(slab.contains(key));
+ prop_assert_eq!(slab.get(key).map(|e| *e), Some(value));
+ prop_assert_eq!(slab.clone().get_owned(key).map(|e| *e), Some(value));
+ }
+ }
+ }
+
+ Ok(())
+}
+
+fn run<C: Config>(actions: Vec<Action>) -> Result<(), TestCaseError> {
+ let mut slab = Arc::new(Slab::new_with_config::<C>());
+ let mut active = Active::default();
+
+ // Apply all actions.
+ for action in actions {
+ // Override the TID for the current thread instead of using multiple real threads
+ // to preserve determinism. We're not checking concurrency issues here, they should be
+        // covered by loom tests anyway. Thus, it's fine to run all actions sequentially.
+ tid::with(action.tid, || {
+ apply_action::<C>(&slab, &mut active, action.kind)
+ })?;
+ }
+
+ // Ensure the slab contains all remaining entries.
+ let mut expected_values = Vec::new();
+ for (key, value) in active.drain() {
+ prop_assert!(slab.contains(key));
+ prop_assert_eq!(slab.get(key).map(|e| *e), Some(value));
+ prop_assert_eq!(slab.clone().get_owned(key).map(|e| *e), Some(value));
+ expected_values.push(value);
+ }
+ expected_values.sort();
+
+ // Ensure `unique_iter()` returns all remaining entries.
+ let slab = Arc::get_mut(&mut slab).unwrap();
+ let mut actual_values = slab.unique_iter().copied().collect::<Vec<_>>();
+ actual_values.sort();
+ prop_assert_eq!(actual_values, expected_values);
+
+ Ok(())
+}
+
+proptest! {
+ #[test]
+ fn default_config(actions in prop::collection::vec(action_strategy(), ACTIONS)) {
+ run::<DefaultConfig>(actions)?;
+ }
+
+ #[test]
+ fn custom_config(actions in prop::collection::vec(action_strategy(), ACTIONS)) {
+ run::<CustomConfig>(actions)?;
+ }
+}
+
+struct CustomConfig;
+
+#[cfg(target_pointer_width = "64")]
+impl Config for CustomConfig {
+ const INITIAL_PAGE_SIZE: usize = 32;
+ const MAX_PAGES: usize = 15;
+ const MAX_THREADS: usize = 256;
+ const RESERVED_BITS: usize = 24;
+}
+#[cfg(target_pointer_width = "32")]
+impl Config for CustomConfig {
+ const INITIAL_PAGE_SIZE: usize = 16;
+ const MAX_PAGES: usize = 6;
+ const MAX_THREADS: usize = 128;
+ const RESERVED_BITS: usize = 12;
+}
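Note on `used_bits`: the helper in properties.rs asserts that `RESERVED_BITS` plus the slab's `USED_BITS` cover the whole word, then strips the reserved (high) bits from a key. A worked sketch of that mask arithmetic under the 64-bit `CustomConfig` above (`RESERVED_BITS = 24`); a 64-bit target and these key values are assumptions:

    fn main() {
        const RESERVED_BITS: u32 = 24;
        let mask: usize = !0 >> RESERVED_BITS; // low 40 bits set on a 64-bit target

        let key: usize = 0x0000_00ab_cdef_0123; // fits in the low 40 bits
        assert_eq!(key & mask, key); // reserved bits unused: masking changes nothing

        let bad_key = key | (1 << 63); // a key leaking into reserved space...
        assert_ne!(bad_key & mask, bad_key); // ...would be caught by the property
    }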
diff --git a/vendor/sharded-slab/src/tid.rs b/vendor/sharded-slab/src/tid.rs
index 57d64f970..f2cb7e0d9 100644
--- a/vendor/sharded-slab/src/tid.rs
+++ b/vendor/sharded-slab/src/tid.rs
@@ -12,7 +12,6 @@ use std::{
collections::VecDeque,
fmt,
marker::PhantomData,
- sync::PoisonError,
};
/// Uniquely identifies a thread.
@@ -123,7 +122,7 @@ impl<C> Eq for Tid<C> {}
impl<C: cfg::Config> Clone for Tid<C> {
fn clone(&self) -> Self {
- Self::new(self.id)
+ *self
}
}
@@ -186,9 +185,26 @@ impl Registration {
#[cfg(not(all(loom, any(feature = "loom", test))))]
impl Drop for Registration {
fn drop(&mut self) {
+ use std::sync::PoisonError;
+
if let Some(id) = self.0.get() {
let mut free_list = REGISTRY.free.lock().unwrap_or_else(PoisonError::into_inner);
free_list.push_back(id);
}
}
}
+
+#[cfg(all(test, not(loom)))]
+pub(crate) fn with<R>(tid: usize, f: impl FnOnce() -> R) -> R {
+ struct Guard(Option<usize>);
+
+ impl Drop for Guard {
+ fn drop(&mut self) {
+ REGISTRATION.with(|r| r.0.set(self.0.take()));
+ }
+ }
+
+ let prev = REGISTRATION.with(|r| r.0.replace(Some(tid)));
+ let _guard = Guard(prev);
+ f()
+}
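Note on the new test-only helper: `tid::with` swaps the calling thread's registered TID for the duration of the closure, and the `Guard` destructor restores the previous value even if the closure panics, so a single real thread can deterministically impersonate many. A usage sketch as it could appear inside the crate's own (non-loom) tests, since the helper is `pub(crate)`; the test name and values are hypothetical:

    #[test]
    fn deterministic_multi_tid() {
        let slab = crate::Slab::new();

        let k1 = crate::tid::with(1, || slab.insert("from tid 1").unwrap());
        let k2 = crate::tid::with(2, || slab.insert("from tid 2").unwrap());

        // Keys minted under different TIDs never collide, and the previous
        // TID is restored when each closure returns.
        assert_ne!(k1, k2);
        assert_eq!(*slab.get(k1).unwrap(), "from tid 1");
    }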