author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:21 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:21 +0000
commit    4e8199b572f2035b7749cba276ece3a26630d23e (patch)
tree      f09feeed6a0fe39d027b1908aa63ea6b35e4b631 /compiler/rustc_data_structures
parent    Adding upstream version 1.66.0+dfsg1. (diff)
Adding upstream version 1.67.1+dfsg1. (tag: upstream/1.67.1+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_data_structures')
-rw-r--r--  compiler/rustc_data_structures/Cargo.toml                  |   4
-rw-r--r--  compiler/rustc_data_structures/src/fingerprint.rs          |   2
-rw-r--r--  compiler/rustc_data_structures/src/functor.rs              |  36
-rw-r--r--  compiler/rustc_data_structures/src/intern.rs               |  88
-rw-r--r--  compiler/rustc_data_structures/src/memmap.rs               |  10
-rw-r--r--  compiler/rustc_data_structures/src/owning_ref/mod.rs       |  24
-rw-r--r--  compiler/rustc_data_structures/src/profiling.rs            |   6
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map.rs           |   8
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map/index_map.rs |   9
-rw-r--r--  compiler/rustc_data_structures/src/stable_hasher.rs        | 105
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs                 |  13
11 files changed, 126 insertions, 179 deletions
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index 9daa21ef6..5afce15e2 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -23,9 +23,9 @@ rustc_macros = { path = "../rustc_macros" }
 rustc_serialize = { path = "../rustc_serialize" }
 smallvec = { version = "1.8.1", features = ["const_generics", "union", "may_dangle"] }
 stable_deref_trait = "1.0.0"
-stacker = "0.1.14"
+stacker = "0.1.15"
 tempfile = "3.2"
-thin-vec = "0.2.8"
+thin-vec = "0.2.9"
 tracing = "0.1"
 
 [dependencies.parking_lot]
diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs
index a39178016..d98f4e43f 100644
--- a/compiler/rustc_data_structures/src/fingerprint.rs
+++ b/compiler/rustc_data_structures/src/fingerprint.rs
@@ -140,7 +140,7 @@ impl stable_hasher::StableHasherResult for Fingerprint {
     }
 }
 
-impl_stable_hash_via_hash!(Fingerprint);
+impl_stable_traits_for_trivial_type!(Fingerprint);
 
 impl<E: Encoder> Encodable<E> for Fingerprint {
     #[inline]
diff --git a/compiler/rustc_data_structures/src/functor.rs b/compiler/rustc_data_structures/src/functor.rs
index a3d3f9883..84cb417dd 100644
--- a/compiler/rustc_data_structures/src/functor.rs
+++ b/compiler/rustc_data_structures/src/functor.rs
@@ -34,43 +34,11 @@ impl<T> IdFunctor for Vec<T> {
     type Inner = T;
 
     #[inline]
-    fn try_map_id<F, E>(self, mut f: F) -> Result<Self, E>
+    fn try_map_id<F, E>(self, f: F) -> Result<Self, E>
     where
         F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
     {
-        struct HoleVec<T> {
-            vec: Vec<mem::ManuallyDrop<T>>,
-            hole: Option<usize>,
-        }
-
-        impl<T> Drop for HoleVec<T> {
-            fn drop(&mut self) {
-                unsafe {
-                    for (index, slot) in self.vec.iter_mut().enumerate() {
-                        if self.hole != Some(index) {
-                            mem::ManuallyDrop::drop(slot);
-                        }
-                    }
-                }
-            }
-        }
-
-        unsafe {
-            let (ptr, length, capacity) = self.into_raw_parts();
-            let vec = Vec::from_raw_parts(ptr.cast(), length, capacity);
-            let mut hole_vec = HoleVec { vec, hole: None };
-
-            for (index, slot) in hole_vec.vec.iter_mut().enumerate() {
-                hole_vec.hole = Some(index);
-                let original = mem::ManuallyDrop::take(slot);
-                let mapped = f(original)?;
-                *slot = mem::ManuallyDrop::new(mapped);
-                hole_vec.hole = None;
-            }
-
-            mem::forget(hole_vec);
-            Ok(Vec::from_raw_parts(ptr, length, capacity))
-        }
+        self.into_iter().map(f).collect()
    }
 }
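Note on the functor.rs hunk above: collecting an iterator of Result<T, E> into Result<Vec<T>, E> already short-circuits on the first error and, thanks to the standard library's in-place collection optimization, typically reuses the vector's allocation, so the hand-rolled HoleVec panic-safety machinery is unnecessary. A minimal sketch of the same pattern outside the compiler (the IdFunctor trait below is a trimmed-down stand-in, not the rustc definition):

    // Trimmed-down stand-in for rustc's `IdFunctor` trait.
    pub trait IdFunctor: Sized {
        type Inner;

        fn try_map_id<F, E>(self, f: F) -> Result<Self, E>
        where
            F: FnMut(Self::Inner) -> Result<Self::Inner, E>;
    }

    impl<T> IdFunctor for Vec<T> {
        type Inner = T;

        // `collect::<Result<Vec<_>, E>>()` stops at the first `Err` and drops
        // the already-mapped prefix, which is what the removed `HoleVec` code
        // achieved manually with `ManuallyDrop`.
        fn try_map_id<F, E>(self, f: F) -> Result<Self, E>
        where
            F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
        {
            self.into_iter().map(f).collect()
        }
    }

    fn main() {
        let v = vec![1i32, 2, 3];
        assert_eq!(v.clone().try_map_id::<_, ()>(|x| Ok(x * 10)), Ok(vec![10, 20, 30]));
        // Mapping fails on 2: the whole operation returns the error.
        assert_eq!(v.try_map_id(|x| if x == 2 { Err("boom") } else { Ok(x) }), Err("boom"));
    }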
diff --git a/compiler/rustc_data_structures/src/intern.rs b/compiler/rustc_data_structures/src/intern.rs
index 009b5d534..ba94f3776 100644
--- a/compiler/rustc_data_structures/src/intern.rs
+++ b/compiler/rustc_data_structures/src/intern.rs
@@ -4,8 +4,6 @@ use std::hash::{Hash, Hasher};
 use std::ops::Deref;
 use std::ptr;
 
-use crate::fingerprint::Fingerprint;
-
 mod private {
     #[derive(Clone, Copy, Debug)]
     pub struct PrivateZst;
@@ -72,7 +70,7 @@ impl<'a, T: PartialOrd> PartialOrd for Interned<'a, T> {
         if ptr::eq(self.0, other.0) {
             Some(Ordering::Equal)
         } else {
-            let res = self.0.partial_cmp(&other.0);
+            let res = self.0.partial_cmp(other.0);
             debug_assert_ne!(res, Some(Ordering::Equal));
             res
         }
@@ -86,7 +84,7 @@ impl<'a, T: Ord> Ord for Interned<'a, T> {
         if ptr::eq(self.0, other.0) {
             Ordering::Equal
         } else {
-            let res = self.0.cmp(&other.0);
+            let res = self.0.cmp(other.0);
             debug_assert_ne!(res, Ordering::Equal);
             res
         }
@@ -110,87 +108,5 @@ where
     }
 }
 
-/// A helper trait so that `Interned` things can cache stable hashes reproducibly.
-pub trait InternedHashingContext {
-    fn with_def_path_and_no_spans(&mut self, f: impl FnOnce(&mut Self));
-}
-
-/// A helper type that you can wrap round your own type in order to automatically
-/// cache the stable hash on creation and not recompute it whenever the stable hash
-/// of the type is computed.
-/// This is only done in incremental mode. You can also opt out of caching by using
-/// StableHash::ZERO for the hash, in which case the hash gets computed each time.
-/// This is useful if you have values that you intern but never (can?) use for stable
-/// hashing.
-#[derive(Copy, Clone)]
-pub struct WithStableHash<T> {
-    pub internee: T,
-    pub stable_hash: Fingerprint,
-}
-
-impl<T: PartialEq> PartialEq for WithStableHash<T> {
-    #[inline]
-    fn eq(&self, other: &Self) -> bool {
-        self.internee.eq(&other.internee)
-    }
-}
-
-impl<T: Eq> Eq for WithStableHash<T> {}
-
-impl<T: Ord> PartialOrd for WithStableHash<T> {
-    fn partial_cmp(&self, other: &WithStableHash<T>) -> Option<Ordering> {
-        Some(self.internee.cmp(&other.internee))
-    }
-}
-
-impl<T: Ord> Ord for WithStableHash<T> {
-    fn cmp(&self, other: &WithStableHash<T>) -> Ordering {
-        self.internee.cmp(&other.internee)
-    }
-}
-
-impl<T> Deref for WithStableHash<T> {
-    type Target = T;
-
-    #[inline]
-    fn deref(&self) -> &T {
-        &self.internee
-    }
-}
-
-impl<T: Hash> Hash for WithStableHash<T> {
-    #[inline]
-    fn hash<H: Hasher>(&self, s: &mut H) {
-        self.internee.hash(s)
-    }
-}
-
-impl<T: HashStable<CTX>, CTX: InternedHashingContext> HashStable<CTX> for WithStableHash<T> {
-    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
-        if self.stable_hash == Fingerprint::ZERO || cfg!(debug_assertions) {
-            // No cached hash available. This can only mean that incremental is disabled.
-            // We don't cache stable hashes in non-incremental mode, because they are used
-            // so rarely that the performance actually suffers.
-
-            // We need to build the hash as if we cached it and then hash that hash, as
-            // otherwise the hashes will differ between cached and non-cached mode.
-            let stable_hash: Fingerprint = {
-                let mut hasher = StableHasher::new();
-                hcx.with_def_path_and_no_spans(|hcx| self.internee.hash_stable(hcx, &mut hasher));
-                hasher.finish()
-            };
-            if cfg!(debug_assertions) && self.stable_hash != Fingerprint::ZERO {
-                assert_eq!(
-                    stable_hash, self.stable_hash,
-                    "cached stable hash does not match freshly computed stable hash"
-                );
-            }
-            stable_hash.hash_stable(hcx, hasher);
-        } else {
-            self.stable_hash.hash_stable(hcx, hasher);
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests;
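For context on the Interned comparisons touched above: the interner guarantees that equal values share a single allocation, so comparisons can short-circuit on pointer identity before falling back to the wrapped value; and since the field is already a reference, passing it directly (other.0 instead of &other.0) avoids a needless double reference. A minimal sketch of the idea (not the rustc type):

    use std::cmp::Ordering;
    use std::ptr;

    // Minimal stand-in for rustc's `Interned<'a, T>`: a shared reference whose
    // comparisons may exploit the interner's uniqueness guarantee.
    struct Interned<'a, T>(&'a T);

    impl<'a, T: PartialEq> PartialEq for Interned<'a, T> {
        fn eq(&self, other: &Self) -> bool {
            // Pointer identity implies value equality for interned data.
            ptr::eq(self.0, other.0) || self.0 == other.0
        }
    }

    impl<'a, T: Ord> Interned<'a, T> {
        fn cmp_fast(&self, other: &Self) -> Ordering {
            if ptr::eq(self.0, other.0) {
                Ordering::Equal
            } else {
                // `self.0` is already a `&T`, so it is passed directly,
                // the same cleanup the diff makes (`&other.0` -> `other.0`).
                let res = self.0.cmp(other.0);
                // Uniqueness means distinct pointers hold distinct values.
                debug_assert_ne!(res, Ordering::Equal);
                res
            }
        }
    }

    fn main() {
        let a = String::from("alpha");
        let b = String::from("beta");
        let (x, y) = (Interned(&a), Interned(&b));
        assert!(x == Interned(&a)); // same allocation: pointer fast path
        assert_eq!(x.cmp_fast(&y), Ordering::Less);
    }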
diff --git a/compiler/rustc_data_structures/src/memmap.rs b/compiler/rustc_data_structures/src/memmap.rs
index 917416df6..3d44e17f3 100644
--- a/compiler/rustc_data_structures/src/memmap.rs
+++ b/compiler/rustc_data_structures/src/memmap.rs
@@ -36,6 +36,12 @@ impl Deref for Mmap {
 
     #[inline]
     fn deref(&self) -> &[u8] {
+        &self.0
+    }
+}
+
+impl AsRef<[u8]> for Mmap {
+    fn as_ref(&self) -> &[u8] {
         &*self.0
     }
 }
@@ -96,13 +102,13 @@ impl Deref for MmapMut {
 
     #[inline]
     fn deref(&self) -> &[u8] {
-        &*self.0
+        &self.0
     }
 }
 
 impl DerefMut for MmapMut {
     #[inline]
     fn deref_mut(&mut self) -> &mut [u8] {
-        &mut *self.0
+        &mut self.0
     }
 }
diff --git a/compiler/rustc_data_structures/src/owning_ref/mod.rs b/compiler/rustc_data_structures/src/owning_ref/mod.rs
index ed5e56618..980a540cc 100644
--- a/compiler/rustc_data_structures/src/owning_ref/mod.rs
+++ b/compiler/rustc_data_structures/src/owning_ref/mod.rs
@@ -899,25 +899,25 @@ unsafe impl<O, T: ?Sized> StableAddress for OwningRef<O, T> {}
 
 impl<O, T: ?Sized> AsRef<T> for OwningRef<O, T> {
     fn as_ref(&self) -> &T {
-        &*self
+        self
     }
 }
 
 impl<O, T: ?Sized> AsRef<T> for OwningRefMut<O, T> {
     fn as_ref(&self) -> &T {
-        &*self
+        self
     }
 }
 
 impl<O, T: ?Sized> AsMut<T> for OwningRefMut<O, T> {
     fn as_mut(&mut self) -> &mut T {
-        &mut *self
+        self
     }
 }
 
 impl<O, T: ?Sized> Borrow<T> for OwningRef<O, T> {
     fn borrow(&self) -> &T {
-        &*self
+        self
     }
 }
 
@@ -1021,7 +1021,7 @@ where
     T: PartialEq,
 {
     fn eq(&self, other: &Self) -> bool {
-        (&*self as &T).eq(&*other as &T)
+        self.deref().eq(other.deref())
     }
 }
 
@@ -1032,7 +1032,7 @@ where
     T: PartialOrd,
 {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        (&*self as &T).partial_cmp(&*other as &T)
+        self.deref().partial_cmp(other.deref())
     }
 }
 
@@ -1041,7 +1041,7 @@ where
     T: Ord,
 {
     fn cmp(&self, other: &Self) -> Ordering {
-        (&*self as &T).cmp(&*other as &T)
+        self.deref().cmp(other.deref())
     }
 }
 
@@ -1050,7 +1050,7 @@ where
     T: Hash,
 {
     fn hash<H: Hasher>(&self, state: &mut H) {
-        (&*self as &T).hash(state);
+        self.deref().hash(state);
     }
 }
 
@@ -1059,7 +1059,7 @@ where
     T: PartialEq,
 {
     fn eq(&self, other: &Self) -> bool {
-        (&*self as &T).eq(&*other as &T)
+        self.deref().eq(other.deref())
     }
 }
 
@@ -1070,7 +1070,7 @@ where
     T: PartialOrd,
 {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        (&*self as &T).partial_cmp(&*other as &T)
+        self.deref().partial_cmp(other.deref())
     }
 }
 
@@ -1079,7 +1079,7 @@ where
     T: Ord,
 {
     fn cmp(&self, other: &Self) -> Ordering {
-        (&*self as &T).cmp(&*other as &T)
+        self.deref().cmp(other.deref())
     }
 }
 
@@ -1088,7 +1088,7 @@ where
     T: Hash,
 {
     fn hash<H: Hasher>(&self, state: &mut H) {
-        (&*self as &T).hash(state);
+        self.deref().hash(state);
     }
 }
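The memmap.rs and owning_ref changes above are all one cleanup: once a type implements Deref, an explicit `&*self` reborrow in a position expecting `&Target` can usually be spelled `self` and left to deref coercion. A small illustration with a hypothetical wrapper type:

    use std::ops::Deref;

    // Hypothetical wrapper type, used only to illustrate the coercion.
    struct Bytes(Vec<u8>);

    impl Deref for Bytes {
        type Target = [u8];

        fn deref(&self) -> &[u8] {
            // `&self.0` is a `&Vec<u8>`, which the compiler coerces to
            // `&[u8]`, making the older `&*self.0` spelling redundant.
            &self.0
        }
    }

    impl AsRef<[u8]> for Bytes {
        fn as_ref(&self) -> &[u8] {
            // `self` is a `&Bytes`; deref coercion turns it into `&[u8]`
            // through the `Deref` impl, just like the owning_ref
            // `AsRef`/`Borrow` impls above that now return plain `self`.
            self
        }
    }

    fn main() {
        let b = Bytes(vec![1, 2, 3]);
        assert_eq!(b.as_ref(), &[1, 2, 3]);
        assert_eq!(b[0], 1); // indexing also goes through Deref
    }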
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index ba1960805..aa7a01eed 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -192,7 +192,7 @@ impl SelfProfilerRef {
             F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
         {
             let profiler = profiler_ref.profiler.as_ref().unwrap();
-            f(&**profiler)
+            f(profiler)
         }
 
         if self.event_filter_mask.contains(event_filter) {
@@ -466,7 +466,7 @@ impl SelfProfilerRef {
 
     pub fn with_profiler(&self, f: impl FnOnce(&SelfProfiler)) {
         if let Some(profiler) = &self.profiler {
-            f(&profiler)
+            f(profiler)
         }
     }
 
@@ -733,7 +733,7 @@ impl Drop for VerboseTimingGuard<'_> {
         if let Some((start_time, start_rss, ref message)) = self.start_and_message {
             let end_rss = get_resident_set_size();
             let dur = start_time.elapsed();
-            print_time_passes_entry(&message, dur, start_rss, end_rss);
+            print_time_passes_entry(message, dur, start_rss, end_rss);
         }
     }
 }
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
index fe257e102..d13313dfd 100644
--- a/compiler/rustc_data_structures/src/sorted_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -1,4 +1,4 @@
-use crate::stable_hasher::{HashStable, StableHasher};
+use crate::stable_hasher::{HashStable, StableHasher, StableOrd};
 use std::borrow::Borrow;
 use std::cmp::Ordering;
 use std::iter::FromIterator;
@@ -10,8 +10,8 @@ mod index_map;
 pub use index_map::SortedIndexMultiMap;
 
 /// `SortedMap` is a data structure with similar characteristics as BTreeMap but
-/// slightly different trade-offs: lookup, insertion, and removal are *O*(log(*n*))
-/// and elements can be iterated in order cheaply.
+/// slightly different trade-offs: lookup is *O*(log(*n*)), insertion and removal
+/// are *O*(*n*) but elements can be iterated in order cheaply.
 ///
 /// `SortedMap` can be faster than a `BTreeMap` for small sizes (<50) since it
 /// stores data in a more compact way. It also supports accessing contiguous
@@ -308,7 +308,7 @@ impl<K: Ord, V> FromIterator<(K, V)> for SortedMap<K, V> {
     }
 }
 
-impl<K: HashStable<CTX>, V: HashStable<CTX>, CTX> HashStable<CTX> for SortedMap<K, V> {
+impl<K: HashStable<CTX> + StableOrd, V: HashStable<CTX>, CTX> HashStable<CTX> for SortedMap<K, V> {
     #[inline]
     fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
         self.data.hash_stable(ctx, hasher);
diff --git a/compiler/rustc_data_structures/src/sorted_map/index_map.rs b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
index 0ec32dc43..c2f0ae328 100644
--- a/compiler/rustc_data_structures/src/sorted_map/index_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
@@ -120,13 +120,20 @@ where
         self.items.hash(hasher)
     }
 }
+
 impl<I: Idx, K, V, C> HashStable<C> for SortedIndexMultiMap<I, K, V>
 where
     K: HashStable<C>,
     V: HashStable<C>,
 {
     fn hash_stable(&self, ctx: &mut C, hasher: &mut StableHasher) {
-        self.items.hash_stable(ctx, hasher)
+        let SortedIndexMultiMap {
+            items,
+            // We can ignore this field because it is not observable from the outside.
+            idx_sorted_by_item_key: _,
+        } = self;
+
+        items.hash_stable(ctx, hasher)
     }
 }
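The index_map.rs hunk replaces a direct field access with an exhaustive destructuring of `self`. The payoff is future-proofing: if a field is later added to the struct, the pattern stops compiling and forces a decision about whether the new field must be hashed. A sketch of the idiom with a made-up struct:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Made-up struct standing in for `SortedIndexMultiMap`.
    struct Cache {
        entries: Vec<(u32, String)>,
        // Derived data; fully determined by `entries`.
        index_sorted_by_key: Vec<usize>,
    }

    impl Cache {
        fn stable_hash(&self, hasher: &mut impl Hasher) {
            // Exhaustive destructuring: adding a field to `Cache` makes this
            // pattern a compile error, so the hash can never silently go stale.
            let Cache {
                entries,
                // Ignored deliberately: derivable from `entries`.
                index_sorted_by_key: _,
            } = self;

            entries.hash(hasher);
        }
    }

    fn main() {
        let c = Cache {
            entries: vec![(1, "a".into()), (2, "b".into())],
            index_sorted_by_key: vec![0, 1],
        };
        let mut h = DefaultHasher::new();
        c.stable_hash(&mut h);
        println!("hash = {:x}", h.finish());
    }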
diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs
index ce8591734..1a728f82f 100644
--- a/compiler/rustc_data_structures/src/stable_hasher.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher.rs
@@ -219,7 +219,35 @@ pub trait ToStableHashKey<HCX> {
     fn to_stable_hash_key(&self, hcx: &HCX) -> Self::KeyType;
 }
 
-/// Implement HashStable by just calling `Hash::hash()`.
+/// Trait for marking a type as having a sort order that is
+/// stable across compilation session boundaries. More formally:
+///
+/// ```txt
+/// Ord::cmp(a1, b1) == Ord::cmp(a2, b2)
+///     where a2 = decode(encode(a1, context1), context2)
+///           b2 = decode(encode(b1, context1), context2)
+/// ```
+///
+/// i.e. the result of `Ord::cmp` is not influenced by encoding
+/// the values in one session and then decoding them in another
+/// session.
+///
+/// This is trivially true for types where encoding and decoding
+/// don't change the bytes of the values that are used during
+/// comparison and comparison only depends on these bytes (as
+/// opposed to some non-local state). Examples are u32, String,
+/// Path, etc.
+///
+/// But it is not true for:
+///  - `*const T` and `*mut T` because the values of these pointers
+///    will change between sessions.
+///  - `DefIndex`, `CrateNum`, `LocalDefId`, because their concrete
+///    values depend on state that might be different between
+///    compilation sessions.
+pub unsafe trait StableOrd: Ord {}
+
+/// Implement HashStable by just calling `Hash::hash()`. Also implement `StableOrd` for the type since
+/// that has the same requirements.
 ///
 /// **WARNING** This is only valid for types that *really* don't need any context for fingerprinting.
 /// But it is easy to misuse this macro (see [#96013](https://github.com/rust-lang/rust/issues/96013)
@@ -227,7 +255,7 @@ pub trait ToStableHashKey<HCX> {
 /// here in this module.
 ///
 /// Use `#[derive(HashStable_Generic)]` instead.
-macro_rules! impl_stable_hash_via_hash {
+macro_rules! impl_stable_traits_for_trivial_type {
     ($t:ty) => {
         impl<CTX> $crate::stable_hasher::HashStable<CTX> for $t {
             #[inline]
@@ -235,26 +263,28 @@ macro_rules! impl_stable_hash_via_hash {
                 ::std::hash::Hash::hash(self, hasher);
             }
         }
+
+        unsafe impl $crate::stable_hasher::StableOrd for $t {}
     };
 }
 
-impl_stable_hash_via_hash!(i8);
-impl_stable_hash_via_hash!(i16);
-impl_stable_hash_via_hash!(i32);
-impl_stable_hash_via_hash!(i64);
-impl_stable_hash_via_hash!(isize);
+impl_stable_traits_for_trivial_type!(i8);
+impl_stable_traits_for_trivial_type!(i16);
+impl_stable_traits_for_trivial_type!(i32);
+impl_stable_traits_for_trivial_type!(i64);
+impl_stable_traits_for_trivial_type!(isize);
 
-impl_stable_hash_via_hash!(u8);
-impl_stable_hash_via_hash!(u16);
-impl_stable_hash_via_hash!(u32);
-impl_stable_hash_via_hash!(u64);
-impl_stable_hash_via_hash!(usize);
+impl_stable_traits_for_trivial_type!(u8);
+impl_stable_traits_for_trivial_type!(u16);
+impl_stable_traits_for_trivial_type!(u32);
+impl_stable_traits_for_trivial_type!(u64);
+impl_stable_traits_for_trivial_type!(usize);
 
-impl_stable_hash_via_hash!(u128);
-impl_stable_hash_via_hash!(i128);
+impl_stable_traits_for_trivial_type!(u128);
+impl_stable_traits_for_trivial_type!(i128);
 
-impl_stable_hash_via_hash!(char);
-impl_stable_hash_via_hash!(());
+impl_stable_traits_for_trivial_type!(char);
+impl_stable_traits_for_trivial_type!(());
 
 impl<CTX> HashStable<CTX> for ! {
     fn hash_stable(&self, _ctx: &mut CTX, _hasher: &mut StableHasher) {
@@ -366,7 +396,7 @@ impl<CTX> HashStable<CTX> for [u8] {
 impl<T: HashStable<CTX>, CTX> HashStable<CTX> for Vec<T> {
     #[inline]
     fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
-        (&self[..]).hash_stable(ctx, hasher);
+        self[..].hash_stable(ctx, hasher);
     }
 }
 
@@ -399,13 +429,13 @@ where
     }
 }
 
-impl<A, CTX> HashStable<CTX> for SmallVec<[A; 1]>
+impl<A, const N: usize, CTX> HashStable<CTX> for SmallVec<[A; N]>
 where
     A: HashStable<CTX>,
 {
     #[inline]
     fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
-        (&self[..]).hash_stable(ctx, hasher);
+        self[..].hash_stable(ctx, hasher);
     }
 }
 
@@ -440,10 +470,14 @@ impl<CTX> HashStable<CTX> for str {
 impl<CTX> HashStable<CTX> for String {
     #[inline]
     fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
-        (&self[..]).hash_stable(hcx, hasher);
+        self[..].hash_stable(hcx, hasher);
     }
 }
 
+// Safety: String comparison only depends on their contents and the
+// contents are not changed by (de-)serialization.
+unsafe impl StableOrd for String {}
+
 impl<HCX> ToStableHashKey<HCX> for String {
     type KeyType = String;
     #[inline]
@@ -459,6 +493,9 @@ impl<CTX> HashStable<CTX> for bool {
     }
 }
 
+// Safety: sort order of bools is not changed by (de-)serialization.
+unsafe impl StableOrd for bool {}
+
 impl<T, CTX> HashStable<CTX> for Option<T>
 where
     T: HashStable<CTX>,
@@ -474,6 +511,9 @@ where
     }
 }
 
+// Safety: the Option wrapper does not add instability to comparison.
+unsafe impl<T: StableOrd> StableOrd for Option<T> {}
+
 impl<T1, T2, CTX> HashStable<CTX> for Result<T1, T2>
 where
     T1: HashStable<CTX>,
@@ -550,8 +590,8 @@ where
     }
 }
 
-impl_stable_hash_via_hash!(::std::path::Path);
-impl_stable_hash_via_hash!(::std::path::PathBuf);
+impl_stable_traits_for_trivial_type!(::std::path::Path);
+impl_stable_traits_for_trivial_type!(::std::path::PathBuf);
 
 impl<K, V, R, HCX> HashStable<HCX> for ::std::collections::HashMap<K, V, R>
 where
@@ -584,27 +624,26 @@ where
 
 impl<K, V, HCX> HashStable<HCX> for ::std::collections::BTreeMap<K, V>
 where
-    K: ToStableHashKey<HCX>,
+    K: HashStable<HCX> + StableOrd,
     V: HashStable<HCX>,
 {
     fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
-        stable_hash_reduce(hcx, hasher, self.iter(), self.len(), |hasher, hcx, (key, value)| {
-            let key = key.to_stable_hash_key(hcx);
-            key.hash_stable(hcx, hasher);
-            value.hash_stable(hcx, hasher);
-        });
+        self.len().hash_stable(hcx, hasher);
+        for entry in self.iter() {
+            entry.hash_stable(hcx, hasher);
+        }
     }
 }
 
 impl<K, HCX> HashStable<HCX> for ::std::collections::BTreeSet<K>
 where
-    K: ToStableHashKey<HCX>,
+    K: HashStable<HCX> + StableOrd,
 {
     fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
-        stable_hash_reduce(hcx, hasher, self.iter(), self.len(), |hasher, hcx, key| {
-            let key = key.to_stable_hash_key(hcx);
-            key.hash_stable(hcx, hasher);
-        });
+        self.len().hash_stable(hcx, hasher);
+        for entry in self.iter() {
+            entry.hash_stable(hcx, hasher);
+        }
     }
 }
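To make the new StableOrd contract concrete: a type opts in by asserting, unsafely because the compiler cannot verify it, that its Ord result is a pure function of data that survives encode/decode unchanged. A sketch under that assumption, using a local copy of the marker trait rather than the rustc one:

    use std::cmp::Ordering;

    // Local copy of the marker trait for illustration; in rustc it lives in
    // rustc_data_structures::stable_hasher.
    unsafe trait StableOrd: Ord {}

    // A newtype whose ordering depends only on the wrapped integer, which
    // serialization round-trips byte-for-byte: safe to mark as StableOrd.
    #[derive(PartialEq, Eq, PartialOrd, Ord)]
    struct ItemId(u32);

    // Safety: comparison reads nothing but `self.0`, and `u32` survives
    // encode/decode unchanged.
    unsafe impl StableOrd for ItemId {}

    // By contrast, something like `struct Addr(*const u8)` ordered by pointer
    // value must NOT get this impl: addresses differ between sessions.

    fn sorted_for_hashing<T: StableOrd>(mut xs: Vec<T>) -> Vec<T> {
        // A sort over a StableOrd key yields the same sequence in every
        // compilation session, so hashing the result is deterministic.
        xs.sort();
        xs
    }

    fn main() {
        let ids = sorted_for_hashing(vec![ItemId(3), ItemId(1), ItemId(2)]);
        assert!(ids.windows(2).all(|w| w[0] <= w[1]));
    }

This is also why the BTreeMap/BTreeSet impls above can hash entries in iteration order directly instead of re-keying through ToStableHashKey: with K: StableOrd, the iteration order itself is already reproducible.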
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 9c0fb8265..e4f47b22a 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -201,7 +201,7 @@ cfg_if! {
 
         #[inline(always)]
         fn deref(&self) -> &T {
-            &*self.0
+            &self.0
        }
     }
 
@@ -410,6 +410,7 @@ impl<T> Lock<T> {
 
     #[cfg(parallel_compiler)]
     #[inline(always)]
+    #[track_caller]
     pub fn lock(&self) -> LockGuard<'_, T> {
         if ERROR_CHECKING {
             self.0.try_lock().expect("lock was already held")
@@ -420,21 +421,25 @@ impl<T> Lock<T> {
 
     #[cfg(not(parallel_compiler))]
     #[inline(always)]
+    #[track_caller]
     pub fn lock(&self) -> LockGuard<'_, T> {
         self.0.borrow_mut()
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
         f(&mut *self.lock())
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn borrow(&self) -> LockGuard<'_, T> {
         self.lock()
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn borrow_mut(&self) -> LockGuard<'_, T> {
         self.lock()
     }
@@ -476,6 +481,7 @@ impl<T> RwLock<T> {
 
     #[cfg(not(parallel_compiler))]
     #[inline(always)]
+    #[track_caller]
     pub fn read(&self) -> ReadGuard<'_, T> {
         self.0.borrow()
     }
@@ -491,6 +497,7 @@ impl<T> RwLock<T> {
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
         f(&*self.read())
     }
@@ -509,6 +516,7 @@ impl<T> RwLock<T> {
 
     #[cfg(not(parallel_compiler))]
     #[inline(always)]
+    #[track_caller]
     pub fn write(&self) -> WriteGuard<'_, T> {
         self.0.borrow_mut()
     }
@@ -524,16 +532,19 @@ impl<T> RwLock<T> {
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
         f(&mut *self.write())
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn borrow(&self) -> ReadGuard<'_, T> {
         self.read()
     }
 
     #[inline(always)]
+    #[track_caller]
     pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
         self.write()
     }
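Finally, most of the sync.rs hunks just add #[track_caller]. With it, a panic raised inside lock() (for example the re-entrant "lock was already held" case) is reported at the caller's file and line instead of deep inside rustc_data_structures, because std's RefCell borrow methods are themselves #[track_caller] and the attribute propagates through every annotated frame. A self-contained sketch of the mechanism, with RefCell standing in for the non-parallel Lock:

    use std::cell::{RefCell, RefMut};
    use std::panic::Location;

    struct Lock<T>(RefCell<T>);

    impl<T> Lock<T> {
        fn new(value: T) -> Self {
            Lock(RefCell::new(value))
        }

        // With #[track_caller], Location::caller() (and the location reported
        // by a panicking borrow_mut) refers to the code that called lock().
        #[track_caller]
        fn lock(&self) -> RefMut<'_, T> {
            println!("lock() called from {}", Location::caller());
            self.0.borrow_mut() // a double borrow panics at the caller's line
        }
    }

    fn main() {
        let lock = Lock::new(0u32);
        *lock.lock() += 1; // prints this file/line as the caller
        assert_eq!(*lock.0.borrow(), 1);
    }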