From 698f8c2f01ea549d77d7dc3338a12e04c11057b9 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 17 Apr 2024 14:02:58 +0200
Subject: Adding upstream version 1.64.0+dfsg1.

Signed-off-by: Daniel Baumann
---
 compiler/rustc_data_structures/src/intern.rs | 196 +++++++++++++++++++++++++++
 1 file changed, 196 insertions(+)
 create mode 100644 compiler/rustc_data_structures/src/intern.rs

diff --git a/compiler/rustc_data_structures/src/intern.rs b/compiler/rustc_data_structures/src/intern.rs
new file mode 100644
index 000000000..009b5d534
--- /dev/null
+++ b/compiler/rustc_data_structures/src/intern.rs
@@ -0,0 +1,196 @@
+use crate::stable_hasher::{HashStable, StableHasher};
+use std::cmp::Ordering;
+use std::hash::{Hash, Hasher};
+use std::ops::Deref;
+use std::ptr;
+
+use crate::fingerprint::Fingerprint;
+
+mod private {
+    #[derive(Clone, Copy, Debug)]
+    pub struct PrivateZst;
+}
+
+/// A reference to a value that is interned, and is known to be unique.
+///
+/// Note that it is possible to have a `T` and an `Interned<T>` that are (or
+/// refer to) equal but different values. But if you have two different
+/// `Interned<T>`s, they both refer to the same value, at a single location in
+/// memory. This means that equality and hashing can be done on the value's
+/// address rather than the value's contents, which can improve performance.
+///
+/// The `PrivateZst` field means you can pattern match with `Interned(v, _)`
+/// but you can only construct an `Interned` with `new_unchecked`, and not
+/// directly.
+#[derive(Debug)]
+#[rustc_pass_by_value]
+pub struct Interned<'a, T>(pub &'a T, pub private::PrivateZst);
+
+impl<'a, T> Interned<'a, T> {
+    /// Create a new `Interned` value. The value referred to *must* be interned
+    /// and thus be unique, and it *must* remain unique in the future. This
+    /// function has `_unchecked` in the name but is not `unsafe`, because if
+    /// the uniqueness condition is violated it will cause incorrect
+    /// behaviour but will not affect memory safety.
+    #[inline]
+    pub const fn new_unchecked(t: &'a T) -> Self {
+        Interned(t, private::PrivateZst)
+    }
+}
+
+impl<'a, T> Clone for Interned<'a, T> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<'a, T> Copy for Interned<'a, T> {}
+
+impl<'a, T> Deref for Interned<'a, T> {
+    type Target = T;
+
+    #[inline]
+    fn deref(&self) -> &T {
+        self.0
+    }
+}
+
+impl<'a, T> PartialEq for Interned<'a, T> {
+    #[inline]
+    fn eq(&self, other: &Self) -> bool {
+        // Pointer equality implies equality, due to the uniqueness constraint.
+        ptr::eq(self.0, other.0)
+    }
+}
+
+impl<'a, T> Eq for Interned<'a, T> {}
+
+impl<'a, T: PartialOrd> PartialOrd for Interned<'a, T> {
+    fn partial_cmp(&self, other: &Interned<'a, T>) -> Option<Ordering> {
+        // Pointer equality implies equality, due to the uniqueness constraint,
+        // but the contents must be compared otherwise.
+        if ptr::eq(self.0, other.0) {
+            Some(Ordering::Equal)
+        } else {
+            let res = self.0.partial_cmp(&other.0);
+            debug_assert_ne!(res, Some(Ordering::Equal));
+            res
+        }
+    }
+}
+
+impl<'a, T: Ord> Ord for Interned<'a, T> {
+    fn cmp(&self, other: &Interned<'a, T>) -> Ordering {
+        // Pointer equality implies equality, due to the uniqueness constraint,
+        // but the contents must be compared otherwise.
+        if ptr::eq(self.0, other.0) {
+            Ordering::Equal
+        } else {
+            let res = self.0.cmp(&other.0);
+            debug_assert_ne!(res, Ordering::Equal);
+            res
+        }
+    }
+}
+
+impl<'a, T> Hash for Interned<'a, T> {
+    #[inline]
+    fn hash<H: Hasher>(&self, s: &mut H) {
+        // Pointer hashing is sufficient, due to the uniqueness constraint.
+        ptr::hash(self.0, s)
+    }
+}
+
+impl<T, CTX> HashStable<CTX> for Interned<'_, T>
+where
+    T: HashStable<CTX>,
+{
+    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+        self.0.hash_stable(hcx, hasher);
+    }
+}
+
+/// A helper trait so that `Interned` things can cache stable hashes reproducibly.
+pub trait InternedHashingContext {
+    fn with_def_path_and_no_spans(&mut self, f: impl FnOnce(&mut Self));
+}
+
+/// A helper type that you can wrap round your own type in order to automatically
+/// cache the stable hash on creation and not recompute it whenever the stable hash
+/// of the type is computed.
+/// This is only done in incremental mode. You can also opt out of caching by using
+/// Fingerprint::ZERO for the hash, in which case the hash gets computed each time.
+/// This is useful if you have values that you intern but never (can?) use for stable
+/// hashing.
+#[derive(Copy, Clone)]
+pub struct WithStableHash<T> {
+    pub internee: T,
+    pub stable_hash: Fingerprint,
+}
+
+impl<T: PartialEq> PartialEq for WithStableHash<T> {
+    #[inline]
+    fn eq(&self, other: &Self) -> bool {
+        self.internee.eq(&other.internee)
+    }
+}
+
+impl<T: Eq> Eq for WithStableHash<T> {}
+
+impl<T: Ord> PartialOrd for WithStableHash<T> {
+    fn partial_cmp(&self, other: &WithStableHash<T>) -> Option<Ordering> {
+        Some(self.internee.cmp(&other.internee))
+    }
+}
+
+impl<T: Ord> Ord for WithStableHash<T> {
+    fn cmp(&self, other: &WithStableHash<T>) -> Ordering {
+        self.internee.cmp(&other.internee)
+    }
+}
+
+impl<T> Deref for WithStableHash<T> {
+    type Target = T;
+
+    #[inline]
+    fn deref(&self) -> &T {
+        &self.internee
+    }
+}
+
+impl<T: Hash> Hash for WithStableHash<T> {
+    #[inline]
+    fn hash<H: Hasher>(&self, s: &mut H) {
+        self.internee.hash(s)
+    }
+}
+
+impl<T: HashStable<CTX>, CTX: InternedHashingContext> HashStable<CTX> for WithStableHash<T> {
+    fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+        if self.stable_hash == Fingerprint::ZERO || cfg!(debug_assertions) {
+            // No cached hash available. This can only mean that incremental is disabled.
+            // We don't cache stable hashes in non-incremental mode, because they are used
+            // so rarely that the performance actually suffers.
+
+            // We need to build the hash as if we cached it and then hash that hash, as
+            // otherwise the hashes will differ between cached and non-cached mode.
+            let stable_hash: Fingerprint = {
+                let mut hasher = StableHasher::new();
+                hcx.with_def_path_and_no_spans(|hcx| self.internee.hash_stable(hcx, &mut hasher));
+                hasher.finish()
+            };
+            if cfg!(debug_assertions) && self.stable_hash != Fingerprint::ZERO {
+                assert_eq!(
+                    stable_hash, self.stable_hash,
+                    "cached stable hash does not match freshly computed stable hash"
+                );
+            }
+            stable_hash.hash_stable(hcx, hasher);
+        } else {
+            self.stable_hash.hash_stable(hcx, hasher);
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests;
--
cgit v1.2.3
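The pattern the new `Interned` type encodes (equality and hashing by address, justified by the interner's uniqueness guarantee) can be sketched outside rustc. The following is a minimal, self-contained example, not the types from the patch: `StrInterner` and the leaked `'static` allocations are illustrative stand-ins (rustc interns into arenas), and the local `Interned` wrapper omits the patch's `PrivateZst` constructor guard, `#[rustc_pass_by_value]`, the `Ord`/`PartialOrd` impls, and the `HashStable` plumbing.

    use std::collections::{HashMap, HashSet};
    use std::hash::{Hash, Hasher};
    use std::ptr;

    /// A toy string interner: each distinct string gets exactly one (leaked)
    /// allocation, so the uniqueness condition that `Interned` relies on holds.
    /// Leaking is fine for a short demo; rustc uses arenas instead.
    #[derive(Default)]
    struct StrInterner {
        map: HashMap<String, &'static str>,
    }

    impl StrInterner {
        fn intern(&mut self, s: &str) -> Interned<'static, str> {
            if let Some(&interned) = self.map.get(s) {
                return Interned(interned);
            }
            let leaked: &'static str = Box::leak(s.to_owned().into_boxed_str());
            self.map.insert(s.to_owned(), leaked);
            Interned(leaked)
        }
    }

    /// Stand-in for the patch's `Interned<'a, T>`: a reference known to be
    /// unique, so equality and hashing can use the address, not the contents.
    #[derive(Debug)]
    struct Interned<'a, T: ?Sized>(&'a T);

    impl<'a, T: ?Sized> Clone for Interned<'a, T> {
        fn clone(&self) -> Self {
            *self
        }
    }

    impl<'a, T: ?Sized> Copy for Interned<'a, T> {}

    impl<'a, T: ?Sized> PartialEq for Interned<'a, T> {
        fn eq(&self, other: &Self) -> bool {
            // Pointer equality implies value equality, because every distinct
            // value has exactly one interned allocation.
            ptr::eq(self.0, other.0)
        }
    }

    impl<'a, T: ?Sized> Eq for Interned<'a, T> {}

    impl<'a, T: ?Sized> Hash for Interned<'a, T> {
        fn hash<H: Hasher>(&self, s: &mut H) {
            // Hash the address, not the (possibly large) contents.
            ptr::hash(self.0, s)
        }
    }

    fn main() {
        let mut interner = StrInterner::default();
        let a = interner.intern("hello");
        let b = interner.intern("hello");
        let c = interner.intern("world");

        assert_eq!(a, b); // equal contents => same allocation => pointer-equal
        assert_ne!(a, c);

        let mut set = HashSet::new();
        set.insert(a);
        assert!(set.contains(&b)); // found via cheap address hashing
        assert_eq!(set.len(), 1);
    }

The correctness of the address comparison rests entirely on the interner never handing out two allocations for equal values, which is why the real type in the patch hides construction behind `new_unchecked` and the private `PrivateZst` field.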
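The `WithStableHash` half of the patch caches a `Fingerprint` next to the interned value so that stable hashing can reuse it instead of re-walking the value, while still producing the same bytes as the uncached path. Below is a hedged sketch of that caching idea only: `Digest`, `CachedHash`, `digest_of`, `new_cached`, `new_uncached`, and `feed` are hypothetical names invented for the example, and std's deterministic `DefaultHasher` stands in for rustc's `StableHasher`, `Fingerprint`, and the `InternedHashingContext` span handling.

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    /// Stand-in for rustc's `Fingerprint`: a 64-bit digest where `ZERO` means
    /// "no cached hash available".
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct Digest(u64);

    impl Digest {
        const ZERO: Digest = Digest(0);
    }

    /// Hypothetical analogue of `WithStableHash<T>`: the wrapped value plus a
    /// digest that may have been computed once when the value was created.
    struct CachedHash<T> {
        internee: T,
        stable_hash: Digest,
    }

    /// `DefaultHasher::new()` is deterministic within a process, so it serves
    /// as a stand-in for rustc's `StableHasher` here.
    fn digest_of<T: Hash>(value: &T) -> Digest {
        let mut hasher = DefaultHasher::new();
        value.hash(&mut hasher);
        Digest(hasher.finish())
    }

    impl<T: Hash> CachedHash<T> {
        /// Cache the digest up front (what incremental compilation does).
        fn new_cached(internee: T) -> Self {
            let stable_hash = digest_of(&internee);
            CachedHash { internee, stable_hash }
        }

        /// Opt out of caching by storing `ZERO` (the non-incremental case).
        fn new_uncached(internee: T) -> Self {
            CachedHash { internee, stable_hash: Digest::ZERO }
        }

        /// Mirrors the shape of `WithStableHash::hash_stable`: reuse the
        /// cached digest if there is one, otherwise compute it on the spot.
        /// Both paths feed a *digest* to the outer hasher, so cached and
        /// uncached values hash identically.
        fn feed<H: Hasher>(&self, out: &mut H) {
            let digest = if self.stable_hash == Digest::ZERO {
                digest_of(&self.internee)
            } else {
                self.stable_hash
            };
            digest.0.hash(out);
        }
    }

    fn main() {
        let cached = CachedHash::new_cached(vec![1u32, 2, 3]);
        let uncached = CachedHash::new_uncached(vec![1u32, 2, 3]);

        let (mut h1, mut h2) = (DefaultHasher::new(), DefaultHasher::new());
        cached.feed(&mut h1);
        uncached.feed(&mut h2);

        // Equal values hash the same whether or not the digest was cached;
        // this is the invariant the patch re-checks under debug assertions.
        assert_eq!(h1.finish(), h2.finish());
    }

The sketch leaves out the extra verification the patch performs under `cfg!(debug_assertions)`, where a cached fingerprint is recomputed and compared against the stored one before being fed to the outer hasher.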