Diffstat (limited to 'compiler/rustc_data_structures')
-rw-r--r--  compiler/rustc_data_structures/Cargo.toml              |   2
-rw-r--r--  compiler/rustc_data_structures/src/intern.rs           |   8
-rw-r--r--  compiler/rustc_data_structures/src/lib.rs              |   1
-rw-r--r--  compiler/rustc_data_structures/src/sharded.rs          |  48
-rw-r--r--  compiler/rustc_data_structures/src/small_str.rs        |  68
-rw-r--r--  compiler/rustc_data_structures/src/small_str/tests.rs  |  20
-rw-r--r--  compiler/rustc_data_structures/src/stable_hasher.rs    |  64
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs             |  11
-rw-r--r--  compiler/rustc_data_structures/src/temp_dir.rs         |   2
-rw-r--r--  compiler/rustc_data_structures/src/unord.rs            |   8
10 files changed, 119 insertions(+), 113 deletions(-)
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index 78f73d193..a5c3cb3f8 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -10,7 +10,7 @@ arrayvec = { version = "0.7", default-features = false }
bitflags = "1.2.1"
cfg-if = "1.0"
ena = "0.14.2"
-indexmap = { version = "1.9.3" }
+indexmap = { version = "2.0.0" }
jobserver_crate = { version = "0.1.13", package = "jobserver" }
libc = "0.2"
measureme = "10.0.0"
diff --git a/compiler/rustc_data_structures/src/intern.rs b/compiler/rustc_data_structures/src/intern.rs
index ba94f3776..e0f8c350c 100644
--- a/compiler/rustc_data_structures/src/intern.rs
+++ b/compiler/rustc_data_structures/src/intern.rs
@@ -1,5 +1,6 @@
use crate::stable_hasher::{HashStable, StableHasher};
use std::cmp::Ordering;
+use std::fmt::{self, Debug};
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use std::ptr;
@@ -20,7 +21,6 @@ mod private {
/// The `PrivateZst` field means you can pattern match with `Interned(v, _)`
/// but you can only construct an `Interned` with `new_unchecked`, and not
/// directly.
-#[derive(Debug)]
#[rustc_pass_by_value]
pub struct Interned<'a, T>(pub &'a T, pub private::PrivateZst);
@@ -108,5 +108,11 @@ where
}
}
+impl<T: Debug> Debug for Interned<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
#[cfg(test)]
mod tests;
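
The manual impl above forwards formatting to the interned value, so debug output drops the `Interned(..., PrivateZst)` wrapper that the removed derive produced. A minimal, self-contained sketch of the effect, using stand-in types rather than the real compiler ones:

use std::fmt::{self, Debug};

// Stand-ins for the real types, just to show the forwarding impl.
mod private {
    pub struct PrivateZst;
}

pub struct Interned<'a, T>(pub &'a T, pub private::PrivateZst);

// Forward to the inner value: `{:?}` now prints the value itself
// instead of a derived `Interned(value, PrivateZst)` wrapper.
impl<T: Debug> Debug for Interned<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}

fn main() {
    let interned = Interned(&42, private::PrivateZst);
    assert_eq!(format!("{interned:?}"), "42");
}
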
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 859e384d8..3deb9c5c2 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -68,7 +68,6 @@ pub mod macros;
pub mod obligation_forest;
pub mod sip128;
pub mod small_c_str;
-pub mod small_str;
pub mod snapshot_map;
pub mod svh;
pub use ena::snapshot_vec;
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 7ed70ba1e..40cbf1495 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -1,4 +1,6 @@
use crate::fx::{FxHashMap, FxHasher};
+#[cfg(parallel_compiler)]
+use crate::sync::is_dyn_thread_safe;
use crate::sync::{CacheAligned, Lock, LockGuard};
use std::borrow::Borrow;
use std::collections::hash_map::RawEntryMut;
@@ -18,6 +20,11 @@ pub const SHARDS: usize = 1 << SHARD_BITS;
/// An array of cache-line aligned inner locked structures with convenience methods.
pub struct Sharded<T> {
+    /// This mask is used to ensure that accesses stay in bounds of `shards`.
+    /// When dynamic thread safety is off, this field is set to 0, causing only
+    /// a single shard to be used for greater cache efficiency.
+    #[cfg(parallel_compiler)]
+    mask: usize,
shards: [CacheAligned<Lock<T>>; SHARDS],
}
@@ -31,31 +38,54 @@ impl<T: Default> Default for Sharded<T> {
impl<T> Sharded<T> {
#[inline]
pub fn new(mut value: impl FnMut() -> T) -> Self {
- Sharded { shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))) }
+ Sharded {
+ #[cfg(parallel_compiler)]
+ mask: if is_dyn_thread_safe() { SHARDS - 1 } else { 0 },
+ shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
+ }
+ }
+
+ #[inline(always)]
+ fn mask(&self) -> usize {
+ #[cfg(parallel_compiler)]
+ {
+ if SHARDS == 1 { 0 } else { self.mask }
+ }
+ #[cfg(not(parallel_compiler))]
+ {
+ 0
+ }
+ }
+
+ #[inline(always)]
+ fn count(&self) -> usize {
+ // `self.mask` is always one below the used shard count
+ self.mask() + 1
}
/// The shard is selected by hashing `val` with `FxHasher`.
#[inline]
pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
- if SHARDS == 1 { &self.shards[0].0 } else { self.get_shard_by_hash(make_hash(val)) }
+ self.get_shard_by_hash(if SHARDS == 1 { 0 } else { make_hash(val) })
}
#[inline]
pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
- &self.shards[get_shard_index_by_hash(hash)].0
+ self.get_shard_by_index(get_shard_hash(hash))
}
#[inline]
pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
- &self.shards[i].0
+        // SAFETY: The index gets ANDed with the mask, ensuring it is always in bounds.
+ unsafe { &self.shards.get_unchecked(i & self.mask()).0 }
}
pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
- (0..SHARDS).map(|i| self.shards[i].0.lock()).collect()
+ (0..self.count()).map(|i| self.get_shard_by_index(i).lock()).collect()
}
pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
- (0..SHARDS).map(|i| self.shards[i].0.try_lock()).collect()
+ (0..self.count()).map(|i| self.get_shard_by_index(i).try_lock()).collect()
}
}
@@ -136,11 +166,9 @@ pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
/// `hash` can be computed with any hasher, so long as that hasher is used
/// consistently for each `Sharded` instance.
#[inline]
-#[allow(clippy::modulo_one)]
-pub fn get_shard_index_by_hash(hash: u64) -> usize {
+fn get_shard_hash(hash: u64) -> usize {
let hash_len = mem::size_of::<usize>();
    // Ignore the top 7 bits, as hashbrown uses those, and take the next SHARD_BITS
    // highest bits. hashbrown also uses the lowest bits, so we can't use those either.
- let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
- bits % SHARDS
+ (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize
}
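
The mask trick relies on `SHARDS` being a power of two: ANDing any index with `SHARDS - 1` keeps it in bounds, while a mask of 0 collapses every access onto shard 0. A minimal sketch of that selection logic with stand-in types, using safe indexing where the real code uses `get_unchecked`:

const SHARD_BITS: usize = 2;
const SHARDS: usize = 1 << SHARD_BITS; // 4, a power of two

struct Sharded<T> {
    mask: usize, // SHARDS - 1 when sharding is on, 0 for the single-shard mode
    shards: [T; SHARDS],
}

impl<T> Sharded<T> {
    fn get_shard_by_index(&self, i: usize) -> &T {
        // `i & self.mask` is always < SHARDS, which is what lets the real
        // code skip the bounds check via `get_unchecked`.
        &self.shards[i & self.mask]
    }
}

fn main() {
    let multi = Sharded { mask: SHARDS - 1, shards: ["a", "b", "c", "d"] };
    assert_eq!(*multi.get_shard_by_index(6), "c"); // 6 & 3 == 2
    let single = Sharded { mask: 0, shards: ["a", "b", "c", "d"] };
    assert_eq!(*single.get_shard_by_index(6), "a"); // always shard 0
}
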
diff --git a/compiler/rustc_data_structures/src/small_str.rs b/compiler/rustc_data_structures/src/small_str.rs
deleted file mode 100644
index 800acb1b0..000000000
--- a/compiler/rustc_data_structures/src/small_str.rs
+++ /dev/null
@@ -1,68 +0,0 @@
-use smallvec::SmallVec;
-
-#[cfg(test)]
-mod tests;
-
-/// Like SmallVec but for strings.
-#[derive(Default)]
-pub struct SmallStr<const N: usize>(SmallVec<[u8; N]>);
-
-impl<const N: usize> SmallStr<N> {
- #[inline]
- pub fn new() -> Self {
- SmallStr(SmallVec::default())
- }
-
- #[inline]
- pub fn push_str(&mut self, s: &str) {
- self.0.extend_from_slice(s.as_bytes());
- }
-
- #[inline]
- pub fn empty(&self) -> bool {
- self.0.is_empty()
- }
-
- #[inline]
- pub fn spilled(&self) -> bool {
- self.0.spilled()
- }
-
- #[inline]
- pub fn as_str(&self) -> &str {
- unsafe { std::str::from_utf8_unchecked(self.0.as_slice()) }
- }
-}
-
-impl<const N: usize> std::ops::Deref for SmallStr<N> {
- type Target = str;
-
- #[inline]
- fn deref(&self) -> &str {
- self.as_str()
- }
-}
-
-impl<const N: usize, A: AsRef<str>> FromIterator<A> for SmallStr<N> {
- #[inline]
- fn from_iter<T>(iter: T) -> Self
- where
- T: IntoIterator<Item = A>,
- {
- let mut s = SmallStr::default();
- s.extend(iter);
- s
- }
-}
-
-impl<const N: usize, A: AsRef<str>> Extend<A> for SmallStr<N> {
- #[inline]
- fn extend<T>(&mut self, iter: T)
- where
- T: IntoIterator<Item = A>,
- {
- for a in iter.into_iter() {
- self.push_str(a.as_ref());
- }
- }
-}
diff --git a/compiler/rustc_data_structures/src/small_str/tests.rs b/compiler/rustc_data_structures/src/small_str/tests.rs
deleted file mode 100644
index 7635a9b72..000000000
--- a/compiler/rustc_data_structures/src/small_str/tests.rs
+++ /dev/null
@@ -1,20 +0,0 @@
-use super::*;
-
-#[test]
-fn empty() {
- let s = SmallStr::<1>::new();
- assert!(s.empty());
- assert_eq!("", s.as_str());
- assert!(!s.spilled());
-}
-
-#[test]
-fn from_iter() {
- let s = ["aa", "bb", "cc"].iter().collect::<SmallStr<6>>();
- assert_eq!("aabbcc", s.as_str());
- assert!(!s.spilled());
-
- let s = ["aa", "bb", "cc", "dd"].iter().collect::<SmallStr<6>>();
- assert_eq!("aabbccdd", s.as_str());
- assert!(s.spilled());
-}
diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs
index 6d57d81c5..6d75b0fb8 100644
--- a/compiler/rustc_data_structures/src/stable_hasher.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher.rs
@@ -1,6 +1,6 @@
use crate::sip128::SipHasher128;
use rustc_index::bit_set::{self, BitSet};
-use rustc_index::{Idx, IndexVec};
+use rustc_index::{Idx, IndexSlice, IndexVec};
use smallvec::SmallVec;
use std::fmt;
use std::hash::{BuildHasher, Hash, Hasher};
@@ -233,7 +233,17 @@ pub trait ToStableHashKey<HCX> {
/// - `DefIndex`, `CrateNum`, `LocalDefId`, because their concrete
/// values depend on state that might be different between
/// compilation sessions.
-pub unsafe trait StableOrd: Ord {}
+///
+/// The associated constant `CAN_USE_UNSTABLE_SORT` denotes whether
+/// unstable sorting can be used for this type. Set to true if and
+/// only if `a == b` implies `a` and `b` are fully indistinguishable.
+pub unsafe trait StableOrd: Ord {
+ const CAN_USE_UNSTABLE_SORT: bool;
+}
+
+unsafe impl<T: StableOrd> StableOrd for &T {
+ const CAN_USE_UNSTABLE_SORT: bool = T::CAN_USE_UNSTABLE_SORT;
+}
/// Implement HashStable by just calling `Hash::hash()`. Also implement `StableOrd` for the type since
/// that has the same requirements.
@@ -253,7 +263,9 @@ macro_rules! impl_stable_traits_for_trivial_type {
}
}
- unsafe impl $crate::stable_hasher::StableOrd for $t {}
+ unsafe impl $crate::stable_hasher::StableOrd for $t {
+ const CAN_USE_UNSTABLE_SORT: bool = true;
+ }
};
}
@@ -339,6 +351,10 @@ impl<T1: HashStable<CTX>, T2: HashStable<CTX>, CTX> HashStable<CTX> for (T1, T2)
}
}
+unsafe impl<T1: StableOrd, T2: StableOrd> StableOrd for (T1, T2) {
+ const CAN_USE_UNSTABLE_SORT: bool = T1::CAN_USE_UNSTABLE_SORT && T2::CAN_USE_UNSTABLE_SORT;
+}
+
impl<T1, T2, T3, CTX> HashStable<CTX> for (T1, T2, T3)
where
T1: HashStable<CTX>,
@@ -353,6 +369,11 @@ where
}
}
+unsafe impl<T1: StableOrd, T2: StableOrd, T3: StableOrd> StableOrd for (T1, T2, T3) {
+ const CAN_USE_UNSTABLE_SORT: bool =
+ T1::CAN_USE_UNSTABLE_SORT && T2::CAN_USE_UNSTABLE_SORT && T3::CAN_USE_UNSTABLE_SORT;
+}
+
impl<T1, T2, T3, T4, CTX> HashStable<CTX> for (T1, T2, T3, T4)
where
T1: HashStable<CTX>,
@@ -369,6 +390,15 @@ where
}
}
+unsafe impl<T1: StableOrd, T2: StableOrd, T3: StableOrd, T4: StableOrd> StableOrd
+ for (T1, T2, T3, T4)
+{
+ const CAN_USE_UNSTABLE_SORT: bool = T1::CAN_USE_UNSTABLE_SORT
+ && T2::CAN_USE_UNSTABLE_SORT
+ && T3::CAN_USE_UNSTABLE_SORT
+ && T4::CAN_USE_UNSTABLE_SORT;
+}
+
impl<T: HashStable<CTX>, CTX> HashStable<CTX> for [T] {
default fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
self.len().hash_stable(ctx, hasher);
@@ -459,6 +489,10 @@ impl<CTX> HashStable<CTX> for str {
}
}
+unsafe impl StableOrd for &str {
+ const CAN_USE_UNSTABLE_SORT: bool = true;
+}
+
impl<CTX> HashStable<CTX> for String {
#[inline]
fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
@@ -468,7 +502,9 @@ impl<CTX> HashStable<CTX> for String {
// Safety: String comparison only depends on their contents and the
// contents are not changed by (de-)serialization.
-unsafe impl StableOrd for String {}
+unsafe impl StableOrd for String {
+ const CAN_USE_UNSTABLE_SORT: bool = true;
+}
impl<HCX> ToStableHashKey<HCX> for String {
type KeyType = String;
@@ -494,7 +530,9 @@ impl<CTX> HashStable<CTX> for bool {
}
// Safety: sort order of bools is not changed by (de-)serialization.
-unsafe impl StableOrd for bool {}
+unsafe impl StableOrd for bool {
+ const CAN_USE_UNSTABLE_SORT: bool = true;
+}
impl<T, CTX> HashStable<CTX> for Option<T>
where
@@ -512,7 +550,9 @@ where
}
// Safety: the Option wrapper does not add instability to comparison.
-unsafe impl<T: StableOrd> StableOrd for Option<T> {}
+unsafe impl<T: StableOrd> StableOrd for Option<T> {
+ const CAN_USE_UNSTABLE_SORT: bool = T::CAN_USE_UNSTABLE_SORT;
+}
impl<T1, T2, CTX> HashStable<CTX> for Result<T1, T2>
where
@@ -557,6 +597,18 @@ where
}
}
+impl<I: Idx, T, CTX> HashStable<CTX> for IndexSlice<I, T>
+where
+ T: HashStable<CTX>,
+{
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.len().hash_stable(ctx, hasher);
+ for v in &self.raw {
+ v.hash_stable(ctx, hasher);
+ }
+ }
+}
+
impl<I: Idx, T, CTX> HashStable<CTX> for IndexVec<I, T>
where
T: HashStable<CTX>,
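
The conjunctions in the tuple impls encode one rule: a compound key may be sorted unstably only if every component is fully indistinguishable under `==`. A simplified sketch of how the associated constant propagates, with a plain trait standing in for the real `unsafe` one:

trait StableOrd: Ord {
    const CAN_USE_UNSTABLE_SORT: bool;
}

impl StableOrd for u32 {
    // `a == b` implies the two values are fully indistinguishable.
    const CAN_USE_UNSTABLE_SORT: bool = true;
}

// A pair is unstable-sortable only if both components are: two pairs
// compare equal only when both of their components do.
impl<A: StableOrd, B: StableOrd> StableOrd for (A, B) {
    const CAN_USE_UNSTABLE_SORT: bool =
        A::CAN_USE_UNSTABLE_SORT && B::CAN_USE_UNSTABLE_SORT;
}

fn main() {
    assert!(<(u32, u32) as StableOrd>::CAN_USE_UNSTABLE_SORT);
}
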
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 6c3197d8e..25a082373 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -139,9 +139,14 @@ cfg_if! {
impl Atomic<bool> {
pub fn fetch_or(&self, val: bool, _: Ordering) -> bool {
- let result = self.0.get() | val;
- self.0.set(val);
- result
+ let old = self.0.get();
+ self.0.set(val | old);
+ old
+ }
+ pub fn fetch_and(&self, val: bool, _: Ordering) -> bool {
+ let old = self.0.get();
+ self.0.set(val & old);
+ old
}
}
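
For context, this `Atomic<bool>` is the non-parallel stand-in built on `Cell`, so the `Ordering` argument is ignored. The old `fetch_or` stored `val` directly instead of `val | old`, which could wrongly clear an already-set flag. A minimal sketch of the corrected semantics:

use std::cell::Cell;

// Single-threaded stand-in for AtomicBool, as in the non-parallel compiler.
struct Atomic<T>(Cell<T>);

impl Atomic<bool> {
    fn fetch_or(&self, val: bool) -> bool {
        let old = self.0.get();
        // The buggy version did `self.0.set(val)`, so fetch_or(false)
        // would clear an already-set flag instead of leaving it set.
        self.0.set(val | old);
        old
    }
}

fn main() {
    let flag = Atomic(Cell::new(true));
    assert_eq!(flag.fetch_or(false), true); // returns the previous value
    assert_eq!(flag.0.get(), true);         // true | false == true
}
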
diff --git a/compiler/rustc_data_structures/src/temp_dir.rs b/compiler/rustc_data_structures/src/temp_dir.rs
index a780d2386..621d3011a 100644
--- a/compiler/rustc_data_structures/src/temp_dir.rs
+++ b/compiler/rustc_data_structures/src/temp_dir.rs
@@ -16,7 +16,7 @@ impl Drop for MaybeTempDir {
// occur.
let dir = unsafe { ManuallyDrop::take(&mut self.dir) };
if self.keep {
- dir.into_path();
+ let _ = dir.into_path();
}
}
}
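
The `let _ =` explicitly discards the `PathBuf` returned by `into_path`; assuming the `tempfile` crate marks that method `#[must_use]`, this keeps the directory on disk without tripping the unused-result lint. A sketch of the pattern with a hypothetical stand-in type:

use std::path::PathBuf;

struct TempDirStandIn;

impl TempDirStandIn {
    // Stand-in for an API that persists the directory and returns its path.
    #[must_use]
    fn into_path(self) -> PathBuf {
        PathBuf::from("/tmp/example")
    }
}

fn main() {
    let dir = TempDirStandIn;
    let _ = dir.into_path(); // discard the path; without `let _ =`, #[must_use] warns
}
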
diff --git a/compiler/rustc_data_structures/src/unord.rs b/compiler/rustc_data_structures/src/unord.rs
index 6c8d54146..2b21815b6 100644
--- a/compiler/rustc_data_structures/src/unord.rs
+++ b/compiler/rustc_data_structures/src/unord.rs
@@ -107,6 +107,10 @@ impl<T, I: Iterator<Item = T>> UnordItems<T, I> {
{
UnordItems(self.0.flat_map(f))
}
+
+ pub fn collect<C: From<UnordItems<T, I>>>(self) -> C {
+ self.into()
+ }
}
impl<T> UnordItems<T, std::iter::Empty<T>> {
@@ -140,12 +144,12 @@ impl<T: Ord, I: Iterator<Item = T>> UnordItems<T, I> {
}
#[inline]
- pub fn into_sorted_stable_ord(self, use_stable_sort: bool) -> Vec<T>
+ pub fn into_sorted_stable_ord(self) -> Vec<T>
where
T: Ord + StableOrd,
{
let mut items: Vec<T> = self.0.collect();
- if use_stable_sort {
+ if !T::CAN_USE_UNSTABLE_SORT {
items.sort();
} else {
items.sort_unstable()
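
With the boolean parameter gone, callers no longer decide sort stability; the element type does, via the new associated constant. A self-contained sketch of the resulting logic, with simplified stand-ins for the real trait and method:

trait StableOrd: Ord {
    const CAN_USE_UNSTABLE_SORT: bool;
}

impl StableOrd for String {
    // Equal strings are fully indistinguishable, so order among equals is moot.
    const CAN_USE_UNSTABLE_SORT: bool = true;
}

fn into_sorted_stable_ord<T: StableOrd>(mut items: Vec<T>) -> Vec<T> {
    if !T::CAN_USE_UNSTABLE_SORT {
        items.sort(); // stable sort preserves the relative order of equal items
    } else {
        items.sort_unstable(); // faster and allocation-free
    }
    items
}

fn main() {
    let sorted = into_sorted_stable_ord(vec!["b".to_owned(), "a".to_owned()]);
    assert_eq!(sorted, ["a", "b"]);
}
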