Diffstat (limited to 'compiler/rustc_data_structures')
-rw-r--r--   compiler/rustc_data_structures/Cargo.toml                        24
-rw-r--r--   compiler/rustc_data_structures/src/fingerprint.rs                 4
-rw-r--r--   compiler/rustc_data_structures/src/flock/linux.rs                 7
-rw-r--r--   compiler/rustc_data_structures/src/fx.rs                         15
-rw-r--r--   compiler/rustc_data_structures/src/graph/vec_graph/mod.rs         4
-rw-r--r--   compiler/rustc_data_structures/src/lib.rs                         6
-rw-r--r--   compiler/rustc_data_structures/src/map_in_place.rs              127
-rw-r--r--   compiler/rustc_data_structures/src/obligation_forest/mod.rs      41
-rw-r--r--   compiler/rustc_data_structures/src/obligation_forest/tests.rs     8
-rw-r--r--   compiler/rustc_data_structures/src/profiling.rs                  64
-rw-r--r--   compiler/rustc_data_structures/src/sorted_map.rs                 37
-rw-r--r--   compiler/rustc_data_structures/src/sso/set.rs                     2
-rw-r--r--   compiler/rustc_data_structures/src/sync.rs                        4
-rw-r--r--   compiler/rustc_data_structures/src/thin_vec.rs                  135
-rw-r--r--   compiler/rustc_data_structures/src/thin_vec/tests.rs             42
-rw-r--r--   compiler/rustc_data_structures/src/transitive_relation.rs       133
-rw-r--r--   compiler/rustc_data_structures/src/transitive_relation/tests.rs  48
-rw-r--r--   compiler/rustc_data_structures/src/unord.rs                     382
18 files changed, 663 insertions(+), 420 deletions(-)
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index 5c641f54f..9daa21ef6 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -4,29 +4,29 @@ version = "0.0.0"
edition = "2021"
[lib]
-doctest = false
[dependencies]
arrayvec = { version = "0.7", default-features = false }
+bitflags = "1.2.1"
+cfg-if = "0.1.2"
ena = "0.14"
indexmap = { version = "1.9.1" }
-tracing = "0.1"
jobserver_crate = { version = "0.1.13", package = "jobserver" }
-rustc_serialize = { path = "../rustc_serialize" }
-rustc_macros = { path = "../rustc_macros" }
-rustc_graphviz = { path = "../rustc_graphviz" }
-cfg-if = "0.1.2"
-stable_deref_trait = "1.0.0"
-rayon = { version = "0.4.0", package = "rustc-rayon", optional = true }
+libc = "0.2"
+measureme = "10.0.0"
rayon-core = { version = "0.4.0", package = "rustc-rayon-core", optional = true }
+rayon = { version = "0.4.0", package = "rustc-rayon", optional = true }
+rustc_graphviz = { path = "../rustc_graphviz" }
rustc-hash = "1.1.0"
-smallvec = { version = "1.8.1", features = ["const_generics", "union", "may_dangle"] }
rustc_index = { path = "../rustc_index", package = "rustc_index" }
-bitflags = "1.2.1"
-measureme = "10.0.0"
-libc = "0.2"
+rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize" }
+smallvec = { version = "1.8.1", features = ["const_generics", "union", "may_dangle"] }
+stable_deref_trait = "1.0.0"
stacker = "0.1.14"
tempfile = "3.2"
+thin-vec = "0.2.8"
+tracing = "0.1"
[dependencies.parking_lot]
version = "0.11"
diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs
index 5ff2d18dd..a39178016 100644
--- a/compiler/rustc_data_structures/src/fingerprint.rs
+++ b/compiler/rustc_data_structures/src/fingerprint.rs
@@ -29,7 +29,7 @@ impl Fingerprint {
// quality hash values, let's still combine the two values because the
// Fingerprints in DefPathHash have the StableCrateId portion which is
// the same for all DefPathHashes from the same crate. Combining the
- // two halfs makes sure we get a good quality hash in such cases too.
+ // two halves makes sure we get a good quality hash in such cases too.
self.0.wrapping_mul(3).wrapping_add(self.1)
}
@@ -120,7 +120,7 @@ impl FingerprintHasher for crate::unhash::Unhasher {
// quality hash values, let's still combine the two values because the
// Fingerprints in DefPathHash have the StableCrateId portion which is
// the same for all DefPathHashes from the same crate. Combining the
- // two halfs makes sure we get a good quality hash in such cases too.
+ // two halves makes sure we get a good quality hash in such cases too.
//
// Since `Unhasher` is used only in the context of HashMaps, it is OK
// to combine the two components in an order-independent way (which is
diff --git a/compiler/rustc_data_structures/src/flock/linux.rs b/compiler/rustc_data_structures/src/flock/linux.rs
index bb3ecfbc3..9ed26e490 100644
--- a/compiler/rustc_data_structures/src/flock/linux.rs
+++ b/compiler/rustc_data_structures/src/flock/linux.rs
@@ -14,12 +14,7 @@ pub struct Lock {
impl Lock {
pub fn new(p: &Path, wait: bool, create: bool, exclusive: bool) -> io::Result<Lock> {
- let file = OpenOptions::new()
- .read(true)
- .write(true)
- .create(create)
- .mode(libc::S_IRWXU as u32)
- .open(p)?;
+ let file = OpenOptions::new().read(true).write(true).create(create).mode(0o600).open(p)?;
let mut operation = if exclusive { libc::LOCK_EX } else { libc::LOCK_SH };
if !wait {
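
(Context for the mode change: `libc::S_IRWXU` is 0o700, i.e. owner read/write/execute; the new 0o600 literal keeps owner read/write and drops the execute bit, which serves no purpose on a lock file.)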
diff --git a/compiler/rustc_data_structures/src/fx.rs b/compiler/rustc_data_structures/src/fx.rs
index bbeb193db..0d0c51b68 100644
--- a/compiler/rustc_data_structures/src/fx.rs
+++ b/compiler/rustc_data_structures/src/fx.rs
@@ -2,13 +2,26 @@ use std::hash::BuildHasherDefault;
pub use rustc_hash::{FxHashMap, FxHashSet, FxHasher};
+pub type StdEntry<'a, K, V> = std::collections::hash_map::Entry<'a, K, V>;
+
pub type FxIndexMap<K, V> = indexmap::IndexMap<K, V, BuildHasherDefault<FxHasher>>;
pub type FxIndexSet<V> = indexmap::IndexSet<V, BuildHasherDefault<FxHasher>>;
+pub type IndexEntry<'a, K, V> = indexmap::map::Entry<'a, K, V>;
#[macro_export]
macro_rules! define_id_collections {
- ($map_name:ident, $set_name:ident, $key:ty) => {
+ ($map_name:ident, $set_name:ident, $entry_name:ident, $key:ty) => {
pub type $map_name<T> = $crate::fx::FxHashMap<$key, T>;
pub type $set_name = $crate::fx::FxHashSet<$key>;
+ pub type $entry_name<'a, T> = $crate::fx::StdEntry<'a, $key, T>;
+ };
+}
+
+#[macro_export]
+macro_rules! define_stable_id_collections {
+ ($map_name:ident, $set_name:ident, $entry_name:ident, $key:ty) => {
+ pub type $map_name<T> = $crate::fx::FxIndexMap<$key, T>;
+ pub type $set_name = $crate::fx::FxIndexSet<$key>;
+ pub type $entry_name<'a, T> = $crate::fx::IndexEntry<'a, $key, T>;
};
}
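
Both macros now take an extra `$entry_name` parameter, so callers also get an `Entry` alias for in-place map manipulation. A minimal sketch of an invocation (the `ItemId` key type is hypothetical):

    // Hypothetical key type; real callers pass an id/index type.
    define_id_collections!(ItemMap, ItemSet, ItemEntry, ItemId);

    // This expands to roughly:
    // pub type ItemMap<T>       = FxHashMap<ItemId, T>;
    // pub type ItemSet          = FxHashSet<ItemId>;
    // pub type ItemEntry<'a, T> = StdEntry<'a, ItemId, T>;

`define_stable_id_collections!` is the same, but backed by the insertion-order-preserving `FxIndexMap`/`FxIndexSet`.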
diff --git a/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
index 3d91bcade..e8efbd09a 100644
--- a/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
@@ -29,8 +29,8 @@ impl<N: Idx + Ord> VecGraph<N> {
// Store the *target* of each edge into `edge_targets`.
let edge_targets: Vec<N> = edge_pairs.iter().map(|&(_, target)| target).collect();
- // Create the *edge starts* array. We are iterating over over
- // the (sorted) edge pairs. We maintain the invariant that the
+ // Create the *edge starts* array. We are iterating over the
+ // (sorted) edge pairs. We maintain the invariant that the
// length of the `node_starts` array is enough to store the
// current source node -- so when we see that the source node
// for an edge is greater than the current length, we grow the
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 265f45b72..3a2000233 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -13,7 +13,6 @@
#![feature(cell_leak)]
#![feature(control_flow_enum)]
#![feature(extend_one)]
-#![feature(let_else)]
#![feature(hash_raw_entry)]
#![feature(hasher_prefixfree_extras)]
#![feature(maybe_uninit_uninit_array)]
@@ -23,11 +22,14 @@
#![feature(new_uninit)]
#![feature(once_cell)]
#![feature(rustc_attrs)]
+#![feature(negative_impls)]
#![feature(test)]
#![feature(thread_id_value)]
#![feature(vec_into_raw_parts)]
#![allow(rustc::default_hash_types)]
#![allow(rustc::potential_query_instability)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate tracing;
@@ -73,7 +75,6 @@ pub mod profiling;
pub mod sharded;
pub mod stack;
pub mod sync;
-pub mod thin_vec;
pub mod tiny_list;
pub mod transitive_relation;
pub mod vec_linked_list;
@@ -86,6 +87,7 @@ pub mod steal;
pub mod tagged_ptr;
pub mod temp_dir;
pub mod unhash;
+pub mod unord;
pub use ena::undo_log;
pub use ena::unify;
diff --git a/compiler/rustc_data_structures/src/map_in_place.rs b/compiler/rustc_data_structures/src/map_in_place.rs
index 874de03d3..a0d4b7ade 100644
--- a/compiler/rustc_data_structures/src/map_in_place.rs
+++ b/compiler/rustc_data_structures/src/map_in_place.rs
@@ -1,5 +1,6 @@
use smallvec::{Array, SmallVec};
use std::ptr;
+use thin_vec::ThinVec;
pub trait MapInPlace<T>: Sized {
fn map_in_place<F>(&mut self, mut f: F)
@@ -15,94 +16,64 @@ pub trait MapInPlace<T>: Sized {
I: IntoIterator<Item = T>;
}
-impl<T> MapInPlace<T> for Vec<T> {
- fn flat_map_in_place<F, I>(&mut self, mut f: F)
- where
- F: FnMut(T) -> I,
- I: IntoIterator<Item = T>,
- {
- let mut read_i = 0;
- let mut write_i = 0;
- unsafe {
- let mut old_len = self.len();
- self.set_len(0); // make sure we just leak elements in case of panic
+// The implementation of this method is syntactically identical for all the
+// different vector types.
+macro_rules! flat_map_in_place {
+ () => {
+ fn flat_map_in_place<F, I>(&mut self, mut f: F)
+ where
+ F: FnMut(T) -> I,
+ I: IntoIterator<Item = T>,
+ {
+ let mut read_i = 0;
+ let mut write_i = 0;
+ unsafe {
+ let mut old_len = self.len();
+ self.set_len(0); // make sure we just leak elements in case of panic
- while read_i < old_len {
- // move the read_i'th item out of the vector and map it
- // to an iterator
- let e = ptr::read(self.as_ptr().add(read_i));
- let iter = f(e).into_iter();
- read_i += 1;
+ while read_i < old_len {
+ // move the read_i'th item out of the vector and map it
+ // to an iterator
+ let e = ptr::read(self.as_ptr().add(read_i));
+ let iter = f(e).into_iter();
+ read_i += 1;
- for e in iter {
- if write_i < read_i {
- ptr::write(self.as_mut_ptr().add(write_i), e);
- write_i += 1;
- } else {
- // If this is reached we ran out of space
- // in the middle of the vector.
- // However, the vector is in a valid state here,
- // so we just do a somewhat inefficient insert.
- self.set_len(old_len);
- self.insert(write_i, e);
+ for e in iter {
+ if write_i < read_i {
+ ptr::write(self.as_mut_ptr().add(write_i), e);
+ write_i += 1;
+ } else {
+ // If this is reached we ran out of space
+ // in the middle of the vector.
+ // However, the vector is in a valid state here,
+ // so we just do a somewhat inefficient insert.
+ self.set_len(old_len);
+ self.insert(write_i, e);
- old_len = self.len();
- self.set_len(0);
+ old_len = self.len();
+ self.set_len(0);
- read_i += 1;
- write_i += 1;
+ read_i += 1;
+ write_i += 1;
+ }
}
}
- }
- // write_i tracks the number of actually written new items.
- self.set_len(write_i);
+ // write_i tracks the number of actually written new items.
+ self.set_len(write_i);
+ }
}
- }
+ };
}
-impl<T, A: Array<Item = T>> MapInPlace<T> for SmallVec<A> {
- fn flat_map_in_place<F, I>(&mut self, mut f: F)
- where
- F: FnMut(T) -> I,
- I: IntoIterator<Item = T>,
- {
- let mut read_i = 0;
- let mut write_i = 0;
- unsafe {
- let mut old_len = self.len();
- self.set_len(0); // make sure we just leak elements in case of panic
-
- while read_i < old_len {
- // move the read_i'th item out of the vector and map it
- // to an iterator
- let e = ptr::read(self.as_ptr().add(read_i));
- let iter = f(e).into_iter();
- read_i += 1;
-
- for e in iter {
- if write_i < read_i {
- ptr::write(self.as_mut_ptr().add(write_i), e);
- write_i += 1;
- } else {
- // If this is reached we ran out of space
- // in the middle of the vector.
- // However, the vector is in a valid state here,
- // so we just do a somewhat inefficient insert.
- self.set_len(old_len);
- self.insert(write_i, e);
-
- old_len = self.len();
- self.set_len(0);
+impl<T> MapInPlace<T> for Vec<T> {
+ flat_map_in_place!();
+}
- read_i += 1;
- write_i += 1;
- }
- }
- }
+impl<T, A: Array<Item = T>> MapInPlace<T> for SmallVec<A> {
+ flat_map_in_place!();
+}
- // write_i tracks the number of actually written new items.
- self.set_len(write_i);
- }
- }
+impl<T> MapInPlace<T> for ThinVec<T> {
+ flat_map_in_place!();
}
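
The macro replaces two syntactically identical copies of `flat_map_in_place` (for `Vec` and `SmallVec`) and adds a third implementor, `ThinVec`, from the external `thin-vec` crate. A small usage sketch of the trait (values illustrative):

    use rustc_data_structures::map_in_place::MapInPlace;

    let mut v = vec![1, 2, 3];
    // Map each element to zero or more elements, reusing the buffer
    // in place where the output fits.
    v.flat_map_in_place(|x| if x % 2 == 0 { vec![] } else { vec![x, x * 10] });
    assert_eq!(v, [1, 10, 3, 30]);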
diff --git a/compiler/rustc_data_structures/src/obligation_forest/mod.rs b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
index 07a96dd7d..10e673cd9 100644
--- a/compiler/rustc_data_structures/src/obligation_forest/mod.rs
+++ b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
@@ -95,6 +95,10 @@ pub trait ForestObligation: Clone + Debug {
pub trait ObligationProcessor {
type Obligation: ForestObligation;
type Error: Debug;
+ type OUT: OutcomeTrait<
+ Obligation = Self::Obligation,
+ Error = Error<Self::Obligation, Self::Error>,
+ >;
fn needs_process_obligation(&self, obligation: &Self::Obligation) -> bool;
@@ -111,12 +115,20 @@ pub trait ObligationProcessor {
/// In other words, if we had O1 which required O2 which required
/// O3 which required O1, we would give an iterator yielding O1,
/// O2, O3 (O1 is not yielded twice).
- fn process_backedge<'c, I>(&mut self, cycle: I, _marker: PhantomData<&'c Self::Obligation>)
+ fn process_backedge<'c, I>(
+ &mut self,
+ cycle: I,
+ _marker: PhantomData<&'c Self::Obligation>,
+ ) -> Result<(), Self::Error>
where
I: Clone + Iterator<Item = &'c Self::Obligation>;
}
/// The result type used by `process_obligation`.
+// `repr(C)` to inhibit the niche filling optimization. Otherwise, the `match` appearing
+// in `process_obligations` is significantly slower, which can substantially affect
+// benchmarks like `rustc-perf`'s inflate and keccak.
+#[repr(C)]
#[derive(Debug)]
pub enum ProcessResult<O, E> {
Unchanged,
@@ -398,12 +410,11 @@ impl<O: ForestObligation> ObligationForest<O> {
/// Performs a fixpoint computation over the obligation list.
#[inline(never)]
- pub fn process_obligations<P, OUT>(&mut self, processor: &mut P) -> OUT
+ pub fn process_obligations<P>(&mut self, processor: &mut P) -> P::OUT
where
P: ObligationProcessor<Obligation = O>,
- OUT: OutcomeTrait<Obligation = O, Error = Error<O, P::Error>>,
{
- let mut outcome = OUT::new();
+ let mut outcome = P::OUT::new();
// Fixpoint computation: we repeat until the inner loop stalls.
loop {
@@ -469,7 +480,7 @@ impl<O: ForestObligation> ObligationForest<O> {
}
self.mark_successes();
- self.process_cycles(processor);
+ self.process_cycles(processor, &mut outcome);
self.compress(|obl| outcome.record_completed(obl));
}
@@ -554,7 +565,7 @@ impl<O: ForestObligation> ObligationForest<O> {
/// Report cycles between all `Success` nodes, and convert all `Success`
/// nodes to `Done`. This must be called after `mark_successes`.
- fn process_cycles<P>(&mut self, processor: &mut P)
+ fn process_cycles<P>(&mut self, processor: &mut P, outcome: &mut P::OUT)
where
P: ObligationProcessor<Obligation = O>,
{
@@ -564,7 +575,7 @@ impl<O: ForestObligation> ObligationForest<O> {
// to handle the no-op cases immediately to avoid the cost of the
// function call.
if node.state.get() == NodeState::Success {
- self.find_cycles_from_node(&mut stack, processor, index);
+ self.find_cycles_from_node(&mut stack, processor, index, outcome);
}
}
@@ -572,8 +583,13 @@ impl<O: ForestObligation> ObligationForest<O> {
self.reused_node_vec = stack;
}
- fn find_cycles_from_node<P>(&self, stack: &mut Vec<usize>, processor: &mut P, index: usize)
- where
+ fn find_cycles_from_node<P>(
+ &self,
+ stack: &mut Vec<usize>,
+ processor: &mut P,
+ index: usize,
+ outcome: &mut P::OUT,
+ ) where
P: ObligationProcessor<Obligation = O>,
{
let node = &self.nodes[index];
@@ -582,17 +598,20 @@ impl<O: ForestObligation> ObligationForest<O> {
None => {
stack.push(index);
for &dep_index in node.dependents.iter() {
- self.find_cycles_from_node(stack, processor, dep_index);
+ self.find_cycles_from_node(stack, processor, dep_index, outcome);
}
stack.pop();
node.state.set(NodeState::Done);
}
Some(rpos) => {
// Cycle detected.
- processor.process_backedge(
+ let result = processor.process_backedge(
stack[rpos..].iter().map(|&i| &self.nodes[i].obligation),
PhantomData,
);
+ if let Err(err) = result {
+ outcome.record_error(Error { error: err, backtrace: self.error_at(index) });
+ }
}
}
}
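
With the outcome type lifted into the trait as `type OUT`, `process_obligations` no longer needs a free `OUT` generic parameter, and `process_backedge` can report cycle errors through the normal outcome machinery. A sketch of what an implementor now declares (all `My*` names hypothetical):

    impl ObligationProcessor for MyProcessor {
        type Obligation = MyObligation;
        type Error = MyError;
        // Any type implementing OutcomeTrait with the matching Obligation/Error.
        type OUT = Outcome<MyObligation, MyError>;

        // ...plus needs_process_obligation, process_obligation, and the
        // now-fallible process_backedge shown in the diff above.
    }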
diff --git a/compiler/rustc_data_structures/src/obligation_forest/tests.rs b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
index e2991aae1..bc252f772 100644
--- a/compiler/rustc_data_structures/src/obligation_forest/tests.rs
+++ b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
@@ -64,6 +64,7 @@ where
{
type Obligation = O;
type Error = E;
+ type OUT = TestOutcome<O, E>;
fn needs_process_obligation(&self, _obligation: &Self::Obligation) -> bool {
true
@@ -76,10 +77,15 @@ where
(self.process_obligation)(obligation)
}
- fn process_backedge<'c, I>(&mut self, _cycle: I, _marker: PhantomData<&'c Self::Obligation>)
+ fn process_backedge<'c, I>(
+ &mut self,
+ _cycle: I,
+ _marker: PhantomData<&'c Self::Obligation>,
+ ) -> Result<(), Self::Error>
where
I: Clone + Iterator<Item = &'c Self::Obligation>,
{
+ Ok(())
}
}
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index d8b26f984..ba1960805 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -158,30 +158,21 @@ pub struct SelfProfilerRef {
// actually enabled.
event_filter_mask: EventFilter,
- // Print verbose generic activities to stdout
+ // Print verbose generic activities to stderr?
print_verbose_generic_activities: bool,
-
- // Print extra verbose generic activities to stdout
- print_extra_verbose_generic_activities: bool,
}
impl SelfProfilerRef {
pub fn new(
profiler: Option<Arc<SelfProfiler>>,
print_verbose_generic_activities: bool,
- print_extra_verbose_generic_activities: bool,
) -> SelfProfilerRef {
// If there is no SelfProfiler then the filter mask is set to NONE,
// ensuring that nothing ever tries to actually access it.
let event_filter_mask =
profiler.as_ref().map_or(EventFilter::empty(), |p| p.event_filter_mask);
- SelfProfilerRef {
- profiler,
- event_filter_mask,
- print_verbose_generic_activities,
- print_extra_verbose_generic_activities,
- }
+ SelfProfilerRef { profiler, event_filter_mask, print_verbose_generic_activities }
}
/// This shim makes sure that calls only get executed if the filter mask
@@ -214,7 +205,7 @@ impl SelfProfilerRef {
/// Start profiling a verbose generic activity. Profiling continues until the
/// VerboseTimingGuard returned from this call is dropped. In addition to recording
/// a measureme event, "verbose" generic activities also print a timing entry to
- /// stdout if the compiler is invoked with -Ztime or -Ztime-passes.
+ /// stderr if the compiler is invoked with -Ztime-passes.
pub fn verbose_generic_activity<'a>(
&'a self,
event_label: &'static str,
@@ -225,11 +216,8 @@ impl SelfProfilerRef {
VerboseTimingGuard::start(message, self.generic_activity(event_label))
}
- /// Start profiling an extra verbose generic activity. Profiling continues until the
- /// VerboseTimingGuard returned from this call is dropped. In addition to recording
- /// a measureme event, "extra verbose" generic activities also print a timing entry to
- /// stdout if the compiler is invoked with -Ztime-passes.
- pub fn extra_verbose_generic_activity<'a, A>(
+ /// Like `verbose_generic_activity`, but with an extra arg.
+ pub fn verbose_generic_activity_with_arg<'a, A>(
&'a self,
event_label: &'static str,
event_arg: A,
@@ -237,7 +225,7 @@ impl SelfProfilerRef {
where
A: Borrow<str> + Into<String>,
{
- let message = if self.print_extra_verbose_generic_activities {
+ let message = if self.print_verbose_generic_activities {
Some(format!("{}({})", event_label, event_arg.borrow()))
} else {
None
@@ -745,27 +733,9 @@ impl Drop for VerboseTimingGuard<'_> {
if let Some((start_time, start_rss, ref message)) = self.start_and_message {
let end_rss = get_resident_set_size();
let dur = start_time.elapsed();
-
- if should_print_passes(dur, start_rss, end_rss) {
- print_time_passes_entry(&message, dur, start_rss, end_rss);
- }
- }
- }
-}
-
-fn should_print_passes(dur: Duration, start_rss: Option<usize>, end_rss: Option<usize>) -> bool {
- if dur.as_millis() > 5 {
- return true;
- }
-
- if let (Some(start_rss), Some(end_rss)) = (start_rss, end_rss) {
- let change_rss = end_rss.abs_diff(start_rss);
- if change_rss > 0 {
- return true;
+ print_time_passes_entry(&message, dur, start_rss, end_rss);
}
}
-
- false
}
pub fn print_time_passes_entry(
@@ -774,6 +744,26 @@ pub fn print_time_passes_entry(
start_rss: Option<usize>,
end_rss: Option<usize>,
) {
+ // Print the pass if its duration is greater than 5 ms, or it changed the
+ // measured RSS.
+ let is_notable = || {
+ if dur.as_millis() > 5 {
+ return true;
+ }
+
+ if let (Some(start_rss), Some(end_rss)) = (start_rss, end_rss) {
+ let change_rss = end_rss.abs_diff(start_rss);
+ if change_rss > 0 {
+ return true;
+ }
+ }
+
+ false
+ };
+ if !is_notable() {
+ return;
+ }
+
let rss_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as usize;
let rss_change_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as i128;
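
Note that the "notable" filter (over 5 ms, or any measured RSS change) now lives inside `print_time_passes_entry` itself, so every caller gets the same suppression of trivial passes, not just the `VerboseTimingGuard` drop path. The predicate, written out as a standalone sketch:

    use std::time::Duration;

    fn is_notable(dur: Duration, start_rss: Option<usize>, end_rss: Option<usize>) -> bool {
        // Notable if the pass ran longer than 5 ms...
        dur.as_millis() > 5
            // ...or if both RSS samples exist and differ at all.
            || matches!((start_rss, end_rss), (Some(s), Some(e)) if s != e)
    }

So a 3 ms pass with identical RSS samples is suppressed, while a 3 ms pass that shifts RSS by even one byte is still printed.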
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
index 9efea1228..fe257e102 100644
--- a/compiler/rustc_data_structures/src/sorted_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -96,6 +96,23 @@ impl<K: Ord, V> SortedMap<K, V> {
}
}
+ /// Gets a mutable reference to the value in the entry, or insert a new one.
+ #[inline]
+ pub fn get_mut_or_insert_default(&mut self, key: K) -> &mut V
+ where
+ K: Eq,
+ V: Default,
+ {
+ let index = match self.lookup_index_for(&key) {
+ Ok(index) => index,
+ Err(index) => {
+ self.data.insert(index, (key, V::default()));
+ index
+ }
+ };
+ unsafe { &mut self.data.get_unchecked_mut(index).1 }
+ }
+
#[inline]
pub fn clear(&mut self) {
self.data.clear();
@@ -164,7 +181,7 @@ impl<K: Ord, V> SortedMap<K, V> {
/// It is up to the caller to make sure that the elements are sorted by key
/// and that there are no duplicates.
#[inline]
- pub fn insert_presorted(&mut self, mut elements: Vec<(K, V)>) {
+ pub fn insert_presorted(&mut self, elements: Vec<(K, V)>) {
if elements.is_empty() {
return;
}
@@ -173,28 +190,28 @@ impl<K: Ord, V> SortedMap<K, V> {
let start_index = self.lookup_index_for(&elements[0].0);
- let drain = match start_index {
+ let elements = match start_index {
Ok(index) => {
- let mut drain = elements.drain(..);
- self.data[index] = drain.next().unwrap();
- drain
+ let mut elements = elements.into_iter();
+ self.data[index] = elements.next().unwrap();
+ elements
}
Err(index) => {
if index == self.data.len() || elements.last().unwrap().0 < self.data[index].0 {
// We can copy the whole range without having to mix with
// existing elements.
- self.data.splice(index..index, elements.drain(..));
+ self.data.splice(index..index, elements.into_iter());
return;
}
- let mut drain = elements.drain(..);
- self.data.insert(index, drain.next().unwrap());
- drain
+ let mut elements = elements.into_iter();
+ self.data.insert(index, elements.next().unwrap());
+ elements
}
};
// Insert the rest
- for (k, v) in drain {
+ for (k, v) in elements {
self.insert(k, v);
}
}
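
A quick sketch of the new `get_mut_or_insert_default` entry point (illustrative values):

    let mut map: SortedMap<u32, Vec<&str>> = SortedMap::new();
    // The first call inserts Vec::default() at the correct sorted position;
    // subsequent calls return a reference to the existing value.
    map.get_mut_or_insert_default(7).push("a");
    map.get_mut_or_insert_default(7).push("b");
    assert_eq!(map.get(&7), Some(&vec!["a", "b"]));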
diff --git a/compiler/rustc_data_structures/src/sso/set.rs b/compiler/rustc_data_structures/src/sso/set.rs
index 4fda3adb7..406f0270d 100644
--- a/compiler/rustc_data_structures/src/sso/set.rs
+++ b/compiler/rustc_data_structures/src/sso/set.rs
@@ -27,7 +27,7 @@ pub struct SsoHashSet<T> {
map: SsoHashMap<T, ()>,
}
-/// Adapter function used ot return
+/// Adapter function used to return
/// result if SsoHashMap functions into
/// result SsoHashSet should return.
#[inline(always)]
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 52952a793..9c0fb8265 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -48,7 +48,7 @@ cfg_if! {
/// the native atomic types.
/// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
/// as it's not intended to be used separately.
- #[derive(Debug)]
+ #[derive(Debug, Default)]
pub struct Atomic<T: Copy>(Cell<T>);
impl<T: Copy> Atomic<T> {
@@ -56,9 +56,7 @@ cfg_if! {
pub fn new(v: T) -> Self {
Atomic(Cell::new(v))
}
- }
- impl<T: Copy> Atomic<T> {
#[inline]
pub fn into_inner(self) -> T {
self.0.into_inner()
diff --git a/compiler/rustc_data_structures/src/thin_vec.rs b/compiler/rustc_data_structures/src/thin_vec.rs
deleted file mode 100644
index 716259142..000000000
--- a/compiler/rustc_data_structures/src/thin_vec.rs
+++ /dev/null
@@ -1,135 +0,0 @@
-use crate::stable_hasher::{HashStable, StableHasher};
-
-use std::iter::FromIterator;
-
-/// A vector type optimized for cases where this size is usually 0 (cf. `SmallVec`).
-/// The `Option<Box<..>>` wrapping allows us to represent a zero sized vector with `None`,
-/// which uses only a single (null) pointer.
-#[derive(Clone, Encodable, Decodable, Debug, Hash, Eq, PartialEq)]
-pub struct ThinVec<T>(Option<Box<Vec<T>>>);
-
-impl<T> ThinVec<T> {
- pub fn new() -> Self {
- ThinVec(None)
- }
-
- pub fn iter(&self) -> std::slice::Iter<'_, T> {
- self.into_iter()
- }
-
- pub fn iter_mut(&mut self) -> std::slice::IterMut<'_, T> {
- self.into_iter()
- }
-
- pub fn push(&mut self, item: T) {
- match *self {
- ThinVec(Some(ref mut vec)) => vec.push(item),
- ThinVec(None) => *self = vec![item].into(),
- }
- }
-}
-
-impl<T> From<Vec<T>> for ThinVec<T> {
- fn from(vec: Vec<T>) -> Self {
- if vec.is_empty() { ThinVec(None) } else { ThinVec(Some(Box::new(vec))) }
- }
-}
-
-impl<T> Into<Vec<T>> for ThinVec<T> {
- fn into(self) -> Vec<T> {
- match self {
- ThinVec(None) => Vec::new(),
- ThinVec(Some(vec)) => *vec,
- }
- }
-}
-
-impl<T> ::std::ops::Deref for ThinVec<T> {
- type Target = [T];
- fn deref(&self) -> &[T] {
- match *self {
- ThinVec(None) => &[],
- ThinVec(Some(ref vec)) => vec,
- }
- }
-}
-
-impl<T> ::std::ops::DerefMut for ThinVec<T> {
- fn deref_mut(&mut self) -> &mut [T] {
- match *self {
- ThinVec(None) => &mut [],
- ThinVec(Some(ref mut vec)) => vec,
- }
- }
-}
-
-impl<T> FromIterator<T> for ThinVec<T> {
- fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
- // `Vec::from_iter()` should not allocate if the iterator is empty.
- let vec: Vec<_> = iter.into_iter().collect();
- if vec.is_empty() { ThinVec(None) } else { ThinVec(Some(Box::new(vec))) }
- }
-}
-
-impl<T> IntoIterator for ThinVec<T> {
- type Item = T;
- type IntoIter = std::vec::IntoIter<T>;
-
- fn into_iter(self) -> Self::IntoIter {
- // This is still performant because `Vec::new()` does not allocate.
- self.0.map_or_else(Vec::new, |ptr| *ptr).into_iter()
- }
-}
-
-impl<'a, T> IntoIterator for &'a ThinVec<T> {
- type Item = &'a T;
- type IntoIter = std::slice::Iter<'a, T>;
-
- fn into_iter(self) -> Self::IntoIter {
- self.as_ref().iter()
- }
-}
-
-impl<'a, T> IntoIterator for &'a mut ThinVec<T> {
- type Item = &'a mut T;
- type IntoIter = std::slice::IterMut<'a, T>;
-
- fn into_iter(self) -> Self::IntoIter {
- self.as_mut().iter_mut()
- }
-}
-
-impl<T> Extend<T> for ThinVec<T> {
- fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- match *self {
- ThinVec(Some(ref mut vec)) => vec.extend(iter),
- ThinVec(None) => *self = iter.into_iter().collect::<Vec<_>>().into(),
- }
- }
-
- fn extend_one(&mut self, item: T) {
- self.push(item)
- }
-
- fn extend_reserve(&mut self, additional: usize) {
- match *self {
- ThinVec(Some(ref mut vec)) => vec.reserve(additional),
- ThinVec(None) => *self = Vec::with_capacity(additional).into(),
- }
- }
-}
-
-impl<T: HashStable<CTX>, CTX> HashStable<CTX> for ThinVec<T> {
- fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
- (**self).hash_stable(hcx, hasher)
- }
-}
-
-impl<T> Default for ThinVec<T> {
- fn default() -> Self {
- Self(None)
- }
-}
-
-#[cfg(test)]
-mod tests;
diff --git a/compiler/rustc_data_structures/src/thin_vec/tests.rs b/compiler/rustc_data_structures/src/thin_vec/tests.rs
deleted file mode 100644
index 0221b9912..000000000
--- a/compiler/rustc_data_structures/src/thin_vec/tests.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use super::*;
-
-impl<T> ThinVec<T> {
- fn into_vec(self) -> Vec<T> {
- self.into()
- }
-}
-
-#[test]
-fn test_from_iterator() {
- assert_eq!(std::iter::empty().collect::<ThinVec<String>>().into_vec(), Vec::<String>::new());
- assert_eq!(std::iter::once(42).collect::<ThinVec<_>>().into_vec(), vec![42]);
- assert_eq!([1, 2].into_iter().collect::<ThinVec<_>>().into_vec(), vec![1, 2]);
- assert_eq!([1, 2, 3].into_iter().collect::<ThinVec<_>>().into_vec(), vec![1, 2, 3]);
-}
-
-#[test]
-fn test_into_iterator_owned() {
- assert_eq!(ThinVec::new().into_iter().collect::<Vec<String>>(), Vec::<String>::new());
- assert_eq!(ThinVec::from(vec![1]).into_iter().collect::<Vec<_>>(), vec![1]);
- assert_eq!(ThinVec::from(vec![1, 2]).into_iter().collect::<Vec<_>>(), vec![1, 2]);
- assert_eq!(ThinVec::from(vec![1, 2, 3]).into_iter().collect::<Vec<_>>(), vec![1, 2, 3]);
-}
-
-#[test]
-fn test_into_iterator_ref() {
- assert_eq!(ThinVec::new().iter().collect::<Vec<&String>>(), Vec::<&String>::new());
- assert_eq!(ThinVec::from(vec![1]).iter().collect::<Vec<_>>(), vec![&1]);
- assert_eq!(ThinVec::from(vec![1, 2]).iter().collect::<Vec<_>>(), vec![&1, &2]);
- assert_eq!(ThinVec::from(vec![1, 2, 3]).iter().collect::<Vec<_>>(), vec![&1, &2, &3]);
-}
-
-#[test]
-fn test_into_iterator_ref_mut() {
- assert_eq!(ThinVec::new().iter_mut().collect::<Vec<&mut String>>(), Vec::<&mut String>::new());
- assert_eq!(ThinVec::from(vec![1]).iter_mut().collect::<Vec<_>>(), vec![&mut 1]);
- assert_eq!(ThinVec::from(vec![1, 2]).iter_mut().collect::<Vec<_>>(), vec![&mut 1, &mut 2]);
- assert_eq!(
- ThinVec::from(vec![1, 2, 3]).iter_mut().collect::<Vec<_>>(),
- vec![&mut 1, &mut 2, &mut 3],
- );
-}
diff --git a/compiler/rustc_data_structures/src/transitive_relation.rs b/compiler/rustc_data_structures/src/transitive_relation.rs
index 0ff64969b..cf6162038 100644
--- a/compiler/rustc_data_structures/src/transitive_relation.rs
+++ b/compiler/rustc_data_structures/src/transitive_relation.rs
@@ -1,55 +1,67 @@
-use crate::fx::FxIndexSet;
-use crate::sync::Lock;
+use crate::frozen::Frozen;
+use crate::fx::{FxHashSet, FxIndexSet};
use rustc_index::bit_set::BitMatrix;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;
+use std::ops::Deref;
#[cfg(test)]
mod tests;
#[derive(Clone, Debug)]
-pub struct TransitiveRelation<T> {
+pub struct TransitiveRelationBuilder<T> {
// List of elements. This is used to map from a T to a usize.
elements: FxIndexSet<T>,
// List of base edges in the graph. Require to compute transitive
// closure.
- edges: Vec<Edge>,
-
- // This is a cached transitive closure derived from the edges.
- // Currently, we build it lazily and just throw out any existing
- // copy whenever a new edge is added. (The Lock is to permit
- // the lazy computation.) This is kind of silly, except for the
- // fact its size is tied to `self.elements.len()`, so I wanted to
- // wait before building it up to avoid reallocating as new edges
- // are added with new elements. Perhaps better would be to ask the
- // user for a batch of edges to minimize this effect, but I
- // already wrote the code this way. :P -nmatsakis
- closure: Lock<Option<BitMatrix<usize, usize>>>,
+ edges: FxHashSet<Edge>,
}
-// HACK(eddyb) manual impl avoids `Default` bound on `T`.
-impl<T: Eq + Hash> Default for TransitiveRelation<T> {
- fn default() -> Self {
+#[derive(Debug)]
+pub struct TransitiveRelation<T> {
+ // Frozen transitive relation elements and edges.
+ builder: Frozen<TransitiveRelationBuilder<T>>,
+
+ // Cached transitive closure derived from the edges.
+ closure: Frozen<BitMatrix<usize, usize>>,
+}
+
+impl<T> Deref for TransitiveRelation<T> {
+ type Target = Frozen<TransitiveRelationBuilder<T>>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.builder
+ }
+}
+
+impl<T: Clone> Clone for TransitiveRelation<T> {
+ fn clone(&self) -> Self {
TransitiveRelation {
- elements: Default::default(),
- edges: Default::default(),
- closure: Default::default(),
+ builder: Frozen::freeze(self.builder.deref().clone()),
+ closure: Frozen::freeze(self.closure.deref().clone()),
}
}
}
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Debug)]
+// HACK(eddyb) manual impl avoids `Default` bound on `T`.
+impl<T: Eq + Hash> Default for TransitiveRelationBuilder<T> {
+ fn default() -> Self {
+ TransitiveRelationBuilder { elements: Default::default(), edges: Default::default() }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Debug, Hash)]
struct Index(usize);
-#[derive(Clone, PartialEq, Eq, Debug)]
+#[derive(Clone, PartialEq, Eq, Debug, Hash)]
struct Edge {
source: Index,
target: Index,
}
-impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
+impl<T: Eq + Hash + Copy> TransitiveRelationBuilder<T> {
pub fn is_empty(&self) -> bool {
self.edges.is_empty()
}
@@ -63,23 +75,19 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
}
fn add_index(&mut self, a: T) -> Index {
- let (index, added) = self.elements.insert_full(a);
- if added {
- // if we changed the dimensions, clear the cache
- *self.closure.get_mut() = None;
- }
+ let (index, _added) = self.elements.insert_full(a);
Index(index)
}
/// Applies the (partial) function to each edge and returns a new
- /// relation. If `f` returns `None` for any end-point, returns
- /// `None`.
- pub fn maybe_map<F, U>(&self, mut f: F) -> Option<TransitiveRelation<U>>
+ /// relation builder. If `f` returns `None` for any end-point,
+ /// returns `None`.
+ pub fn maybe_map<F, U>(&self, mut f: F) -> Option<TransitiveRelationBuilder<U>>
where
F: FnMut(T) -> Option<U>,
U: Clone + Debug + Eq + Hash + Copy,
{
- let mut result = TransitiveRelation::default();
+ let mut result = TransitiveRelationBuilder::default();
for edge in &self.edges {
result.add(f(self.elements[edge.source.0])?, f(self.elements[edge.target.0])?);
}
@@ -91,12 +99,38 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
let a = self.add_index(a);
let b = self.add_index(b);
let edge = Edge { source: a, target: b };
- if !self.edges.contains(&edge) {
- self.edges.push(edge);
+ self.edges.insert(edge);
+ }
+
+ /// Compute the transitive closure derived from the edges, and converted to
+ /// the final result. After this, all elements will be immutable to maintain
+ /// the correctness of the result.
+ pub fn freeze(self) -> TransitiveRelation<T> {
+ let mut matrix = BitMatrix::new(self.elements.len(), self.elements.len());
+ let mut changed = true;
+ while changed {
+ changed = false;
+ for edge in &self.edges {
+ // add an edge from S -> T
+ changed |= matrix.insert(edge.source.0, edge.target.0);
- // added an edge, clear the cache
- *self.closure.get_mut() = None;
+ // add all outgoing edges from T into S
+ changed |= matrix.union_rows(edge.target.0, edge.source.0);
+ }
}
+ TransitiveRelation { builder: Frozen::freeze(self), closure: Frozen::freeze(matrix) }
+ }
+}
+
+impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
+ /// Applies the (partial) function to each edge and returns a new
+ /// relation including transitive closures.
+ pub fn maybe_map<F, U>(&self, f: F) -> Option<TransitiveRelation<U>>
+ where
+ F: FnMut(T) -> Option<U>,
+ U: Clone + Debug + Eq + Hash + Copy,
+ {
+ Some(self.builder.maybe_map(f)?.freeze())
}
/// Checks whether `a < target` (transitively)
@@ -322,30 +356,7 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
where
OP: FnOnce(&BitMatrix<usize, usize>) -> R,
{
- let mut closure_cell = self.closure.borrow_mut();
- let mut closure = closure_cell.take();
- if closure.is_none() {
- closure = Some(self.compute_closure());
- }
- let result = op(closure.as_ref().unwrap());
- *closure_cell = closure;
- result
- }
-
- fn compute_closure(&self) -> BitMatrix<usize, usize> {
- let mut matrix = BitMatrix::new(self.elements.len(), self.elements.len());
- let mut changed = true;
- while changed {
- changed = false;
- for edge in &self.edges {
- // add an edge from S -> T
- changed |= matrix.insert(edge.source.0, edge.target.0);
-
- // add all outgoing edges from T into S
- changed |= matrix.union_rows(edge.target.0, edge.source.0);
- }
- }
- matrix
+ op(&self.closure)
}
/// Lists all the base edges in the graph: the initial _non-transitive_ set of element
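
The refactor splits mutation (`TransitiveRelationBuilder`) from querying (`TransitiveRelation`): instead of lazily recomputing a closure cache behind a `Lock` whenever a new edge invalidates it, the closure is computed exactly once, in `freeze`. The updated tests below all follow the resulting two-phase pattern; in short:

    let mut builder = TransitiveRelationBuilder::default();
    builder.add("a", "b");
    builder.add("b", "c");
    // freeze() computes the transitive closure once; the result is immutable.
    let relation = builder.freeze();
    assert!(relation.contains("a", "c"));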
diff --git a/compiler/rustc_data_structures/src/transitive_relation/tests.rs b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
index e1f4c7ee0..e756c546e 100644
--- a/compiler/rustc_data_structures/src/transitive_relation/tests.rs
+++ b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
@@ -10,9 +10,10 @@ impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
#[test]
fn test_one_step() {
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "b");
relation.add("a", "c");
+ let relation = relation.freeze();
assert!(relation.contains("a", "c"));
assert!(relation.contains("a", "b"));
assert!(!relation.contains("b", "a"));
@@ -21,7 +22,7 @@ fn test_one_step() {
#[test]
fn test_many_steps() {
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "b");
relation.add("a", "c");
relation.add("a", "f");
@@ -31,6 +32,7 @@ fn test_many_steps() {
relation.add("b", "e");
relation.add("e", "g");
+ let relation = relation.freeze();
assert!(relation.contains("a", "b"));
assert!(relation.contains("a", "c"));
@@ -51,9 +53,10 @@ fn mubs_triangle() {
// ^
// |
// b
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "tcx");
relation.add("b", "tcx");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["tcx"]);
assert_eq!(relation.parents("a"), vec!["tcx"]);
assert_eq!(relation.parents("b"), vec!["tcx"]);
@@ -72,7 +75,7 @@ fn mubs_best_choice1() {
// need the second pare down call to get the right result (after
// intersection, we have [1, 2], but 2 -> 1).
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("0", "1");
relation.add("0", "2");
@@ -80,6 +83,7 @@ fn mubs_best_choice1() {
relation.add("3", "1");
relation.add("3", "2");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["2"]);
assert_eq!(relation.parents("0"), vec!["2"]);
@@ -99,7 +103,7 @@ fn mubs_best_choice2() {
// Like the preceding test, but in this case intersection is [2,
// 1], and hence we rely on the first pare down call.
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("0", "1");
relation.add("0", "2");
@@ -107,6 +111,7 @@ fn mubs_best_choice2() {
relation.add("3", "1");
relation.add("3", "2");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1"]);
assert_eq!(relation.parents("0"), vec!["1"]);
@@ -118,12 +123,13 @@ fn mubs_best_choice2() {
fn mubs_no_best_choice() {
// in this case, the intersection yields [1, 2], and the "pare
// down" calls find nothing to remove.
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("0", "1");
relation.add("0", "2");
relation.add("3", "1");
relation.add("3", "2");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1", "2"]);
assert_eq!(relation.parents("0"), vec!["1", "2"]);
@@ -135,7 +141,7 @@ fn mubs_best_choice_scc() {
// in this case, 1 and 2 form a cycle; we pick arbitrarily (but
// consistently).
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("0", "1");
relation.add("0", "2");
@@ -144,6 +150,7 @@ fn mubs_best_choice_scc() {
relation.add("3", "1");
relation.add("3", "2");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1"]);
assert_eq!(relation.parents("0"), vec!["1"]);
@@ -157,13 +164,14 @@ fn pdub_crisscross() {
// /\ |
// b -> b1 ---+
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "a1");
relation.add("a", "b1");
relation.add("b", "a1");
relation.add("b", "b1");
relation.add("a1", "x");
relation.add("b1", "x");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["a1", "b1"]);
assert_eq!(relation.postdom_upper_bound("a", "b"), Some("x"));
@@ -179,7 +187,7 @@ fn pdub_crisscross_more() {
// /\ /\ |
// b -> b1 -> b2 ---------+
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "a1");
relation.add("a", "b1");
relation.add("b", "a1");
@@ -194,6 +202,7 @@ fn pdub_crisscross_more() {
relation.add("a3", "x");
relation.add("b2", "x");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["a1", "b1"]);
assert_eq!(relation.minimal_upper_bounds("a1", "b1"), vec!["a2", "b2"]);
@@ -210,11 +219,12 @@ fn pdub_lub() {
// |
// b -> b1 ---+
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "a1");
relation.add("b", "b1");
relation.add("a1", "x");
relation.add("b1", "x");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["x"]);
assert_eq!(relation.postdom_upper_bound("a", "b"), Some("x"));
@@ -233,10 +243,11 @@ fn mubs_intermediate_node_on_one_side_only() {
// b
// "digraph { a -> c -> d; b -> d; }",
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "c");
relation.add("c", "d");
relation.add("b", "d");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["d"]);
}
@@ -252,12 +263,13 @@ fn mubs_scc_1() {
// b
// "digraph { a -> c -> d; d -> c; a -> d; b -> d; }",
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "c");
relation.add("c", "d");
relation.add("d", "c");
relation.add("a", "d");
relation.add("b", "d");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
}
@@ -272,12 +284,13 @@ fn mubs_scc_2() {
// +--- b
// "digraph { a -> c -> d; d -> c; b -> d; b -> c; }",
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "c");
relation.add("c", "d");
relation.add("d", "c");
relation.add("b", "d");
relation.add("b", "c");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
}
@@ -292,13 +305,14 @@ fn mubs_scc_3() {
// b ---+
// "digraph { a -> c -> d -> e -> c; b -> d; b -> e; }",
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "c");
relation.add("c", "d");
relation.add("d", "e");
relation.add("e", "c");
relation.add("b", "d");
relation.add("b", "e");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
}
@@ -314,13 +328,14 @@ fn mubs_scc_4() {
// b ---+
// "digraph { a -> c -> d -> e -> c; a -> d; b -> e; }"
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
relation.add("a", "c");
relation.add("c", "d");
relation.add("d", "e");
relation.add("e", "c");
relation.add("a", "d");
relation.add("b", "e");
+ let relation = relation.freeze();
assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
}
@@ -352,10 +367,11 @@ fn parent() {
(1, /*->*/ 3),
];
- let mut relation = TransitiveRelation::default();
+ let mut relation = TransitiveRelationBuilder::default();
for (a, b) in pairs {
relation.add(a, b);
}
+ let relation = relation.freeze();
let p = relation.postdom_parent(3);
assert_eq!(p, Some(0));
diff --git a/compiler/rustc_data_structures/src/unord.rs b/compiler/rustc_data_structures/src/unord.rs
new file mode 100644
index 000000000..c015f1232
--- /dev/null
+++ b/compiler/rustc_data_structures/src/unord.rs
@@ -0,0 +1,382 @@
+//! This module contains collection types that don't expose their internal
+//! ordering. This is a useful property for deterministic computations, such
+//! as required by the query system.
+
+use rustc_hash::{FxHashMap, FxHashSet};
+use smallvec::SmallVec;
+use std::{
+ borrow::Borrow,
+ hash::Hash,
+ iter::{Product, Sum},
+};
+
+use crate::{
+ fingerprint::Fingerprint,
+ stable_hasher::{HashStable, StableHasher, ToStableHashKey},
+};
+
+/// `UnordItems` is the order-less version of `Iterator`. It only contains methods
+/// that don't (easily) expose an ordering of the underlying items.
+///
+/// Most methods take an `Fn` where the `Iterator`-version takes an `FnMut`. This
+/// is to reduce the risk of accidentally leaking the internal order via the closure
+/// environment. Otherwise one could easily do something like
+///
+/// ```rust,ignore (pseudo code)
+/// let mut ordered = vec![];
+/// unordered_items.all(|x| ordered.push(x));
+/// ```
+///
+/// It's still possible to do the same thing with an `Fn` by using interior mutability,
+/// but the chance of doing it accidentally is reduced.
+pub struct UnordItems<T, I: Iterator<Item = T>>(I);
+
+impl<T, I: Iterator<Item = T>> UnordItems<T, I> {
+ #[inline]
+ pub fn map<U, F: Fn(T) -> U>(self, f: F) -> UnordItems<U, impl Iterator<Item = U>> {
+ UnordItems(self.0.map(f))
+ }
+
+ #[inline]
+ pub fn all<U, F: Fn(T) -> bool>(mut self, f: F) -> bool {
+ self.0.all(f)
+ }
+
+ #[inline]
+ pub fn any<U, F: Fn(T) -> bool>(mut self, f: F) -> bool {
+ self.0.any(f)
+ }
+
+ #[inline]
+ pub fn filter<U, F: Fn(&T) -> bool>(self, f: F) -> UnordItems<T, impl Iterator<Item = T>> {
+ UnordItems(self.0.filter(f))
+ }
+
+ #[inline]
+ pub fn filter_map<U, F: Fn(T) -> Option<U>>(
+ self,
+ f: F,
+ ) -> UnordItems<U, impl Iterator<Item = U>> {
+ UnordItems(self.0.filter_map(f))
+ }
+
+ #[inline]
+ pub fn max(self) -> Option<T>
+ where
+ T: Ord,
+ {
+ self.0.max()
+ }
+
+ #[inline]
+ pub fn min(self) -> Option<T>
+ where
+ T: Ord,
+ {
+ self.0.min()
+ }
+
+ #[inline]
+ pub fn sum<S>(self) -> S
+ where
+ S: Sum<T>,
+ {
+ self.0.sum()
+ }
+
+ #[inline]
+ pub fn product<S>(self) -> S
+ where
+ S: Product<T>,
+ {
+ self.0.product()
+ }
+
+ #[inline]
+ pub fn count(self) -> usize {
+ self.0.count()
+ }
+}
+
+impl<'a, T: Clone + 'a, I: Iterator<Item = &'a T>> UnordItems<&'a T, I> {
+ #[inline]
+ pub fn cloned(self) -> UnordItems<T, impl Iterator<Item = T>> {
+ UnordItems(self.0.cloned())
+ }
+}
+
+impl<'a, T: Copy + 'a, I: Iterator<Item = &'a T>> UnordItems<&'a T, I> {
+ #[inline]
+ pub fn copied(self) -> UnordItems<T, impl Iterator<Item = T>> {
+ UnordItems(self.0.copied())
+ }
+}
+
+impl<T: Ord, I: Iterator<Item = T>> UnordItems<T, I> {
+ pub fn into_sorted<HCX>(self, hcx: &HCX) -> Vec<T>
+ where
+ T: ToStableHashKey<HCX>,
+ {
+ let mut items: Vec<T> = self.0.collect();
+ items.sort_by_cached_key(|x| x.to_stable_hash_key(hcx));
+ items
+ }
+
+ pub fn into_sorted_small_vec<HCX, const LEN: usize>(self, hcx: &HCX) -> SmallVec<[T; LEN]>
+ where
+ T: ToStableHashKey<HCX>,
+ {
+ let mut items: SmallVec<[T; LEN]> = self.0.collect();
+ items.sort_by_cached_key(|x| x.to_stable_hash_key(hcx));
+ items
+ }
+}
+
+/// This is a set collection type that tries very hard to not expose
+/// any internal iteration. This is a useful property when trying to
+/// uphold the determinism invariants imposed by the query system.
+///
+/// This collection type is a good choice for set-like collections the
+/// keys of which don't have a semantic ordering.
+///
+/// See [MCP 533](https://github.com/rust-lang/compiler-team/issues/533)
+/// for more information.
+#[derive(Debug, Eq, PartialEq, Clone, Encodable, Decodable)]
+pub struct UnordSet<V: Eq + Hash> {
+ inner: FxHashSet<V>,
+}
+
+impl<V: Eq + Hash> Default for UnordSet<V> {
+ fn default() -> Self {
+ Self { inner: FxHashSet::default() }
+ }
+}
+
+impl<V: Eq + Hash> UnordSet<V> {
+ #[inline]
+ pub fn new() -> Self {
+ Self { inner: Default::default() }
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.inner.len()
+ }
+
+ #[inline]
+ pub fn insert(&mut self, v: V) -> bool {
+ self.inner.insert(v)
+ }
+
+ #[inline]
+ pub fn contains<Q: ?Sized>(&self, v: &Q) -> bool
+ where
+ V: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.inner.contains(v)
+ }
+
+ #[inline]
+ pub fn items<'a>(&'a self) -> UnordItems<&'a V, impl Iterator<Item = &'a V>> {
+ UnordItems(self.inner.iter())
+ }
+
+ #[inline]
+ pub fn into_items(self) -> UnordItems<V, impl Iterator<Item = V>> {
+ UnordItems(self.inner.into_iter())
+ }
+
+ // We can safely extend this UnordSet from a set of unordered values because that
+ // won't expose the internal ordering anywhere.
+ #[inline]
+ pub fn extend<I: Iterator<Item = V>>(&mut self, items: UnordItems<V, I>) {
+ self.inner.extend(items.0)
+ }
+}
+
+impl<V: Hash + Eq> Extend<V> for UnordSet<V> {
+ fn extend<T: IntoIterator<Item = V>>(&mut self, iter: T) {
+ self.inner.extend(iter)
+ }
+}
+
+impl<HCX, V: Hash + Eq + HashStable<HCX>> HashStable<HCX> for UnordSet<V> {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ hash_iter_order_independent(self.inner.iter(), hcx, hasher);
+ }
+}
+
+/// This is a map collection type that tries very hard to not expose
+/// any internal iteration. This is a useful property when trying to
+/// uphold the determinism invariants imposed by the query system.
+///
+/// This collection type is a good choice for map-like collections the
+/// keys of which don't have a semantic ordering.
+///
+/// See [MCP 533](https://github.com/rust-lang/compiler-team/issues/533)
+/// for more information.
+#[derive(Debug, Eq, PartialEq, Clone, Encodable, Decodable)]
+pub struct UnordMap<K: Eq + Hash, V> {
+ inner: FxHashMap<K, V>,
+}
+
+impl<K: Eq + Hash, V> Default for UnordMap<K, V> {
+ fn default() -> Self {
+ Self { inner: FxHashMap::default() }
+ }
+}
+
+impl<K: Hash + Eq, V> Extend<(K, V)> for UnordMap<K, V> {
+ fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
+ self.inner.extend(iter)
+ }
+}
+
+impl<K: Eq + Hash, V> UnordMap<K, V> {
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.inner.len()
+ }
+
+ #[inline]
+ pub fn insert(&mut self, k: K, v: V) -> Option<V> {
+ self.inner.insert(k, v)
+ }
+
+ #[inline]
+ pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.inner.contains_key(k)
+ }
+
+ #[inline]
+ pub fn items<'a>(&'a self) -> UnordItems<(&'a K, &'a V), impl Iterator<Item = (&'a K, &'a V)>> {
+ UnordItems(self.inner.iter())
+ }
+
+ #[inline]
+ pub fn into_items(self) -> UnordItems<(K, V), impl Iterator<Item = (K, V)>> {
+ UnordItems(self.inner.into_iter())
+ }
+
+ // We can safely extend this UnordMap from a set of unordered values because that
+ // won't expose the internal ordering anywhere.
+ #[inline]
+ pub fn extend<I: Iterator<Item = (K, V)>>(&mut self, items: UnordItems<(K, V), I>) {
+ self.inner.extend(items.0)
+ }
+}
+
+impl<HCX, K: Hash + Eq + HashStable<HCX>, V: HashStable<HCX>> HashStable<HCX> for UnordMap<K, V> {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ hash_iter_order_independent(self.inner.iter(), hcx, hasher);
+ }
+}
+
+/// This is a collection type that tries very hard to not expose
+/// any internal iteration. This is a useful property when trying to
+/// uphold the determinism invariants imposed by the query system.
+///
+/// This collection type is a good choice for collections the
+/// keys of which don't have a semantic ordering and don't implement
+/// `Hash` or `Eq`.
+///
+/// See [MCP 533](https://github.com/rust-lang/compiler-team/issues/533)
+/// for more information.
+#[derive(Default, Debug, Eq, PartialEq, Clone, Encodable, Decodable)]
+pub struct UnordBag<V> {
+ inner: Vec<V>,
+}
+
+impl<V> UnordBag<V> {
+ #[inline]
+ pub fn new() -> Self {
+ Self { inner: Default::default() }
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.inner.len()
+ }
+
+ #[inline]
+ pub fn push(&mut self, v: V) {
+ self.inner.push(v);
+ }
+
+ #[inline]
+ pub fn items<'a>(&'a self) -> UnordItems<&'a V, impl Iterator<Item = &'a V>> {
+ UnordItems(self.inner.iter())
+ }
+
+ #[inline]
+ pub fn into_items(self) -> UnordItems<V, impl Iterator<Item = V>> {
+ UnordItems(self.inner.into_iter())
+ }
+
+ // We can safely extend this UnordSet from a set of unordered values because that
+ // won't expose the internal ordering anywhere.
+ #[inline]
+ pub fn extend<I: Iterator<Item = V>>(&mut self, items: UnordItems<V, I>) {
+ self.inner.extend(items.0)
+ }
+}
+
+impl<T> Extend<T> for UnordBag<T> {
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ self.inner.extend(iter)
+ }
+}
+
+impl<HCX, V: Hash + Eq + HashStable<HCX>> HashStable<HCX> for UnordBag<V> {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ hash_iter_order_independent(self.inner.iter(), hcx, hasher);
+ }
+}
+
+fn hash_iter_order_independent<
+ HCX,
+ T: HashStable<HCX>,
+ I: Iterator<Item = T> + ExactSizeIterator,
+>(
+ mut it: I,
+ hcx: &mut HCX,
+ hasher: &mut StableHasher,
+) {
+ let len = it.len();
+ len.hash_stable(hcx, hasher);
+
+ match len {
+ 0 => {
+ // We're done
+ }
+ 1 => {
+ // No need to instantiate a hasher
+ it.next().unwrap().hash_stable(hcx, hasher);
+ }
+ _ => {
+ let mut accumulator = Fingerprint::ZERO;
+ for item in it {
+ let mut item_hasher = StableHasher::new();
+ item.hash_stable(hcx, &mut item_hasher);
+ let item_fingerprint: Fingerprint = item_hasher.finish();
+ accumulator = accumulator.combine_commutative(item_fingerprint);
+ }
+ accumulator.hash_stable(hcx, hasher);
+ }
+ }
+}
+
+// Do not implement IntoIterator for the collections in this module.
+// They only exist to hide iteration order in the first place.
+impl<T> !IntoIterator for UnordBag<T> {}
+impl<V> !IntoIterator for UnordSet<V> {}
+impl<K, V> !IntoIterator for UnordMap<K, V> {}
+impl<T, I> !IntoIterator for UnordItems<T, I> {}
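
To round off the new module, a self-contained sketch of the intended usage pattern (values illustrative):

    use rustc_data_structures::unord::UnordSet;

    let mut set: UnordSet<u32> = UnordSet::new();
    set.insert(1);
    set.insert(2);
    assert!(set.contains(&1));

    // Bulk-extend from another unordered source; going through UnordItems
    // keeps the source's internal order from leaking into the destination.
    let mut other: UnordSet<u32> = UnordSet::new();
    other.insert(3);
    set.extend(other.into_items());
    assert_eq!(set.len(), 3);

    // There is deliberately no IntoIterator impl: to observe the contents
    // in a deterministic order, go through into_items().into_sorted(hcx)
    // with a stable hashing context.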