author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:19 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:19 +0000
commit     a0b8f38ab54ac451646aa00cd5e91b6c76f22a84 (patch)
tree       fc451898ccaf445814e26b46664d78702178101d /compiler/rustc_incremental/src
parent     Adding debian version 1.71.1+dfsg1-2. (diff)
Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_incremental/src')
compiler/rustc_incremental/src/assert_dep_graph.rs       |  28
compiler/rustc_incremental/src/assert_module_sources.rs  |   9
compiler/rustc_incremental/src/lib.rs                    |   1
compiler/rustc_incremental/src/persist/dirty_clean.rs    |  11
compiler/rustc_incremental/src/persist/fs.rs             | 203
compiler/rustc_incremental/src/persist/fs/tests.rs       |  27
compiler/rustc_incremental/src/persist/load.rs           |   8
compiler/rustc_incremental/src/persist/save.rs           |  12
compiler/rustc_incremental/src/persist/work_product.rs   |   6
9 files changed, 151 insertions, 154 deletions
diff --git a/compiler/rustc_incremental/src/assert_dep_graph.rs b/compiler/rustc_incremental/src/assert_dep_graph.rs
index 22bd12f2e..52a84b204 100644
--- a/compiler/rustc_incremental/src/assert_dep_graph.rs
+++ b/compiler/rustc_incremental/src/assert_dep_graph.rs
@@ -35,7 +35,7 @@ use crate::errors;
 use rustc_ast as ast;
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::fx::FxIndexSet;
 use rustc_data_structures::graph::implementation::{Direction, NodeIndex, INCOMING, OUTGOING};
 use rustc_graphviz as dot;
 use rustc_hir as hir;
@@ -258,7 +258,7 @@ fn dump_graph(query: &DepGraphQuery) {
 }
 
 #[allow(missing_docs)]
-pub struct GraphvizDepGraph(FxHashSet<DepKind>, Vec<(DepKind, DepKind)>);
+pub struct GraphvizDepGraph(FxIndexSet<DepKind>, Vec<(DepKind, DepKind)>);
 
 impl<'a> dot::GraphWalk<'a> for GraphvizDepGraph {
     type Node = DepKind;
@@ -303,7 +303,7 @@ impl<'a> dot::Labeller<'a> for GraphvizDepGraph {
 fn node_set<'q>(
     query: &'q DepGraphQuery,
     filter: &DepNodeFilter,
-) -> Option<FxHashSet<&'q DepNode>> {
+) -> Option<FxIndexSet<&'q DepNode>> {
     debug!("node_set(filter={:?})", filter);
 
     if filter.accepts_all() {
@@ -315,9 +315,9 @@ fn node_set<'q>(
 
 fn filter_nodes<'q>(
     query: &'q DepGraphQuery,
-    sources: &Option<FxHashSet<&'q DepNode>>,
-    targets: &Option<FxHashSet<&'q DepNode>>,
-) -> FxHashSet<DepKind> {
+    sources: &Option<FxIndexSet<&'q DepNode>>,
+    targets: &Option<FxIndexSet<&'q DepNode>>,
+) -> FxIndexSet<DepKind> {
     if let Some(sources) = sources {
         if let Some(targets) = targets {
             walk_between(query, sources, targets)
@@ -333,10 +333,10 @@ fn filter_nodes<'q>(
 
 fn walk_nodes<'q>(
     query: &'q DepGraphQuery,
-    starts: &FxHashSet<&'q DepNode>,
+    starts: &FxIndexSet<&'q DepNode>,
     direction: Direction,
-) -> FxHashSet<DepKind> {
-    let mut set = FxHashSet::default();
+) -> FxIndexSet<DepKind> {
+    let mut set = FxIndexSet::default();
     for &start in starts {
         debug!("walk_nodes: start={:?} outgoing?={:?}", start, direction == OUTGOING);
         if set.insert(start.kind) {
@@ -357,9 +357,9 @@ fn walk_nodes<'q>(
 
 fn walk_between<'q>(
     query: &'q DepGraphQuery,
-    sources: &FxHashSet<&'q DepNode>,
-    targets: &FxHashSet<&'q DepNode>,
-) -> FxHashSet<DepKind> {
+    sources: &FxIndexSet<&'q DepNode>,
+    targets: &FxIndexSet<&'q DepNode>,
+) -> FxIndexSet<DepKind> {
     // This is a bit tricky. We want to include a node only if it is:
     // (a) reachable from a source and (b) will reach a target. And we
     // have to be careful about cycles etc. Luckily efficiency is not
@@ -426,8 +426,8 @@ fn walk_between<'q>(
     }
 }
 
-fn filter_edges(query: &DepGraphQuery, nodes: &FxHashSet<DepKind>) -> Vec<(DepKind, DepKind)> {
-    let uniq: FxHashSet<_> = query
+fn filter_edges(query: &DepGraphQuery, nodes: &FxIndexSet<DepKind>) -> Vec<(DepKind, DepKind)> {
+    let uniq: FxIndexSet<_> = query
         .edges()
         .into_iter()
         .map(|(s, t)| (s.kind, t.kind))
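The hunks above swap hash sets for insertion-ordered sets, so the graphviz dump of the dep graph no longer depends on hash iteration order. rustc's `FxIndexSet` is a thin alias over the `indexmap` crate's `IndexSet` with the Fx hasher. A minimal sketch of the property this buys, written against upstream `indexmap` directly (the dep-kind strings are illustrative, not rustc's):

```rust
use indexmap::IndexSet;

fn main() {
    // An IndexSet iterates in insertion order, so anything derived from
    // iteration (e.g. an emitted graphviz file) is byte-identical on
    // every run. A HashSet makes no such guarantee.
    let mut kinds: IndexSet<&str> = IndexSet::new();
    for kind in ["hir_owner", "type_of", "optimized_mir"] {
        kinds.insert(kind);
    }
    let in_order: Vec<&str> = kinds.iter().copied().collect();
    assert_eq!(in_order, ["hir_owner", "type_of", "optimized_mir"]);
}
```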
diff --git a/compiler/rustc_incremental/src/assert_module_sources.rs b/compiler/rustc_incremental/src/assert_module_sources.rs
index c550e553b..0111a6d30 100644
--- a/compiler/rustc_incremental/src/assert_module_sources.rs
+++ b/compiler/rustc_incremental/src/assert_module_sources.rs
@@ -24,7 +24,7 @@ use crate::errors;
 use rustc_ast as ast;
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::unord::UnordSet;
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::mir::mono::CodegenUnitNameBuilder;
 use rustc_middle::ty::TyCtxt;
@@ -52,7 +52,7 @@ pub fn assert_module_sources(tcx: TyCtxt<'_>) {
 struct AssertModuleSource<'tcx> {
     tcx: TyCtxt<'tcx>,
-    available_cgus: FxHashSet<Symbol>,
+    available_cgus: UnordSet<Symbol>,
 }
 
 impl<'tcx> AssertModuleSource<'tcx> {
@@ -118,9 +118,8 @@ impl<'tcx> AssertModuleSource<'tcx> {
         debug!("mapping '{}' to cgu name '{}'", self.field(attr, sym::module), cgu_name);
 
         if !self.available_cgus.contains(&cgu_name) {
-            let mut cgu_names: Vec<&str> =
-                self.available_cgus.iter().map(|cgu| cgu.as_str()).collect();
-            cgu_names.sort();
+            let cgu_names: Vec<&str> =
+                self.available_cgus.items().map(|cgu| cgu.as_str()).into_sorted_stable_ord();
             self.tcx.sess.emit_err(errors::NoModuleNamed {
                 span: attr.span,
                 user_path,
diff --git a/compiler/rustc_incremental/src/lib.rs b/compiler/rustc_incremental/src/lib.rs
index 11710c368..b9171fad5 100644
--- a/compiler/rustc_incremental/src/lib.rs
+++ b/compiler/rustc_incremental/src/lib.rs
@@ -4,7 +4,6 @@
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![feature(never_type)]
 #![recursion_limit = "256"]
-#![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
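With the crate-wide `#![allow(rustc::potential_query_instability)]` removed, every collection that gets iterated for output has to go through a deterministic view. `assert_module_sources` now uses `UnordSet`, whose contents can only be observed order-independently or via an explicitly sorted view like `into_sorted_stable_ord()`. A rough approximation of that pattern with std types only — a sketch of the idea, not the rustc API:

```rust
use std::collections::HashSet;

// Approximates UnordSet::items().into_sorted_stable_ord(): the set is
// only ever observed through a sorted view, so a diagnostic listing
// its contents prints the same list on every run.
fn sorted_cgu_names(available_cgus: &HashSet<String>) -> Vec<&str> {
    let mut names: Vec<&str> = available_cgus.iter().map(|s| s.as_str()).collect();
    names.sort_unstable();
    names
}

fn main() {
    // Hypothetical codegen-unit names, for illustration only.
    let cgus: HashSet<String> =
        ["demo.volatile", "demo.stable"].map(String::from).into_iter().collect();
    assert_eq!(sorted_cgu_names(&cgus), ["demo.stable", "demo.volatile"]);
}
```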
diff --git a/compiler/rustc_incremental/src/persist/dirty_clean.rs b/compiler/rustc_incremental/src/persist/dirty_clean.rs
index 43274091c..f9cd01fd8 100644
--- a/compiler/rustc_incremental/src/persist/dirty_clean.rs
+++ b/compiler/rustc_incremental/src/persist/dirty_clean.rs
@@ -22,6 +22,7 @@ use crate::errors;
 use rustc_ast::{self as ast, Attribute, NestedMetaItem};
 use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::unord::UnordSet;
 use rustc_hir::def_id::LocalDefId;
 use rustc_hir::intravisit;
 use rustc_hir::Node as HirNode;
@@ -125,7 +126,7 @@ const LABELS_ADT: &[&[&str]] = &[BASE_HIR, BASE_STRUCT];
 //
 // type_of for these.
 
-type Labels = FxHashSet<String>;
+type Labels = UnordSet<String>;
 
 /// Represents the requested configuration by rustc_clean/dirty
 struct Assertion {
@@ -197,7 +198,7 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
         let (name, mut auto) = self.auto_labels(item_id, attr);
         let except = self.except(attr);
         let loaded_from_disk = self.loaded_from_disk(attr);
-        for e in except.iter() {
+        for e in except.items().into_sorted_stable_ord() {
             if !auto.remove(e) {
                 self.tcx.sess.emit_fatal(errors::AssertionAuto { span: attr.span, name, e });
             }
@@ -376,15 +377,15 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
                 continue;
             };
             self.checked_attrs.insert(attr.id);
-            for label in assertion.clean {
+            for label in assertion.clean.items().into_sorted_stable_ord() {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_clean(item_span, dep_node);
             }
-            for label in assertion.dirty {
+            for label in assertion.dirty.items().into_sorted_stable_ord() {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_dirty(item_span, dep_node);
             }
-            for label in assertion.loaded_from_disk {
+            for label in assertion.loaded_from_disk.items().into_sorted_stable_ord() {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_loaded_from_disk(item_span, dep_node);
             }
diff --git a/compiler/rustc_incremental/src/persist/fs.rs b/compiler/rustc_incremental/src/persist/fs.rs
index e3c688b3e..7708deece 100644
--- a/compiler/rustc_incremental/src/persist/fs.rs
+++ b/compiler/rustc_incremental/src/persist/fs.rs
@@ -104,8 +104,9 @@
 //! implemented.
 
 use crate::errors;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
 use rustc_data_structures::svh::Svh;
+use rustc_data_structures::unord::{UnordMap, UnordSet};
 use rustc_data_structures::{base_n, flock};
 use rustc_errors::ErrorGuaranteed;
 use rustc_fs_util::{link_or_copy, try_canonicalize, LinkOrCopy};
@@ -635,8 +636,8 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
 
     // First do a pass over the crate directory, collecting lock files and
     // session directories
-    let mut session_directories = FxHashSet::default();
-    let mut lock_files = FxHashSet::default();
+    let mut session_directories = FxIndexSet::default();
+    let mut lock_files = UnordSet::default();
 
     for dir_entry in crate_directory.read_dir()? {
         let Ok(dir_entry) = dir_entry else {
@@ -657,10 +658,11 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
             // This is something we don't know, leave it alone
         }
     }
+    session_directories.sort();
 
     // Now map from lock files to session directories
-    let lock_file_to_session_dir: FxHashMap<String, Option<String>> = lock_files
-        .into_iter()
+    let lock_file_to_session_dir: UnordMap<String, Option<String>> = lock_files
+        .into_items()
         .map(|lock_file_name| {
             assert!(lock_file_name.ends_with(LOCK_FILE_EXT));
             let dir_prefix_end = lock_file_name.len() - LOCK_FILE_EXT.len();
@@ -670,11 +672,13 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
             };
             (lock_file_name, session_dir.map(String::clone))
         })
-        .collect();
+        .into();
 
     // Delete all lock files, that don't have an associated directory. They must
     // be some kind of leftover
-    for (lock_file_name, directory_name) in &lock_file_to_session_dir {
+    for (lock_file_name, directory_name) in
+        lock_file_to_session_dir.items().into_sorted_stable_ord()
+    {
         if directory_name.is_none() {
             let Ok(timestamp) = extract_timestamp_from_session_dir(lock_file_name) else {
                 debug!(
@@ -685,19 +689,19 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
                 continue;
             };
 
-            let lock_file_path = crate_directory.join(&**lock_file_name);
+            let lock_file_path = crate_directory.join(&*lock_file_name);
 
             if is_old_enough_to_be_collected(timestamp) {
                 debug!(
                     "garbage_collect_session_directories() - deleting \
-                    garbage lock file: {}",
+                     garbage lock file: {}",
                     lock_file_path.display()
                 );
                 delete_session_dir_lock_file(sess, &lock_file_path);
             } else {
                 debug!(
                     "garbage_collect_session_directories() - lock file with \
-                    no session dir not old enough to be collected: {}",
+                     no session dir not old enough to be collected: {}",
                     lock_file_path.display()
                 );
             }
@@ -705,14 +709,14 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
     }
 
     // Filter out `None` directories
-    let lock_file_to_session_dir: FxHashMap<String, String> = lock_file_to_session_dir
-        .into_iter()
+    let lock_file_to_session_dir: UnordMap<String, String> = lock_file_to_session_dir
+        .into_items()
         .filter_map(|(lock_file_name, directory_name)| directory_name.map(|n| (lock_file_name, n)))
-        .collect();
+        .into();
 
     // Delete all session directories that don't have a lock file.
     for directory_name in session_directories {
-        if !lock_file_to_session_dir.values().any(|dir| *dir == directory_name) {
+        if !lock_file_to_session_dir.items().any(|(_, dir)| *dir == directory_name) {
             let path = crate_directory.join(directory_name);
             if let Err(err) = safe_remove_dir_all(&path) {
                 sess.emit_warning(errors::InvalidGcFailed { path: &path, err });
@@ -721,103 +725,103 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
     }
 
     // Now garbage collect the valid session directories.
-    let mut deletion_candidates = vec![];
+    let deletion_candidates =
+        lock_file_to_session_dir.items().filter_map(|(lock_file_name, directory_name)| {
+            debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
 
-    for (lock_file_name, directory_name) in &lock_file_to_session_dir {
-        debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
-
-        let Ok(timestamp) = extract_timestamp_from_session_dir(directory_name) else {
+            let Ok(timestamp) = extract_timestamp_from_session_dir(directory_name) else {
                 debug!(
                     "found session-dir with malformed timestamp: {}",
                     crate_directory.join(directory_name).display()
                 );
                 // Ignore it
-                continue;
+                return None;
             };
 
-        if is_finalized(directory_name) {
-            let lock_file_path = crate_directory.join(lock_file_name);
-            match flock::Lock::new(
-                &lock_file_path,
-                false, // don't wait
-                false, // don't create the lock-file
-                true,
-            ) {
-                // get an exclusive lock
-                Ok(lock) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
                     successfully acquired lock"
-                    );
-                    debug!(
-                        "garbage_collect_session_directories() - adding \
                     deletion candidate: {}",
-                        directory_name
-                    );
-
-                    // Note that we are holding on to the lock
-                    deletion_candidates.push((
-                        timestamp,
-                        crate_directory.join(directory_name),
-                        Some(lock),
-                    ));
-                }
-                Err(_) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
                     not collecting, still in use"
-                    );
+            if is_finalized(directory_name) {
+                let lock_file_path = crate_directory.join(lock_file_name);
+                match flock::Lock::new(
+                    &lock_file_path,
+                    false, // don't wait
+                    false, // don't create the lock-file
+                    true,
+                ) {
+                    // get an exclusive lock
+                    Ok(lock) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
                     successfully acquired lock"
+                        );
+                        debug!(
+                            "garbage_collect_session_directories() - adding \
                     deletion candidate: {}",
+                            directory_name
+                        );
+
+                        // Note that we are holding on to the lock
+                        return Some((
+                            (timestamp, crate_directory.join(directory_name)),
+                            Some(lock),
+                        ));
+                    }
+                    Err(_) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
                     not collecting, still in use"
+                        );
+                    }
                 }
-            }
-        } else if is_old_enough_to_be_collected(timestamp) {
-            // When cleaning out "-working" session directories, i.e.
-            // session directories that might still be in use by another
-            // compiler instance, we only look a directories that are
-            // at least ten seconds old. This is supposed to reduce the
-            // chance of deleting a directory in the time window where
-            // the process has allocated the directory but has not yet
-            // acquired the file-lock on it.
-
-            // Try to acquire the directory lock. If we can't, it
-            // means that the owning process is still alive and we
-            // leave this directory alone.
-            let lock_file_path = crate_directory.join(lock_file_name);
-            match flock::Lock::new(
-                &lock_file_path,
-                false, // don't wait
-                false, // don't create the lock-file
-                true,
-            ) {
-                // get an exclusive lock
-                Ok(lock) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
+            } else if is_old_enough_to_be_collected(timestamp) {
+                // When cleaning out "-working" session directories, i.e.
+                // session directories that might still be in use by another
+                // compiler instance, we only look a directories that are
+                // at least ten seconds old. This is supposed to reduce the
+                // chance of deleting a directory in the time window where
+                // the process has allocated the directory but has not yet
+                // acquired the file-lock on it.
+
+                // Try to acquire the directory lock. If we can't, it
+                // means that the owning process is still alive and we
+                // leave this directory alone.
+                let lock_file_path = crate_directory.join(lock_file_name);
+                match flock::Lock::new(
+                    &lock_file_path,
+                    false, // don't wait
+                    false, // don't create the lock-file
+                    true,
+                ) {
+                    // get an exclusive lock
+                    Ok(lock) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
                     successfully acquired lock"
-                    );
+                        );
 
-                    delete_old(sess, &crate_directory.join(directory_name));
+                        delete_old(sess, &crate_directory.join(directory_name));
 
-                    // Let's make it explicit that the file lock is released at this point,
-                    // or rather, that we held on to it until here
-                    drop(lock);
-                }
-                Err(_) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
+                        // Let's make it explicit that the file lock is released at this point,
+                        // or rather, that we held on to it until here
+                        drop(lock);
+                    }
+                    Err(_) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
                     not collecting, still in use"
-                    );
+                        );
+                    }
                 }
-            }
-        } else {
-            debug!(
-                "garbage_collect_session_directories() - not finalized, not \
+            } else {
+                debug!(
+                    "garbage_collect_session_directories() - not finalized, not \
                  old enough"
-            );
-        }
-    }
+                );
+            }
+            None
+        });
+    let deletion_candidates = deletion_candidates.into();
 
     // Delete all but the most recent of the candidates
-    for (path, lock) in all_except_most_recent(deletion_candidates) {
+    all_except_most_recent(deletion_candidates).into_items().all(|(path, lock)| {
         debug!("garbage_collect_session_directories() - deleting `{}`", path.display());
 
         if let Err(err) = safe_remove_dir_all(&path) {
@@ -829,7 +833,8 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
         // Let's make it explicit that the file lock is released at this point,
         // or rather, that we held on to it until here
         drop(lock);
-    }
+        true
+    });
 
     Ok(())
 }
@@ -845,18 +850,18 @@ fn delete_old(sess: &Session, path: &Path) {
 }
 
 fn all_except_most_recent(
-    deletion_candidates: Vec<(SystemTime, PathBuf, Option<flock::Lock>)>,
-) -> FxHashMap<PathBuf, Option<flock::Lock>> {
-    let most_recent = deletion_candidates.iter().map(|&(timestamp, ..)| timestamp).max();
+    deletion_candidates: UnordMap<(SystemTime, PathBuf), Option<flock::Lock>>,
+) -> UnordMap<PathBuf, Option<flock::Lock>> {
+    let most_recent = deletion_candidates.items().map(|(&(timestamp, _), _)| timestamp).max();
 
     if let Some(most_recent) = most_recent {
        deletion_candidates
-            .into_iter()
-            .filter(|&(timestamp, ..)| timestamp != most_recent)
-            .map(|(_, path, lock)| (path, lock))
+            .into_items()
+            .filter(|&((timestamp, _), _)| timestamp != most_recent)
+            .map(|((_, path), lock)| (path, lock))
            .collect()
    } else {
-        FxHashMap::default()
+        UnordMap::default()
    }
 }
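The key signature change in fs.rs is `all_except_most_recent`: instead of a `Vec` of `(timestamp, path, lock)` triples it now takes a map keyed by `(SystemTime, PathBuf)`, which makes the "newest candidate survives" logic independent of input order. A self-contained sketch of that logic, with a std `HashMap` standing in for rustc's `UnordMap` and the unit type standing in for `Option<flock::Lock>`:

```rust
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Keep every deletion candidate except the one with the newest timestamp.
fn all_except_most_recent(
    candidates: HashMap<(SystemTime, PathBuf), ()>,
) -> HashMap<PathBuf, ()> {
    let most_recent = candidates.keys().map(|&(timestamp, _)| timestamp).max();
    match most_recent {
        Some(most_recent) => candidates
            .into_iter()
            .filter(|&((timestamp, _), _)| timestamp != most_recent)
            .map(|((_, path), lock)| (path, lock))
            .collect(),
        None => HashMap::new(),
    }
}

fn main() {
    let candidates = HashMap::from([
        ((UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("old")), ()),
        ((UNIX_EPOCH + Duration::new(9, 0), PathBuf::from("new")), ()),
    ]);
    let doomed = all_except_most_recent(candidates);
    assert!(doomed.contains_key(&PathBuf::from("old")));
    assert!(!doomed.contains_key(&PathBuf::from("new")));
}
```

The rewritten test below exercises exactly this behavior, sorting the surviving paths through `into_sorted_stable_ord()` so the assertion itself is order-independent.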
diff --git a/compiler/rustc_incremental/src/persist/fs/tests.rs b/compiler/rustc_incremental/src/persist/fs/tests.rs
index 184796948..644b81876 100644
--- a/compiler/rustc_incremental/src/persist/fs/tests.rs
+++ b/compiler/rustc_incremental/src/persist/fs/tests.rs
@@ -2,26 +2,19 @@ use super::*;
 
 #[test]
 fn test_all_except_most_recent() {
+    let input: UnordMap<_, Option<flock::Lock>> = UnordMap::from_iter([
+        ((UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4")), None),
+        ((UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1")), None),
+        ((UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5")), None),
+        ((UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3")), None),
+        ((UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2")), None),
+    ]);
     assert_eq!(
-        all_except_most_recent(vec![
-            (UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4"), None),
-            (UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1"), None),
-            (UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5"), None),
-            (UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3"), None),
-            (UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2"), None),
-        ])
-        .keys()
-        .cloned()
-        .collect::<FxHashSet<PathBuf>>(),
-        [PathBuf::from("1"), PathBuf::from("2"), PathBuf::from("3"), PathBuf::from("4"),]
-            .into_iter()
-            .collect::<FxHashSet<PathBuf>>()
+        all_except_most_recent(input).into_items().map(|(path, _)| path).into_sorted_stable_ord(),
+        vec![PathBuf::from("1"), PathBuf::from("2"), PathBuf::from("3"), PathBuf::from("4")]
     );
 
-    assert_eq!(
-        all_except_most_recent(vec![]).keys().cloned().collect::<FxHashSet<PathBuf>>(),
-        FxHashSet::default()
-    );
+    assert!(all_except_most_recent(UnordMap::default()).is_empty());
 }
 
 #[test]
diff --git a/compiler/rustc_incremental/src/persist/load.rs b/compiler/rustc_incremental/src/persist/load.rs
index a4407a93f..bb479b5bd 100644
--- a/compiler/rustc_incremental/src/persist/load.rs
+++ b/compiler/rustc_incremental/src/persist/load.rs
@@ -1,8 +1,8 @@
 //! Code to save/load the dep-graph from files.
 
 use crate::errors;
-use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::unord::UnordMap;
 use rustc_middle::dep_graph::{SerializedDepGraph, WorkProduct, WorkProductId};
 use rustc_middle::query::on_disk_cache::OnDiskCache;
 use rustc_serialize::opaque::MemDecoder;
@@ -16,7 +16,7 @@ use super::file_format;
 use super::fs::*;
 use super::work_product;
 
-type WorkProductMap = FxHashMap<WorkProductId, WorkProduct>;
+type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;
 
 #[derive(Debug)]
 /// Represents the result of an attempt to load incremental compilation data.
@@ -147,7 +147,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
     let report_incremental_info = sess.opts.unstable_opts.incremental_info;
     let expected_hash = sess.opts.dep_tracking_hash(false);
 
-    let mut prev_work_products = FxHashMap::default();
+    let mut prev_work_products = UnordMap::default();
 
     // If we are only building with -Zquery-dep-graph but without an actual
     // incr. comp. session directory, we skip this. Otherwise we'd fail
@@ -163,7 +163,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
             Decodable::decode(&mut work_product_decoder);
 
         for swp in work_products {
-            let all_files_exist = swp.work_product.saved_files.iter().all(|(_, path)| {
+            let all_files_exist = swp.work_product.saved_files.items().all(|(_, path)| {
                 let exists = in_incr_comp_dir_sess(sess, path).exists();
                 if !exists && sess.opts.unstable_opts.incremental_info {
                     eprintln!("incremental: could not find file for work product: {path}",);
diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs
index 7376be6be..bfaa52f9c 100644
--- a/compiler/rustc_incremental/src/persist/save.rs
+++ b/compiler/rustc_incremental/src/persist/save.rs
@@ -1,5 +1,5 @@
 use crate::errors;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::sync::join;
 use rustc_middle::dep_graph::{DepGraph, SerializedDepGraph, WorkProduct, WorkProductId};
 use rustc_middle::ty::TyCtxt;
@@ -79,7 +79,7 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) {
 pub fn save_work_product_index(
     sess: &Session,
     dep_graph: &DepGraph,
-    new_work_products: FxHashMap<WorkProductId, WorkProduct>,
+    new_work_products: FxIndexMap<WorkProductId, WorkProduct>,
 ) {
     if sess.opts.incremental.is_none() {
         return;
@@ -105,7 +105,7 @@ pub fn save_work_product_index(
             if !new_work_products.contains_key(id) {
                 work_product::delete_workproduct_files(sess, wp);
                 debug_assert!(
-                    !wp.saved_files.iter().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
+                    !wp.saved_files.items().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
                 );
             }
         }
@@ -113,13 +113,13 @@ pub fn save_work_product_index(
     // Check that we did not delete one of the current work-products:
     debug_assert!({
         new_work_products.iter().all(|(_, wp)| {
-            wp.saved_files.iter().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
+            wp.saved_files.items().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
         })
     });
 }
 
 fn encode_work_product_index(
-    work_products: &FxHashMap<WorkProductId, WorkProduct>,
+    work_products: &FxIndexMap<WorkProductId, WorkProduct>,
     encoder: &mut FileEncoder,
 ) {
     let serialized_products: Vec<_> = work_products
@@ -146,7 +146,7 @@ fn encode_query_cache(tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult
 pub fn build_dep_graph(
     sess: &Session,
     prev_graph: SerializedDepGraph,
-    prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
+    prev_work_products: FxIndexMap<WorkProductId, WorkProduct>,
 ) -> Option<DepGraph> {
     if sess.opts.incremental.is_none() {
         // No incremental compilation.
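On the save side the work-product index moves to `FxIndexMap`, so `encode_work_product_index` writes entries in stable insertion order rather than hash order, keeping the serialized index reproducible across identical compilations. The effect, sketched with the `indexmap` crate that backs `FxIndexMap` (the identifiers and file names below are illustrative, not rustc's):

```rust
use indexmap::IndexMap;

fn main() {
    // Insertion order is preserved, so encoding the map by iteration
    // always produces the same byte stream for the same inputs.
    let mut work_products: IndexMap<&str, &str> = IndexMap::new();
    work_products.insert("cgu_b", "cgu_b.o");
    work_products.insert("cgu_a", "cgu_a.o");

    let encoded: Vec<(&str, &str)> =
        work_products.iter().map(|(&id, &file)| (id, file)).collect();
    assert_eq!(encoded, vec![("cgu_b", "cgu_b.o"), ("cgu_a", "cgu_a.o")]);
}
```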
diff --git a/compiler/rustc_incremental/src/persist/work_product.rs b/compiler/rustc_incremental/src/persist/work_product.rs
index dc98fbeb0..bce5ca1e1 100644
--- a/compiler/rustc_incremental/src/persist/work_product.rs
+++ b/compiler/rustc_incremental/src/persist/work_product.rs
@@ -4,7 +4,7 @@
 
 use crate::errors;
 use crate::persist::fs::*;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::unord::UnordMap;
 use rustc_fs_util::link_or_copy;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 use rustc_session::Session;
@@ -20,7 +20,7 @@ pub fn copy_cgu_workproduct_to_incr_comp_cache_dir(
     debug!(?cgu_name, ?files);
     sess.opts.incremental.as_ref()?;
 
-    let mut saved_files = FxHashMap::default();
+    let mut saved_files = UnordMap::default();
     for (ext, path) in files {
         let file_name = format!("{cgu_name}.{ext}");
         let path_in_incr_dir = in_incr_comp_dir_sess(sess, &file_name);
@@ -46,7 +46,7 @@ pub fn copy_cgu_workproduct_to_incr_comp_cache_dir(
 
 /// Removes files for a given work product.
 pub fn delete_workproduct_files(sess: &Session, work_product: &WorkProduct) {
-    for (_, path) in &work_product.saved_files {
+    for (_, path) in work_product.saved_files.items().into_sorted_stable_ord() {
         let path = in_incr_comp_dir_sess(sess, path);
         if let Err(err) = std_fs::remove_file(&path) {
             sess.emit_warning(errors::DeleteWorkProduct { path: &path, err });