Diffstat (limited to 'vendor/gix-pack/src/multi_index')
-rw-r--r--   vendor/gix-pack/src/multi_index/access.rs |  4
-rw-r--r--   vendor/gix-pack/src/multi_index/chunk.rs  | 16
-rw-r--r--   vendor/gix-pack/src/multi_index/verify.rs | 48
-rw-r--r--   vendor/gix-pack/src/multi_index/write.rs  | 37
4 files changed, 46 insertions, 59 deletions
diff --git a/vendor/gix-pack/src/multi_index/access.rs b/vendor/gix-pack/src/multi_index/access.rs
index 488f996d3..0150c7206 100644
--- a/vendor/gix-pack/src/multi_index/access.rs
+++ b/vendor/gix-pack/src/multi_index/access.rs
@@ -89,7 +89,7 @@ impl File {
             prefix,
             candidates,
             &self.fan,
-            |idx| self.oid_at_index(idx),
+            &|idx| self.oid_at_index(idx),
             self.num_objects,
         )
     }
@@ -98,7 +98,7 @@ impl File {
     ///
     /// Use this index for finding additional information via [`File::pack_id_and_pack_offset_at_index()`].
     pub fn lookup(&self, id: impl AsRef<gix_hash::oid>) -> Option<EntryIndex> {
-        crate::index::access::lookup(id, &self.fan, |idx| self.oid_at_index(idx))
+        crate::index::access::lookup(id.as_ref(), &self.fan, &|idx| self.oid_at_index(idx))
     }
 
     /// Given the `index` ranging from 0 to [File::num_objects()], return the pack index and its absolute offset into the pack.
diff --git a/vendor/gix-pack/src/multi_index/chunk.rs b/vendor/gix-pack/src/multi_index/chunk.rs
index 48a003ca0..86e43714d 100644
--- a/vendor/gix-pack/src/multi_index/chunk.rs
+++ b/vendor/gix-pack/src/multi_index/chunk.rs
@@ -82,7 +82,7 @@ pub mod index_names {
     /// Write all `paths` in order to `out`, including padding.
     pub fn write(
         paths: impl IntoIterator<Item = impl AsRef<Path>>,
-        mut out: impl std::io::Write,
+        out: &mut dyn std::io::Write,
     ) -> std::io::Result<()> {
         let mut written_bytes = 0;
         for path in paths {
@@ -130,9 +130,9 @@ pub mod fanout {
     /// Write the fanout for the given entries, which must be sorted by oid
     pub(crate) fn write(
         sorted_entries: &[multi_index::write::Entry],
-        mut out: impl std::io::Write,
+        out: &mut dyn std::io::Write,
     ) -> std::io::Result<()> {
-        let fanout = crate::index::write::encode::fanout(sorted_entries.iter().map(|e| e.id.first_byte()));
+        let fanout = crate::index::encode::fanout(&mut sorted_entries.iter().map(|e| e.id.first_byte()));
 
         for value in fanout.iter() {
             out.write_all(&value.to_be_bytes())?;
@@ -157,7 +157,7 @@ pub mod lookup {
 
     pub(crate) fn write(
         sorted_entries: &[multi_index::write::Entry],
-        mut out: impl std::io::Write,
+        out: &mut dyn std::io::Write,
     ) -> std::io::Result<()> {
         for entry in sorted_entries {
             out.write_all(entry.id.as_slice())?;
@@ -188,9 +188,9 @@ pub mod offsets {
     pub(crate) fn write(
         sorted_entries: &[multi_index::write::Entry],
         large_offsets_needed: bool,
-        mut out: impl std::io::Write,
+        out: &mut dyn std::io::Write,
     ) -> std::io::Result<()> {
-        use crate::index::write::encode::{HIGH_BIT, LARGE_OFFSET_THRESHOLD};
+        use crate::index::encode::{HIGH_BIT, LARGE_OFFSET_THRESHOLD};
 
         let mut num_large_offsets = 0u32;
         for entry in sorted_entries {
@@ -226,7 +226,7 @@ pub mod offsets {
 pub mod large_offsets {
     use std::ops::Range;
 
-    use crate::{index::write::encode::LARGE_OFFSET_THRESHOLD, multi_index};
+    use crate::{index::encode::LARGE_OFFSET_THRESHOLD, multi_index};
 
     /// The id uniquely identifying the large offsets table (with 64 bit offsets)
     pub const ID: gix_chunk::Id = *b"LOFF";
@@ -254,7 +254,7 @@ pub mod large_offsets {
     pub(crate) fn write(
         sorted_entries: &[multi_index::write::Entry],
         mut num_large_offsets: usize,
-        mut out: impl std::io::Write,
+        out: &mut dyn std::io::Write,
     ) -> std::io::Result<()> {
         for offset in sorted_entries
             .iter()
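Note on the chunk.rs hunks above: every chunk writer switches from a generic `mut out: impl std::io::Write` parameter to a `&mut dyn std::io::Write` trait object, trading per-type monomorphization for a single dynamically-dispatched copy while leaving ownership of the writer with the caller. A minimal stand-alone sketch of the two styles (the function names are made up for illustration and are not part of gix-pack); existing callers keep working because `&mut W` itself implements `Write`:

    use std::io::Write;

    // Generic writer: monomorphized per concrete `W`, takes the writer by value.
    fn write_v1(mut out: impl Write) -> std::io::Result<()> {
        out.write_all(b"data")
    }

    // Trait-object writer: one compiled copy, caller keeps ownership of the writer.
    fn write_v2(out: &mut dyn Write) -> std::io::Result<()> {
        out.write_all(b"data")
    }

    fn main() -> std::io::Result<()> {
        let mut buf = Vec::new();
        write_v1(&mut buf)?; // still works: `&mut Vec<u8>` implements `Write`
        write_v2(&mut buf)?;
        assert_eq!(buf, b"datadata".to_vec());
        Ok(())
    }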
diff --git a/vendor/gix-pack/src/multi_index/verify.rs b/vendor/gix-pack/src/multi_index/verify.rs
index 856a48501..0903b3568 100644
--- a/vendor/gix-pack/src/multi_index/verify.rs
+++ b/vendor/gix-pack/src/multi_index/verify.rs
@@ -1,6 +1,6 @@
 use std::{cmp::Ordering, sync::atomic::AtomicBool, time::Instant};
 
-use gix_features::progress::Progress;
+use gix_features::progress::{Count, DynNestedProgress, Progress};
 
 use crate::{index, multi_index::File};
 
@@ -39,13 +39,11 @@ pub mod integrity {
     }
 
     /// Returned by [`multi_index::File::verify_integrity()`][crate::multi_index::File::verify_integrity()].
-    pub struct Outcome<P> {
+    pub struct Outcome {
         /// The computed checksum of the multi-index which matched the stored one.
         pub actual_index_checksum: gix_hash::ObjectId,
         /// The for each entry in [`index_names()`][super::File::index_names()] provide the corresponding pack traversal outcome.
         pub pack_traverse_statistics: Vec<crate::index::traverse::Statistics>,
-        /// The provided progress instance.
-        pub progress: P,
     }
 
     /// The progress ids used in [`multi_index::File::verify_integrity()`][crate::multi_index::File::verify_integrity()].
@@ -80,7 +78,7 @@ impl File {
     /// of this index file, and return it if it does.
     pub fn verify_checksum(
         &self,
-        progress: impl Progress,
+        progress: &mut dyn Progress,
         should_interrupt: &AtomicBool,
     ) -> Result<gix_hash::ObjectId, checksum::Error> {
         crate::verify::checksum_on_disk_or_mmap(
@@ -96,14 +94,11 @@ impl File {
     /// Similar to [`verify_integrity()`][File::verify_integrity()] but without any deep inspection of objects.
     ///
     /// Instead we only validate the contents of the multi-index itself.
-    pub fn verify_integrity_fast<P>(
+    pub fn verify_integrity_fast(
         &self,
-        progress: P,
+        progress: &mut dyn DynNestedProgress,
         should_interrupt: &AtomicBool,
-    ) -> Result<(gix_hash::ObjectId, P), integrity::Error>
-    where
-        P: Progress,
-    {
+    ) -> Result<gix_hash::ObjectId, integrity::Error> {
         self.verify_integrity_inner(
             progress,
             should_interrupt,
@@ -114,35 +109,33 @@ impl File {
             index::traverse::Error::Processor(err) => err,
             _ => unreachable!("BUG: no other error type is possible"),
         })
-        .map(|o| (o.actual_index_checksum, o.progress))
+        .map(|o| o.actual_index_checksum)
     }
 
     /// Similar to [`crate::Bundle::verify_integrity()`] but checks all contained indices and their packs.
     ///
     /// Note that it's considered a failure if an index doesn't have a corresponding pack.
-    pub fn verify_integrity<C, P, F>(
+    pub fn verify_integrity<C, F>(
         &self,
-        progress: P,
+        progress: &mut dyn DynNestedProgress,
         should_interrupt: &AtomicBool,
         options: index::verify::integrity::Options<F>,
-    ) -> Result<integrity::Outcome<P>, index::traverse::Error<integrity::Error>>
+    ) -> Result<integrity::Outcome, index::traverse::Error<integrity::Error>>
     where
-        P: Progress,
         C: crate::cache::DecodeEntry,
         F: Fn() -> C + Send + Clone,
     {
         self.verify_integrity_inner(progress, should_interrupt, true, options)
     }
 
-    fn verify_integrity_inner<C, P, F>(
+    fn verify_integrity_inner<C, F>(
         &self,
-        mut progress: P,
+        progress: &mut dyn DynNestedProgress,
         should_interrupt: &AtomicBool,
         deep_check: bool,
         options: index::verify::integrity::Options<F>,
-    ) -> Result<integrity::Outcome<P>, index::traverse::Error<integrity::Error>>
+    ) -> Result<integrity::Outcome, index::traverse::Error<integrity::Error>>
     where
-        P: Progress,
         C: crate::cache::DecodeEntry,
         F: Fn() -> C + Send + Clone,
     {
@@ -150,7 +143,7 @@ impl File {
 
         let actual_index_checksum = self
             .verify_checksum(
-                progress.add_child_with_id(
+                &mut progress.add_child_with_id(
                     format!("{}: checksum", self.path.display()),
                     integrity::ProgressId::ChecksumBytes.into(),
                 ),
@@ -176,7 +169,7 @@ impl File {
         let mut pack_ids_and_offsets = Vec::with_capacity(self.num_objects as usize);
         {
             let order_start = Instant::now();
-            let mut progress = progress.add_child_with_id("checking oid order", gix_features::progress::UNKNOWN);
+            let mut progress = progress.add_child_with_id("checking oid order".into(), gix_features::progress::UNKNOWN);
             progress.init(
                 Some(self.num_objects as usize),
                 gix_features::progress::count("objects"),
@@ -238,8 +231,10 @@ impl File {
             let multi_index_entries_to_check = &pack_ids_slice[..slice_end];
             {
                 let offset_start = Instant::now();
-                let mut offsets_progress =
-                    progress.add_child_with_id("verify object offsets", integrity::ProgressId::ObjectOffsets.into());
+                let mut offsets_progress = progress.add_child_with_id(
+                    "verify object offsets".into(),
+                    integrity::ProgressId::ObjectOffsets.into(),
+                );
                 offsets_progress.init(
                     Some(pack_ids_and_offsets.len()),
                     gix_features::progress::count("objects"),
@@ -278,7 +273,6 @@ impl File {
                 let crate::bundle::verify::integrity::Outcome {
                     actual_index_checksum: _,
                     pack_traverse_outcome,
-                    progress: returned_progress,
                 } = bundle
                     .verify_integrity(progress, should_interrupt, options.clone())
                     .map_err(|err| {
@@ -315,7 +309,6 @@ impl File {
                         Interrupted => Interrupted,
                     }
                 })?;
-                progress = returned_progress;
                 pack_traverse_statistics.push(pack_traverse_outcome);
             }
         }
@@ -325,13 +318,12 @@ impl File {
             "BUG: our slicing should allow to visit all objects"
         );
 
-        progress.set_name("Validating multi-pack");
+        progress.set_name("Validating multi-pack".into());
         progress.show_throughput(operation_start);
 
         Ok(integrity::Outcome {
             actual_index_checksum,
             pack_traverse_statistics,
-            progress,
         })
     }
 }
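Note on verify.rs: the progress value is now borrowed as `&mut dyn DynNestedProgress` (or `&mut dyn Progress`) instead of being moved in as a generic `P: Progress` and threaded back out through `Outcome<P>`, which is why the `progress` fields disappear from the outcome structs. A schematic sketch of that ownership change, using a toy `Progress` trait rather than the real gix_features traits:

    // Toy trait standing in for gix_features' progress traits.
    trait Progress {
        fn set_name(&mut self, name: String);
    }

    struct Checked {
        checksum: u64,
    }

    // Old shape: the progress is consumed, so it must be returned for the caller to reuse it.
    fn verify_old<P: Progress>(mut progress: P) -> (Checked, P) {
        progress.set_name("verifying".into());
        (Checked { checksum: 42 }, progress)
    }

    // New shape: the progress is only borrowed, so the outcome no longer carries it.
    fn verify_new(progress: &mut dyn Progress) -> Checked {
        progress.set_name("verifying".into());
        Checked { checksum: 42 }
    }

    struct Noop;
    impl Progress for Noop {
        fn set_name(&mut self, _name: String) {}
    }

    fn main() {
        let progress = Noop;
        let (outcome, mut progress) = verify_old(progress);
        let outcome2 = verify_new(&mut progress); // caller keeps `progress` afterwards
        assert_eq!(outcome.checksum, outcome2.checksum);
    }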
diff --git a/vendor/gix-pack/src/multi_index/write.rs b/vendor/gix-pack/src/multi_index/write.rs
index 9002af9eb..881033091 100644
--- a/vendor/gix-pack/src/multi_index/write.rs
+++ b/vendor/gix-pack/src/multi_index/write.rs
@@ -5,7 +5,7 @@ use std::{
     time::{Instant, SystemTime},
 };
 
-use gix_features::progress::Progress;
+use gix_features::progress::{Count, DynNestedProgress, Progress};
 
 use crate::multi_index;
 
@@ -40,11 +40,9 @@ pub struct Options {
 }
 
 /// The result of [`multi_index::File::write_from_index_paths()`].
-pub struct Outcome<P> {
+pub struct Outcome {
     /// The calculated multi-index checksum of the file at `multi_index_path`.
     pub multi_index_checksum: gix_hash::ObjectId,
-    /// The input progress
-    pub progress: P,
 }
 
 /// The progress ids used in [`write_from_index_paths()`][multi_index::File::write_from_index_paths()].
@@ -79,16 +77,13 @@ impl multi_index::File {
     /// Create a new multi-index file for writing to `out` from the pack index files at `index_paths`.
     ///
     /// Progress is sent to `progress` and interruptions checked via `should_interrupt`.
-    pub fn write_from_index_paths<P>(
+    pub fn write_from_index_paths(
         mut index_paths: Vec<PathBuf>,
-        out: impl std::io::Write,
-        mut progress: P,
+        out: &mut dyn std::io::Write,
+        progress: &mut dyn DynNestedProgress,
         should_interrupt: &AtomicBool,
         Options { object_hash }: Options,
-    ) -> Result<Outcome<P>, Error>
-    where
-        P: Progress,
-    {
+    ) -> Result<Outcome, Error> {
         let out = gix_features::hash::Write::new(out, object_hash);
         let (index_paths_sorted, index_filenames_sorted) = {
             index_paths.sort();
@@ -102,8 +97,10 @@ impl multi_index::File {
         let entries = {
             let mut entries = Vec::new();
             let start = Instant::now();
-            let mut progress =
-                progress.add_child_with_id("Collecting entries", ProgressId::FromPathsCollectingEntries.into());
+            let mut progress = progress.add_child_with_id(
+                "Collecting entries".into(),
+                ProgressId::FromPathsCollectingEntries.into(),
+            );
             progress.init(Some(index_paths_sorted.len()), gix_features::progress::count("indices"));
 
             // This could be parallelized… but it's probably not worth it unless you have 500mio objects.
@@ -129,7 +126,7 @@ impl multi_index::File {
             progress.show_throughput(start);
 
             let start = Instant::now();
-            progress.set_name("Deduplicate");
+            progress.set_name("Deduplicate".into());
            progress.init(Some(entries.len()), gix_features::progress::count("entries"));
             entries.sort_by(|l, r| {
                 l.id.cmp(&r.id)
@@ -168,7 +165,8 @@ impl multi_index::File {
             );
         }
 
-        let mut write_progress = progress.add_child_with_id("Writing multi-index", ProgressId::BytesWritten.into());
+        let mut write_progress =
+            progress.add_child_with_id("Writing multi-index".into(), ProgressId::BytesWritten.into());
         let write_start = Instant::now();
         write_progress.init(
             Some(cf.planned_storage_size() as usize + Self::HEADER_LEN),
@@ -187,7 +185,7 @@ impl multi_index::File {
         )?;
 
         {
-            progress.set_name("Writing chunks");
+            progress.set_name("Writing chunks".into());
             progress.init(Some(cf.num_chunks()), gix_features::progress::count("chunks"));
             let mut chunk_write = cf.into_write(&mut out, bytes_written)?;
 
@@ -220,14 +218,11 @@ impl multi_index::File {
         out.inner.inner.write_all(multi_index_checksum.as_slice())?;
         out.progress.show_throughput(write_start);
 
-        Ok(Outcome {
-            multi_index_checksum,
-            progress,
-        })
+        Ok(Outcome { multi_index_checksum })
     }
 
     fn write_header(
-        mut out: impl std::io::Write,
+        out: &mut dyn std::io::Write,
         num_chunks: u8,
         num_indices: u32,
         object_hash: gix_hash::Kind,
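Note on the `.into()` calls added in both verify.rs and write.rs: progress names are now passed as owned `String`s, which suggests the dyn-compatible progress traits take `String` rather than a generic `impl Into<String>` (trait methods with their own type parameters cannot be called through `dyn`). A tiny sketch of why the string literals now need the conversion (toy signatures, not the gix_features API):

    // When the parameter was generic, a `&str` literal was accepted directly.
    fn add_child_generic(name: impl Into<String>) -> String {
        name.into()
    }

    // A dyn-friendly method cannot be generic, so it takes an owned `String`.
    fn add_child_dyn(name: String) -> String {
        name
    }

    fn main() {
        let a = add_child_generic("Collecting entries");
        let b = add_child_dyn("Collecting entries".into()); // the `.into()` seen in the diff
        assert_eq!(a, b);
    }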