author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-30 18:31:44 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-30 18:31:44 +0000
commit     c23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree       2772049aaf84b5c9d0ed12ec8d86812f7a7904b6  /vendor/gix-pack/src/cache
parent     Releasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
download   rustc-c23a457e72abe608715ac76f076f47dc42af07a5.tar.xz
           rustc-c23a457e72abe608715ac76f076f47dc42af07a5.zip
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/gix-pack/src/cache')
-rw-r--r--  vendor/gix-pack/src/cache/delta/from_offsets.rs        8
-rw-r--r--  vendor/gix-pack/src/cache/delta/mod.rs                 8
-rw-r--r--  vendor/gix-pack/src/cache/delta/traverse/mod.rs       21
-rw-r--r--  vendor/gix-pack/src/cache/delta/traverse/resolve.rs  117
-rw-r--r--  vendor/gix-pack/src/cache/object.rs                    2
5 files changed, 67 insertions, 89 deletions
diff --git a/vendor/gix-pack/src/cache/delta/from_offsets.rs b/vendor/gix-pack/src/cache/delta/from_offsets.rs
index 065b1ca20..d790dcc0f 100644
--- a/vendor/gix-pack/src/cache/delta/from_offsets.rs
+++ b/vendor/gix-pack/src/cache/delta/from_offsets.rs
@@ -42,11 +42,11 @@ impl<T> Tree<T> {
     ///
     /// Note that the sort order is ascending. The given pack file path must match the provided offsets.
     pub fn from_offsets_in_pack(
-        pack_path: impl AsRef<std::path::Path>,
+        pack_path: &std::path::Path,
         data_sorted_by_offsets: impl Iterator<Item = T>,
-        get_pack_offset: impl Fn(&T) -> data::Offset,
-        resolve_in_pack_id: impl Fn(&gix_hash::oid) -> Option<data::Offset>,
-        mut progress: impl Progress,
+        get_pack_offset: &dyn Fn(&T) -> data::Offset,
+        resolve_in_pack_id: &dyn Fn(&gix_hash::oid) -> Option<data::Offset>,
+        progress: &mut dyn Progress,
         should_interrupt: &AtomicBool,
         object_hash: gix_hash::Kind,
     ) -> Result<Self, Error> {
diff --git a/vendor/gix-pack/src/cache/delta/mod.rs b/vendor/gix-pack/src/cache/delta/mod.rs
index f4c1b6fc6..64b392f76 100644
--- a/vendor/gix-pack/src/cache/delta/mod.rs
+++ b/vendor/gix-pack/src/cache/delta/mod.rs
@@ -179,11 +179,11 @@ mod tests {
     fn tree(index_path: &str, pack_path: &str) -> Result<(), Box<dyn std::error::Error>> {
         let idx = pack::index::File::at(fixture_path(index_path), gix_hash::Kind::Sha1)?;
         crate::cache::delta::Tree::from_offsets_in_pack(
-            fixture_path(pack_path),
+            &fixture_path(pack_path),
             idx.sorted_offsets().into_iter(),
-            |ofs| *ofs,
-            |id| idx.lookup(id).map(|index| idx.pack_offset_at_index(index)),
-            gix_features::progress::Discard,
+            &|ofs| *ofs,
+            &|id| idx.lookup(id).map(|index| idx.pack_offset_at_index(index)),
+            &mut gix_features::progress::Discard,
             &AtomicBool::new(false),
             gix_hash::Kind::Sha1,
         )?;
diff --git a/vendor/gix-pack/src/cache/delta/traverse/mod.rs b/vendor/gix-pack/src/cache/delta/traverse/mod.rs
index e933af838..0598cf92e 100644
--- a/vendor/gix-pack/src/cache/delta/traverse/mod.rs
+++ b/vendor/gix-pack/src/cache/delta/traverse/mod.rs
@@ -1,5 +1,6 @@
 use std::sync::atomic::{AtomicBool, Ordering};
 
+use gix_features::progress::DynNestedProgress;
 use gix_features::{
     parallel::in_parallel_with_slice,
     progress::{self, Progress},
@@ -55,11 +56,11 @@ pub struct Context<'a> {
 }
 
 /// Options for [`Tree::traverse()`].
-pub struct Options<'a, P1, P2> {
+pub struct Options<'a, 's> {
     /// is a progress instance to track progress for each object in the traversal.
-    pub object_progress: P1,
+    pub object_progress: Box<dyn DynNestedProgress>,
     /// is a progress instance to track the overall progress.
-    pub size_progress: P2,
+    pub size_progress: &'s mut dyn Progress,
     /// If `Some`, only use the given amount of threads. Otherwise, the amount of threads to use will be selected based on
     /// the amount of available logical cores.
     pub thread_limit: Option<usize>,
@@ -99,7 +100,7 @@ where
     /// This method returns a vector of all tree items, along with their potentially modified custom node data.
     ///
     /// _Note_ that this method consumed the Tree to assure safe parallel traversal with mutation support.
-    pub fn traverse<F, P1, P2, MBFN, E, R>(
+    pub fn traverse<F, MBFN, E, R>(
         mut self,
         resolve: F,
         resolve_data: &R,
@@ -108,17 +109,15 @@ where
         Options {
             thread_limit,
             mut object_progress,
-            mut size_progress,
+            size_progress,
             should_interrupt,
             object_hash,
-        }: Options<'_, P1, P2>,
+        }: Options<'_, '_>,
     ) -> Result<Outcome<T>, Error>
     where
        F: for<'r> Fn(EntryRange, &'r R) -> Option<&'r [u8]> + Send + Clone,
        R: Send + Sync,
-        P1: Progress,
-        P2: Progress,
-        MBFN: FnMut(&mut T, &<P1 as Progress>::SubProgress, Context<'_>) -> Result<(), E> + Send + Clone,
+        MBFN: FnMut(&mut T, &dyn Progress, Context<'_>) -> Result<(), E> + Send + Clone,
        E: std::error::Error + Send + Sync + 'static,
     {
         self.set_pack_entries_end_and_resolve_ref_offsets(pack_entries_end)?;
@@ -150,7 +149,9 @@ where
             resolve::State {
                 delta_bytes: Vec::<u8>::with_capacity(4096),
                 fully_resolved_delta_bytes: Vec::<u8>::with_capacity(4096),
-                progress: threading::lock(&object_progress).add_child(format!("thread {thread_index}")),
+                progress: Box::new(
+                    threading::lock(&object_progress).add_child(format!("thread {thread_index}")),
+                ),
                 resolve: resolve.clone(),
                 modify_base: inspect_object.clone(),
                 child_items: child_items.clone(),
diff --git a/vendor/gix-pack/src/cache/delta/traverse/resolve.rs b/vendor/gix-pack/src/cache/delta/traverse/resolve.rs
index 0a4d29191..daf6f273e 100644
--- a/vendor/gix-pack/src/cache/delta/traverse/resolve.rs
+++ b/vendor/gix-pack/src/cache/delta/traverse/resolve.rs
@@ -17,19 +17,19 @@ use crate::{
     data::EntryRange,
 };
 
-pub(crate) struct State<P, F, MBFN, T: Send> {
+pub(crate) struct State<F, MBFN, T: Send> {
     pub delta_bytes: Vec<u8>,
     pub fully_resolved_delta_bytes: Vec<u8>,
-    pub progress: P,
+    pub progress: Box<dyn Progress>,
     pub resolve: F,
     pub modify_base: MBFN,
     pub child_items: ItemSliceSend<Item<T>>,
 }
 
 #[allow(clippy::too_many_arguments)]
-pub(crate) fn deltas<T, F, MBFN, E, R, P>(
-    object_counter: Option<gix_features::progress::StepShared>,
-    size_counter: Option<gix_features::progress::StepShared>,
+pub(crate) fn deltas<T, F, MBFN, E, R>(
+    objects: gix_features::progress::StepShared,
+    size: gix_features::progress::StepShared,
     node: &mut Item<T>,
     State {
         delta_bytes,
@@ -38,7 +38,7 @@ pub(crate) fn deltas<T, F, MBFN, E, R, P>(
         resolve,
         modify_base,
         child_items,
-    }: &mut State<P, F, MBFN, T>,
+    }: &mut State<F, MBFN, T>,
     resolve_data: &R,
     hash_len: usize,
     threads_left: &AtomicIsize,
@@ -47,20 +47,20 @@ pub(crate) fn deltas<T, F, MBFN, E, R, P>(
 where
     T: Send,
     R: Send + Sync,
-    P: Progress,
     F: for<'r> Fn(EntryRange, &'r R) -> Option<&'r [u8]> + Send + Clone,
-    MBFN: FnMut(&mut T, &P, Context<'_>) -> Result<(), E> + Send + Clone,
+    MBFN: FnMut(&mut T, &dyn Progress, Context<'_>) -> Result<(), E> + Send + Clone,
     E: std::error::Error + Send + Sync + 'static,
 {
     let mut decompressed_bytes_by_pack_offset = BTreeMap::new();
-    let decompress_from_resolver = |slice: EntryRange, out: &mut Vec<u8>| -> Result<(data::Entry, u64), Error> {
+    let mut inflate = zlib::Inflate::default();
+    let mut decompress_from_resolver = |slice: EntryRange, out: &mut Vec<u8>| -> Result<(data::Entry, u64), Error> {
         let bytes = resolve(slice.clone(), resolve_data).ok_or(Error::ResolveFailed {
             pack_offset: slice.start,
         })?;
         let entry = data::Entry::from_bytes(bytes, slice.start, hash_len);
         let compressed = &bytes[entry.header_size()..];
         let decompressed_len = entry.decompressed_size as usize;
-        decompress_all_at_once_with(compressed, decompressed_len, out)?;
+        decompress_all_at_once_with(&mut inflate, compressed, decompressed_len, out)?;
         Ok((entry, slice.end))
     };
 
@@ -103,10 +103,8 @@ where
                 },
             )
             .map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
-            object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
-            size_counter
-                .as_ref()
-                .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+            objects.fetch_add(1, Ordering::Relaxed);
+            size.fetch_add(base_bytes.len(), Ordering::Relaxed);
         }
 
         for mut child in base.into_child_iter() {
@@ -121,7 +119,7 @@ where
                 let (result_size, consumed) = data::delta::decode_header_size(&delta_bytes[consumed..]);
                 header_ofs += consumed;
 
-                set_len(fully_resolved_delta_bytes, result_size as usize);
+                fully_resolved_delta_bytes.resize(result_size as usize, 0);
                 data::delta::apply(&base_bytes, fully_resolved_delta_bytes, &delta_bytes[header_ofs..]);
 
                 // FIXME: this actually invalidates the "pack_offset()" computation, which is not obvious to consumers
@@ -136,7 +134,7 @@ where
             } else {
                 modify_base(
                     child.data(),
-                    progress,
+                    &progress,
                     Context {
                         entry: &child_entry,
                         entry_end,
@@ -145,10 +143,8 @@ where
                     },
                 )
                 .map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
-                object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
-                size_counter
-                    .as_ref()
-                    .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+                objects.fetch_add(1, Ordering::Relaxed);
+                size.fetch_add(base_bytes.len(), Ordering::Relaxed);
             }
         }
 
@@ -168,9 +164,9 @@ where
             return deltas_mt(
                 initial_threads,
                 decompressed_bytes_by_pack_offset,
-                object_counter,
-                size_counter,
-                progress,
+                objects,
+                size,
+                &progress,
                 nodes,
                 resolve.clone(),
                 resolve_data,
@@ -190,12 +186,12 @@ where
 /// system. Since this thread will take a controlling function, we may spawn one more than that. In threaded mode, we will finish
 /// all remaining work.
 #[allow(clippy::too_many_arguments)]
-pub(crate) fn deltas_mt<T, F, MBFN, E, R, P>(
+pub(crate) fn deltas_mt<T, F, MBFN, E, R>(
     mut threads_to_create: isize,
     decompressed_bytes_by_pack_offset: BTreeMap<u64, (data::Entry, u64, Vec<u8>)>,
-    object_counter: Option<gix_features::progress::StepShared>,
-    size_counter: Option<gix_features::progress::StepShared>,
-    progress: &P,
+    objects: gix_features::progress::StepShared,
+    size: gix_features::progress::StepShared,
+    progress: &dyn Progress,
     nodes: Vec<(u16, Node<'_, T>)>,
     resolve: F,
     resolve_data: &R,
@@ -207,9 +203,8 @@
 where
     T: Send,
     R: Send + Sync,
-    P: Progress,
     F: for<'r> Fn(EntryRange, &'r R) -> Option<&'r [u8]> + Send + Clone,
-    MBFN: FnMut(&mut T, &P, Context<'_>) -> Result<(), E> + Send + Clone,
+    MBFN: FnMut(&mut T, &dyn Progress, Context<'_>) -> Result<(), E> + Send + Clone,
     E: std::error::Error + Send + Sync + 'static,
 {
     let nodes = gix_features::threading::Mutable::new(nodes);
@@ -229,13 +224,14 @@ where
             let decompressed_bytes_by_pack_offset = &decompressed_bytes_by_pack_offset;
             let resolve = resolve.clone();
             let mut modify_base = modify_base.clone();
-            let object_counter = object_counter.as_ref();
-            let size_counter = size_counter.as_ref();
+            let objects = &objects;
+            let size = &size;
             move || -> Result<(), Error> {
                 let mut fully_resolved_delta_bytes = Vec::new();
                 let mut delta_bytes = Vec::new();
-                let decompress_from_resolver =
+                let mut inflate = zlib::Inflate::default();
+                let mut decompress_from_resolver =
                     |slice: EntryRange, out: &mut Vec<u8>| -> Result<(data::Entry, u64), Error> {
                         let bytes = resolve(slice.clone(), resolve_data).ok_or(Error::ResolveFailed {
                             pack_offset: slice.start,
@@ -243,7 +239,7 @@ where
                        })?;
                         let entry = data::Entry::from_bytes(bytes, slice.start, hash_len);
                         let compressed = &bytes[entry.header_size()..];
                         let decompressed_len = entry.decompressed_size as usize;
-                        decompress_all_at_once_with(compressed, decompressed_len, out)?;
+                        decompress_all_at_once_with(&mut inflate, compressed, decompressed_len, out)?;
                         Ok((entry, slice.end))
                     };
@@ -280,10 +276,8 @@ where
                             },
                         )
                         .map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
-                        object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
-                        size_counter
-                            .as_ref()
-                            .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+                        objects.fetch_add(1, Ordering::Relaxed);
+                        size.fetch_add(base_bytes.len(), Ordering::Relaxed);
                     }
 
                     for mut child in base.into_child_iter() {
@@ -328,10 +322,8 @@ where
                                 },
                             )
                             .map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
-                            object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
-                            size_counter
-                                .as_ref()
-                                .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+                            objects.fetch_add(1, Ordering::Relaxed);
+                            size.fetch_add(base_bytes.len(), Ordering::Relaxed);
                         }
                     }
                 }
@@ -357,6 +349,9 @@ where
             // but may instead find a good way to set the polling interval instead of hard-coding it.
             std::thread::sleep(poll_interval);
             // Get out of threads are already starving or they would be starving soon as no work is left.
+            //
+            // Lint: ScopedJoinHandle is not the same depending on active features and is not exposed in some cases.
+            #[allow(clippy::redundant_closure_for_method_calls)]
             if threads.iter().any(|t| t.is_finished()) {
                 let mut running_threads = Vec::new();
                 for thread in threads.drain(..) {
@@ -389,35 +384,17 @@ where
     })
 }
 
-fn set_len(v: &mut Vec<u8>, new_len: usize) {
-    if new_len > v.len() {
-        v.reserve_exact(new_len.saturating_sub(v.capacity()) + (v.capacity() - v.len()));
-        // SAFETY:
-        // 1. we have reserved enough capacity to fit `new_len`
-        // 2. the caller is trusted to write into `v` to completely fill `new_len`.
-        #[allow(unsafe_code, clippy::uninit_vec)]
-        unsafe {
-            v.set_len(new_len);
-        }
-    } else {
-        v.truncate(new_len)
-    }
-}
-
-fn decompress_all_at_once_with(b: &[u8], decompressed_len: usize, out: &mut Vec<u8>) -> Result<(), Error> {
-    set_len(out, decompressed_len);
-    use std::cell::RefCell;
-    thread_local! {
-        pub static INFLATE: RefCell<zlib::Inflate> = RefCell::new(zlib::Inflate::default());
-    }
-
-    INFLATE.with(|inflate| {
-        let mut inflate = inflate.borrow_mut();
-        inflate.reset();
-        inflate.once(b, out).map_err(|err| Error::ZlibInflate {
-            source: err,
-            message: "Failed to decompress entry",
-        })
+fn decompress_all_at_once_with(
+    inflate: &mut zlib::Inflate,
+    b: &[u8],
+    decompressed_len: usize,
+    out: &mut Vec<u8>,
+) -> Result<(), Error> {
+    out.resize(decompressed_len, 0);
+    inflate.reset();
+    inflate.once(b, out).map_err(|err| Error::ZlibInflate {
+        source: err,
+        message: "Failed to decompress entry",
     })?;
     Ok(())
 }
diff --git a/vendor/gix-pack/src/cache/object.rs b/vendor/gix-pack/src/cache/object.rs
index 26896bf89..af1f7d0a4 100644
--- a/vendor/gix-pack/src/cache/object.rs
+++ b/vendor/gix-pack/src/cache/object.rs
@@ -43,7 +43,7 @@ mod memory {
         MemoryCappedHashmap {
             inner: clru::CLruCache::with_config(
                 clru::CLruCacheConfig::new(NonZeroUsize::new(memory_cap_in_bytes).expect("non zero"))
-                    .with_hasher(gix_hashtable::hash::Builder::default())
+                    .with_hasher(gix_hashtable::hash::Builder)
                     .with_scale(CustomScale),
             ),
             free_list: Vec::new(),
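A note on the resolve.rs hunks above: the upstream change drops the thread-local inflater and the unsafe `set_len` helper in favour of a caller-owned decompressor plus `Vec::resize`. Below is a minimal sketch of that pattern only, written against the public `flate2` crate as a stand-in for gix-pack's internal `zlib::Inflate`; the function and variable names are illustrative assumptions, not the vendored code.

```rust
// Sketch: each worker owns one zlib decompressor and reuses it for every entry,
// and the output buffer is sized with `resize` (zero-filled) rather than
// `set_len` over uninitialized memory.
use flate2::{Decompress, FlushDecompress};

fn decompress_all_at_once_with(
    inflate: &mut Decompress, // owned by the calling worker, reused across entries
    compressed: &[u8],
    decompressed_len: usize,
    out: &mut Vec<u8>,
) -> Result<(), flate2::DecompressError> {
    out.resize(decompressed_len, 0); // grow or shrink to the exact expected size
    inflate.reset(true); // keep the allocation, reset the zlib stream state
    inflate.decompress(compressed, out, FlushDecompress::Finish)?;
    Ok(())
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    use flate2::{write::ZlibEncoder, Compression};
    use std::io::Write;

    // Stand-in for the compressed bytes of a single pack entry.
    let mut enc = ZlibEncoder::new(Vec::new(), Compression::default());
    enc.write_all(b"hello delta")?;
    let data = enc.finish()?;

    let mut inflate = Decompress::new(true); // expect a zlib header
    let mut out = Vec::new();
    decompress_all_at_once_with(&mut inflate, &data, b"hello delta".len(), &mut out)?;
    assert_eq!(&out, b"hello delta");
    Ok(())
}
```

Passing the decompressor in from the caller keeps one instance per worker thread without a `thread_local!`, and `resize` trades a small zero-fill cost for removing the `unsafe` block and the `clippy::uninit_vec` allowance.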