Diffstat (limited to 'vendor/gix-pack/src')
 vendor/gix-pack/src/bundle/find.rs | 23
 vendor/gix-pack/src/bundle/init.rs | 2
 vendor/gix-pack/src/bundle/mod.rs | 16
 vendor/gix-pack/src/bundle/write/mod.rs | 48
 vendor/gix-pack/src/cache/delta/from_offsets.rs | 8
 vendor/gix-pack/src/cache/delta/mod.rs | 8
 vendor/gix-pack/src/cache/delta/traverse/mod.rs | 21
 vendor/gix-pack/src/cache/delta/traverse/resolve.rs | 117
 vendor/gix-pack/src/cache/object.rs | 2
 vendor/gix-pack/src/data/entry/decode.rs | 8
 vendor/gix-pack/src/data/entry/header.rs | 4
 vendor/gix-pack/src/data/file/decode/entry.rs | 76
 vendor/gix-pack/src/data/file/decode/header.rs | 15
 vendor/gix-pack/src/data/file/verify.rs | 5
 vendor/gix-pack/src/data/input/bytes_to_entries.rs | 71
 vendor/gix-pack/src/data/input/entries_to_bytes.rs | 5
 vendor/gix-pack/src/data/input/entry.rs | 2
 vendor/gix-pack/src/data/input/lookup_ref_delta_objects.rs | 12
 vendor/gix-pack/src/data/mod.rs | 2
 vendor/gix-pack/src/data/output/count/mod.rs | 2
 vendor/gix-pack/src/data/output/count/objects/mod.rs | 138
 vendor/gix-pack/src/data/output/count/objects/reduce.rs | 23
 vendor/gix-pack/src/data/output/count/objects/types.rs | 10
 vendor/gix-pack/src/data/output/entry/iter_from_counts.rs | 27
 vendor/gix-pack/src/data/output/entry/mod.rs | 9
 vendor/gix-pack/src/find.rs | 21
 vendor/gix-pack/src/find_traits.rs | 82
 vendor/gix-pack/src/index/access.rs | 11
 vendor/gix-pack/src/index/encode.rs | 158
 vendor/gix-pack/src/index/mod.rs | 2
 vendor/gix-pack/src/index/traverse/mod.rs | 35
 vendor/gix-pack/src/index/traverse/with_index.rs | 38
 vendor/gix-pack/src/index/traverse/with_lookup.rs | 32
 vendor/gix-pack/src/index/util.rs | 30
 vendor/gix-pack/src/index/verify.rs | 28
 vendor/gix-pack/src/index/write/encode.rs | 124
 vendor/gix-pack/src/index/write/mod.rs | 31
 vendor/gix-pack/src/multi_index/access.rs | 4
 vendor/gix-pack/src/multi_index/chunk.rs | 16
 vendor/gix-pack/src/multi_index/verify.rs | 48
 vendor/gix-pack/src/multi_index/write.rs | 37
 vendor/gix-pack/src/verify.rs | 4
 42 files changed, 653 insertions(+), 702 deletions(-)
diff --git a/vendor/gix-pack/src/bundle/find.rs b/vendor/gix-pack/src/bundle/find.rs
index 2fc335721..98e28333d 100644
--- a/vendor/gix-pack/src/bundle/find.rs
+++ b/vendor/gix-pack/src/bundle/find.rs
@@ -1,25 +1,30 @@
+use gix_features::zlib;
+
impl crate::Bundle {
- /// Find an object with the given [`ObjectId`][gix_hash::ObjectId] and place its data into `out`.
+ /// Find an object with the given [`ObjectId`](gix_hash::ObjectId) and place its data into `out`.
+ /// `inflate` is used to decompress objects, and will be reset before first use, but not after the last use.
///
- /// [`cache`][crate::cache::DecodeEntry] is used to accelerate the lookup.
+ /// [`cache`](crate::cache::DecodeEntry) is used to accelerate the lookup.
///
/// **Note** that ref deltas are automatically resolved within this pack only, which makes this implementation unusable
/// for thin packs, which by now are expected to be resolved already.
pub fn find<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
out: &'a mut Vec<u8>,
- cache: &mut impl crate::cache::DecodeEntry,
+ inflate: &mut zlib::Inflate,
+ cache: &mut dyn crate::cache::DecodeEntry,
) -> Result<Option<(gix_object::Data<'a>, crate::data::entry::Location)>, crate::data::decode::Error> {
let idx = match self.index.lookup(id) {
Some(idx) => idx,
None => return Ok(None),
};
- self.get_object_by_index(idx, out, cache).map(Some)
+ self.get_object_by_index(idx, out, inflate, cache).map(Some)
}
/// Special-use function to get an object given an index previously returned from
- /// `internal_find_pack_index`.
+ /// [index::File::lookup()](crate::index::File::lookup()).
+ /// `inflate` is used to decompress objects, and will be reset before first use, but not after the last use.
///
/// # Panics
///
@@ -28,7 +33,8 @@ impl crate::Bundle {
&self,
idx: u32,
out: &'a mut Vec<u8>,
- cache: &mut impl crate::cache::DecodeEntry,
+ inflate: &mut zlib::Inflate,
+ cache: &mut dyn crate::cache::DecodeEntry,
) -> Result<(gix_object::Data<'a>, crate::data::entry::Location), crate::data::decode::Error> {
let ofs = self.index.pack_offset_at_index(idx);
let pack_entry = self.pack.entry(ofs);
@@ -37,7 +43,8 @@ impl crate::Bundle {
.decode_entry(
pack_entry,
out,
- |id, _out| {
+ inflate,
+ &|id, _out| {
self.index.lookup(id).map(|idx| {
crate::data::decode::entry::ResolvedBase::InPack(
self.pack.entry(self.index.pack_offset_at_index(idx)),
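For callers migrating to this signature, a minimal sketch of the new call shape, assuming `bundle` is a loaded gix_pack `Bundle` and `id` a known `&gix_hash::oid`; `cache::Never` is the crate's no-op `DecodeEntry` implementation:

    use gix_features::zlib;

    // The caller now owns the decompressor; it is reused across lookups
    // instead of living in a thread-local.
    let mut out = Vec::new();
    let mut inflate = zlib::Inflate::default();
    let mut cache = gix_pack::cache::Never;
    if let Some((data, _location)) = bundle.find(id, &mut out, &mut inflate, &mut cache)? {
        // `data.kind` and `data.data` describe the decoded object.
    }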
diff --git a/vendor/gix-pack/src/bundle/init.rs b/vendor/gix-pack/src/bundle/init.rs
index 3ba5257ed..bde90bde1 100644
--- a/vendor/gix-pack/src/bundle/init.rs
+++ b/vendor/gix-pack/src/bundle/init.rs
@@ -29,7 +29,7 @@ impl Bundle {
fn at_inner(path: &Path, object_hash: gix_hash::Kind) -> Result<Self, Error> {
let ext = path
.extension()
- .and_then(|e| e.to_str())
+ .and_then(std::ffi::OsStr::to_str)
.ok_or_else(|| Error::InvalidPath(path.to_owned()))?;
Ok(match ext {
"idx" => Self {
diff --git a/vendor/gix-pack/src/bundle/mod.rs b/vendor/gix-pack/src/bundle/mod.rs
index 076b355d9..d8ef1107d 100644
--- a/vendor/gix-pack/src/bundle/mod.rs
+++ b/vendor/gix-pack/src/bundle/mod.rs
@@ -3,25 +3,23 @@ pub mod init;
mod find;
///
-#[cfg(not(feature = "wasm"))]
+#[cfg(all(not(feature = "wasm"), feature = "streaming-input"))]
pub mod write;
///
pub mod verify {
use std::sync::atomic::AtomicBool;
- use gix_features::progress::Progress;
+ use gix_features::progress::DynNestedProgress;
///
pub mod integrity {
/// Returned by [`Bundle::verify_integrity()`][crate::Bundle::verify_integrity()].
- pub struct Outcome<P> {
+ pub struct Outcome {
/// The computed checksum of the index which matched the stored one.
pub actual_index_checksum: gix_hash::ObjectId,
/// The packs traversal outcome
pub pack_traverse_outcome: crate::index::traverse::Statistics,
- /// The provided progress instance.
- pub progress: P,
}
}
@@ -30,14 +28,13 @@ pub mod verify {
impl Bundle {
/// Similar to [`crate::index::File::verify_integrity()`] but more convenient to call as the presence of the
/// pack file is a given.
- pub fn verify_integrity<C, P, F>(
+ pub fn verify_integrity<C, F>(
&self,
- progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
options: crate::index::verify::integrity::Options<F>,
- ) -> Result<integrity::Outcome<P>, crate::index::traverse::Error<crate::index::verify::integrity::Error>>
+ ) -> Result<integrity::Outcome, crate::index::traverse::Error<crate::index::verify::integrity::Error>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
F: Fn() -> C + Send + Clone,
{
@@ -53,7 +50,6 @@ pub mod verify {
.map(|o| integrity::Outcome {
actual_index_checksum: o.actual_index_checksum,
pack_traverse_outcome: o.pack_traverse_statistics.expect("pack is set"),
- progress: o.progress,
})
}
}
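Since progress is now a `&mut dyn DynNestedProgress` and is no longer carried in the `Outcome`, a call could look like the following sketch (assuming `bundle: gix_pack::Bundle` and the field names of `index::verify::integrity::Options` as used here):

    use std::sync::atomic::AtomicBool;
    use gix_features::progress;

    let should_interrupt = AtomicBool::new(false);
    let outcome = bundle.verify_integrity(
        &mut progress::Discard,
        &should_interrupt,
        gix_pack::index::verify::integrity::Options {
            verify_mode: gix_pack::index::verify::Mode::HashCrc32,
            traversal: gix_pack::index::traverse::Algorithm::Lookup,
            thread_limit: None,
            make_pack_lookup_cache: || gix_pack::cache::Never,
        },
    )?;
    // The progress instance remains usable by the caller; the outcome only
    // carries the checksum and the traversal statistics.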
diff --git a/vendor/gix-pack/src/bundle/write/mod.rs b/vendor/gix-pack/src/bundle/write/mod.rs
index 103a0034b..8983cb92f 100644
--- a/vendor/gix-pack/src/bundle/write/mod.rs
+++ b/vendor/gix-pack/src/bundle/write/mod.rs
@@ -13,6 +13,7 @@ use crate::data;
mod error;
pub use error::Error;
+use gix_features::progress::prodash::DynNestedProgress;
mod types;
use types::{LockWriter, PassThrough};
@@ -63,14 +64,15 @@ impl crate::Bundle {
/// be accounted for.
/// - Empty packs always have the same name and not handling this case will result in at most one superfluous pack.
pub fn write_to_directory(
- pack: impl io::BufRead,
- directory: Option<impl AsRef<Path>>,
- mut progress: impl Progress,
+ pack: &mut dyn io::BufRead,
+ directory: Option<&Path>,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
thin_pack_base_object_lookup_fn: Option<ThinPackLookupFn>,
options: Options,
) -> Result<Outcome, Error> {
- let mut read_progress = progress.add_child_with_id("read pack", ProgressId::ReadPackBytes.into());
+ let _span = gix_features::trace::coarse!("gix_pack::Bundle::write_to_directory()");
+ let mut read_progress = progress.add_child_with_id("read pack".into(), ProgressId::ReadPackBytes.into());
read_progress.init(None, progress::bytes());
let pack = progress::Read {
inner: pack,
@@ -170,20 +172,17 @@ impl crate::Bundle {
/// As it sends portions of the input to a thread it requires the 'static lifetime for the interrupt flags. This can only
/// be satisfied by a static `AtomicBool` which is only suitable for programs that only run one of these operations at a time
/// or don't mind that all of them abort when the flag is set.
- pub fn write_to_directory_eagerly<P>(
- pack: impl io::Read + Send + 'static,
+ pub fn write_to_directory_eagerly(
+ pack: Box<dyn io::Read + Send + 'static>,
pack_size: Option<u64>,
directory: Option<impl AsRef<Path>>,
- mut progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &'static AtomicBool,
thin_pack_base_object_lookup_fn: Option<ThinPackLookupFnSend>,
options: Options,
- ) -> Result<Outcome, Error>
- where
- P: Progress,
- P::SubProgress: 'static,
- {
- let mut read_progress = progress.add_child_with_id("read pack", ProgressId::ReadPackBytes.into()); /* Bundle Write Read pack Bytes*/
+ ) -> Result<Outcome, Error> {
+ let _span = gix_features::trace::coarse!("gix_pack::Bundle::write_to_directory_eagerly()");
+ let mut read_progress = progress.add_child_with_id("read pack".into(), ProgressId::ReadPackBytes.into()); /* Bundle Write Read pack Bytes*/
read_progress.init(pack_size.map(|s| s as usize), progress::bytes());
let pack = progress::Read {
inner: pack,
@@ -251,7 +250,7 @@ impl crate::Bundle {
progress,
options,
data_file,
- pack_entries_iter,
+ Box::new(pack_entries_iter),
should_interrupt,
pack_version,
)?;
@@ -266,9 +265,9 @@ impl crate::Bundle {
})
}
- fn inner_write(
+ fn inner_write<'a>(
directory: Option<impl AsRef<Path>>,
- mut progress: impl Progress,
+ progress: &mut dyn DynNestedProgress,
Options {
thread_limit,
iteration_mode: _,
@@ -276,12 +275,12 @@ impl crate::Bundle {
object_hash,
}: Options,
data_file: SharedTempFile,
- pack_entries_iter: impl Iterator<Item = Result<data::input::Entry, data::input::Error>>,
+ mut pack_entries_iter: Box<dyn Iterator<Item = Result<data::input::Entry, data::input::Error>> + 'a>,
should_interrupt: &AtomicBool,
pack_version: data::Version,
) -> Result<WriteOutcome, Error> {
- let indexing_progress = progress.add_child_with_id(
- "create index file",
+ let mut indexing_progress = progress.add_child_with_id(
+ "create index file".into(),
ProgressId::IndexingSteps(Default::default()).into(),
);
Ok(match directory {
@@ -295,14 +294,15 @@ impl crate::Bundle {
let data_file = Arc::clone(&data_file);
move || new_pack_file_resolver(data_file)
},
- pack_entries_iter,
+ &mut pack_entries_iter,
thread_limit,
- indexing_progress,
+ &mut indexing_progress,
&mut index_file,
should_interrupt,
object_hash,
pack_version,
)?;
+ drop(pack_entries_iter);
let data_path = directory.join(format!("pack-{}.pack", outcome.data_hash.to_hex()));
let index_path = data_path.with_extension("idx");
@@ -335,10 +335,10 @@ impl crate::Bundle {
outcome: crate::index::File::write_data_iter_to_stream(
index_kind,
move || new_pack_file_resolver(data_file),
- pack_entries_iter,
+ &mut pack_entries_iter,
thread_limit,
- indexing_progress,
- io::sink(),
+ &mut indexing_progress,
+ &mut io::sink(),
should_interrupt,
object_hash,
pack_version,
diff --git a/vendor/gix-pack/src/cache/delta/from_offsets.rs b/vendor/gix-pack/src/cache/delta/from_offsets.rs
index 065b1ca20..d790dcc0f 100644
--- a/vendor/gix-pack/src/cache/delta/from_offsets.rs
+++ b/vendor/gix-pack/src/cache/delta/from_offsets.rs
@@ -42,11 +42,11 @@ impl<T> Tree<T> {
///
/// Note that the sort order is ascending. The given pack file path must match the provided offsets.
pub fn from_offsets_in_pack(
- pack_path: impl AsRef<std::path::Path>,
+ pack_path: &std::path::Path,
data_sorted_by_offsets: impl Iterator<Item = T>,
- get_pack_offset: impl Fn(&T) -> data::Offset,
- resolve_in_pack_id: impl Fn(&gix_hash::oid) -> Option<data::Offset>,
- mut progress: impl Progress,
+ get_pack_offset: &dyn Fn(&T) -> data::Offset,
+ resolve_in_pack_id: &dyn Fn(&gix_hash::oid) -> Option<data::Offset>,
+ progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
object_hash: gix_hash::Kind,
) -> Result<Self, Error> {
diff --git a/vendor/gix-pack/src/cache/delta/mod.rs b/vendor/gix-pack/src/cache/delta/mod.rs
index f4c1b6fc6..64b392f76 100644
--- a/vendor/gix-pack/src/cache/delta/mod.rs
+++ b/vendor/gix-pack/src/cache/delta/mod.rs
@@ -179,11 +179,11 @@ mod tests {
fn tree(index_path: &str, pack_path: &str) -> Result<(), Box<dyn std::error::Error>> {
let idx = pack::index::File::at(fixture_path(index_path), gix_hash::Kind::Sha1)?;
crate::cache::delta::Tree::from_offsets_in_pack(
- fixture_path(pack_path),
+ &fixture_path(pack_path),
idx.sorted_offsets().into_iter(),
- |ofs| *ofs,
- |id| idx.lookup(id).map(|index| idx.pack_offset_at_index(index)),
- gix_features::progress::Discard,
+ &|ofs| *ofs,
+ &|id| idx.lookup(id).map(|index| idx.pack_offset_at_index(index)),
+ &mut gix_features::progress::Discard,
&AtomicBool::new(false),
gix_hash::Kind::Sha1,
)?;
diff --git a/vendor/gix-pack/src/cache/delta/traverse/mod.rs b/vendor/gix-pack/src/cache/delta/traverse/mod.rs
index e933af838..0598cf92e 100644
--- a/vendor/gix-pack/src/cache/delta/traverse/mod.rs
+++ b/vendor/gix-pack/src/cache/delta/traverse/mod.rs
@@ -1,5 +1,6 @@
use std::sync::atomic::{AtomicBool, Ordering};
+use gix_features::progress::DynNestedProgress;
use gix_features::{
parallel::in_parallel_with_slice,
progress::{self, Progress},
@@ -55,11 +56,11 @@ pub struct Context<'a> {
}
/// Options for [`Tree::traverse()`].
-pub struct Options<'a, P1, P2> {
+pub struct Options<'a, 's> {
/// is a progress instance to track progress for each object in the traversal.
- pub object_progress: P1,
+ pub object_progress: Box<dyn DynNestedProgress>,
/// is a progress instance to track the overall progress.
- pub size_progress: P2,
+ pub size_progress: &'s mut dyn Progress,
/// If `Some`, only use the given amount of threads. Otherwise, the amount of threads to use will be selected based on
/// the amount of available logical cores.
pub thread_limit: Option<usize>,
@@ -99,7 +100,7 @@ where
/// This method returns a vector of all tree items, along with their potentially modified custom node data.
///
/// _Note_ that this method consumes the Tree to ensure safe parallel traversal with mutation support.
- pub fn traverse<F, P1, P2, MBFN, E, R>(
+ pub fn traverse<F, MBFN, E, R>(
mut self,
resolve: F,
resolve_data: &R,
@@ -108,17 +109,15 @@ where
Options {
thread_limit,
mut object_progress,
- mut size_progress,
+ size_progress,
should_interrupt,
object_hash,
- }: Options<'_, P1, P2>,
+ }: Options<'_, '_>,
) -> Result<Outcome<T>, Error>
where
F: for<'r> Fn(EntryRange, &'r R) -> Option<&'r [u8]> + Send + Clone,
R: Send + Sync,
- P1: Progress,
- P2: Progress,
- MBFN: FnMut(&mut T, &<P1 as Progress>::SubProgress, Context<'_>) -> Result<(), E> + Send + Clone,
+ MBFN: FnMut(&mut T, &dyn Progress, Context<'_>) -> Result<(), E> + Send + Clone,
E: std::error::Error + Send + Sync + 'static,
{
self.set_pack_entries_end_and_resolve_ref_offsets(pack_entries_end)?;
@@ -150,7 +149,9 @@ where
resolve::State {
delta_bytes: Vec::<u8>::with_capacity(4096),
fully_resolved_delta_bytes: Vec::<u8>::with_capacity(4096),
- progress: threading::lock(&object_progress).add_child(format!("thread {thread_index}")),
+ progress: Box::new(
+ threading::lock(&object_progress).add_child(format!("thread {thread_index}")),
+ ),
resolve: resolve.clone(),
modify_base: inspect_object.clone(),
child_items: child_items.clone(),
diff --git a/vendor/gix-pack/src/cache/delta/traverse/resolve.rs b/vendor/gix-pack/src/cache/delta/traverse/resolve.rs
index 0a4d29191..daf6f273e 100644
--- a/vendor/gix-pack/src/cache/delta/traverse/resolve.rs
+++ b/vendor/gix-pack/src/cache/delta/traverse/resolve.rs
@@ -17,19 +17,19 @@ use crate::{
data::EntryRange,
};
-pub(crate) struct State<P, F, MBFN, T: Send> {
+pub(crate) struct State<F, MBFN, T: Send> {
pub delta_bytes: Vec<u8>,
pub fully_resolved_delta_bytes: Vec<u8>,
- pub progress: P,
+ pub progress: Box<dyn Progress>,
pub resolve: F,
pub modify_base: MBFN,
pub child_items: ItemSliceSend<Item<T>>,
}
#[allow(clippy::too_many_arguments)]
-pub(crate) fn deltas<T, F, MBFN, E, R, P>(
- object_counter: Option<gix_features::progress::StepShared>,
- size_counter: Option<gix_features::progress::StepShared>,
+pub(crate) fn deltas<T, F, MBFN, E, R>(
+ objects: gix_features::progress::StepShared,
+ size: gix_features::progress::StepShared,
node: &mut Item<T>,
State {
delta_bytes,
@@ -38,7 +38,7 @@ pub(crate) fn deltas<T, F, MBFN, E, R, P>(
resolve,
modify_base,
child_items,
- }: &mut State<P, F, MBFN, T>,
+ }: &mut State<F, MBFN, T>,
resolve_data: &R,
hash_len: usize,
threads_left: &AtomicIsize,
@@ -47,20 +47,20 @@ pub(crate) fn deltas<T, F, MBFN, E, R, P>(
where
T: Send,
R: Send + Sync,
- P: Progress,
F: for<'r> Fn(EntryRange, &'r R) -> Option<&'r [u8]> + Send + Clone,
- MBFN: FnMut(&mut T, &P, Context<'_>) -> Result<(), E> + Send + Clone,
+ MBFN: FnMut(&mut T, &dyn Progress, Context<'_>) -> Result<(), E> + Send + Clone,
E: std::error::Error + Send + Sync + 'static,
{
let mut decompressed_bytes_by_pack_offset = BTreeMap::new();
- let decompress_from_resolver = |slice: EntryRange, out: &mut Vec<u8>| -> Result<(data::Entry, u64), Error> {
+ let mut inflate = zlib::Inflate::default();
+ let mut decompress_from_resolver = |slice: EntryRange, out: &mut Vec<u8>| -> Result<(data::Entry, u64), Error> {
let bytes = resolve(slice.clone(), resolve_data).ok_or(Error::ResolveFailed {
pack_offset: slice.start,
})?;
let entry = data::Entry::from_bytes(bytes, slice.start, hash_len);
let compressed = &bytes[entry.header_size()..];
let decompressed_len = entry.decompressed_size as usize;
- decompress_all_at_once_with(compressed, decompressed_len, out)?;
+ decompress_all_at_once_with(&mut inflate, compressed, decompressed_len, out)?;
Ok((entry, slice.end))
};
@@ -103,10 +103,8 @@ where
},
)
.map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
- object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
- size_counter
- .as_ref()
- .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+ objects.fetch_add(1, Ordering::Relaxed);
+ size.fetch_add(base_bytes.len(), Ordering::Relaxed);
}
for mut child in base.into_child_iter() {
@@ -121,7 +119,7 @@ where
let (result_size, consumed) = data::delta::decode_header_size(&delta_bytes[consumed..]);
header_ofs += consumed;
- set_len(fully_resolved_delta_bytes, result_size as usize);
+ fully_resolved_delta_bytes.resize(result_size as usize, 0);
data::delta::apply(&base_bytes, fully_resolved_delta_bytes, &delta_bytes[header_ofs..]);
// FIXME: this actually invalidates the "pack_offset()" computation, which is not obvious to consumers
@@ -136,7 +134,7 @@ where
} else {
modify_base(
child.data(),
- progress,
+ &progress,
Context {
entry: &child_entry,
entry_end,
@@ -145,10 +143,8 @@ where
},
)
.map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
- object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
- size_counter
- .as_ref()
- .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+ objects.fetch_add(1, Ordering::Relaxed);
+ size.fetch_add(base_bytes.len(), Ordering::Relaxed);
}
}
@@ -168,9 +164,9 @@ where
return deltas_mt(
initial_threads,
decompressed_bytes_by_pack_offset,
- object_counter,
- size_counter,
- progress,
+ objects,
+ size,
+ &progress,
nodes,
resolve.clone(),
resolve_data,
@@ -190,12 +186,12 @@ where
/// system. Since this thread will take a controlling function, we may spawn one more than that. In threaded mode, we will finish
/// all remaining work.
#[allow(clippy::too_many_arguments)]
-pub(crate) fn deltas_mt<T, F, MBFN, E, R, P>(
+pub(crate) fn deltas_mt<T, F, MBFN, E, R>(
mut threads_to_create: isize,
decompressed_bytes_by_pack_offset: BTreeMap<u64, (data::Entry, u64, Vec<u8>)>,
- object_counter: Option<gix_features::progress::StepShared>,
- size_counter: Option<gix_features::progress::StepShared>,
- progress: &P,
+ objects: gix_features::progress::StepShared,
+ size: gix_features::progress::StepShared,
+ progress: &dyn Progress,
nodes: Vec<(u16, Node<'_, T>)>,
resolve: F,
resolve_data: &R,
@@ -207,9 +203,8 @@ pub(crate) fn deltas_mt<T, F, MBFN, E, R, P>(
where
T: Send,
R: Send + Sync,
- P: Progress,
F: for<'r> Fn(EntryRange, &'r R) -> Option<&'r [u8]> + Send + Clone,
- MBFN: FnMut(&mut T, &P, Context<'_>) -> Result<(), E> + Send + Clone,
+ MBFN: FnMut(&mut T, &dyn Progress, Context<'_>) -> Result<(), E> + Send + Clone,
E: std::error::Error + Send + Sync + 'static,
{
let nodes = gix_features::threading::Mutable::new(nodes);
@@ -229,13 +224,14 @@ where
let decompressed_bytes_by_pack_offset = &decompressed_bytes_by_pack_offset;
let resolve = resolve.clone();
let mut modify_base = modify_base.clone();
- let object_counter = object_counter.as_ref();
- let size_counter = size_counter.as_ref();
+ let objects = &objects;
+ let size = &size;
move || -> Result<(), Error> {
let mut fully_resolved_delta_bytes = Vec::new();
let mut delta_bytes = Vec::new();
- let decompress_from_resolver =
+ let mut inflate = zlib::Inflate::default();
+ let mut decompress_from_resolver =
|slice: EntryRange, out: &mut Vec<u8>| -> Result<(data::Entry, u64), Error> {
let bytes = resolve(slice.clone(), resolve_data).ok_or(Error::ResolveFailed {
pack_offset: slice.start,
@@ -243,7 +239,7 @@ where
let entry = data::Entry::from_bytes(bytes, slice.start, hash_len);
let compressed = &bytes[entry.header_size()..];
let decompressed_len = entry.decompressed_size as usize;
- decompress_all_at_once_with(compressed, decompressed_len, out)?;
+ decompress_all_at_once_with(&mut inflate, compressed, decompressed_len, out)?;
Ok((entry, slice.end))
};
@@ -280,10 +276,8 @@ where
},
)
.map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
- object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
- size_counter
- .as_ref()
- .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+ objects.fetch_add(1, Ordering::Relaxed);
+ size.fetch_add(base_bytes.len(), Ordering::Relaxed);
}
for mut child in base.into_child_iter() {
@@ -328,10 +322,8 @@ where
},
)
.map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
- object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
- size_counter
- .as_ref()
- .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+ objects.fetch_add(1, Ordering::Relaxed);
+ size.fetch_add(base_bytes.len(), Ordering::Relaxed);
}
}
}
@@ -357,6 +349,9 @@ where
// but may instead find a good way to set the polling interval instead of hard-coding it.
std::thread::sleep(poll_interval);
// Get out if threads are already starving or they would soon be starving as no work is left.
+ //
+ // Lint: ScopedJoinHandle is not the same depending on active features and is not exposed in some cases.
+ #[allow(clippy::redundant_closure_for_method_calls)]
if threads.iter().any(|t| t.is_finished()) {
let mut running_threads = Vec::new();
for thread in threads.drain(..) {
@@ -389,35 +384,17 @@ where
})
}
-fn set_len(v: &mut Vec<u8>, new_len: usize) {
- if new_len > v.len() {
- v.reserve_exact(new_len.saturating_sub(v.capacity()) + (v.capacity() - v.len()));
- // SAFETY:
- // 1. we have reserved enough capacity to fit `new_len`
- // 2. the caller is trusted to write into `v` to completely fill `new_len`.
- #[allow(unsafe_code, clippy::uninit_vec)]
- unsafe {
- v.set_len(new_len);
- }
- } else {
- v.truncate(new_len)
- }
-}
-
-fn decompress_all_at_once_with(b: &[u8], decompressed_len: usize, out: &mut Vec<u8>) -> Result<(), Error> {
- set_len(out, decompressed_len);
- use std::cell::RefCell;
- thread_local! {
- pub static INFLATE: RefCell<zlib::Inflate> = RefCell::new(zlib::Inflate::default());
- }
-
- INFLATE.with(|inflate| {
- let mut inflate = inflate.borrow_mut();
- inflate.reset();
- inflate.once(b, out).map_err(|err| Error::ZlibInflate {
- source: err,
- message: "Failed to decompress entry",
- })
+fn decompress_all_at_once_with(
+ inflate: &mut zlib::Inflate,
+ b: &[u8],
+ decompressed_len: usize,
+ out: &mut Vec<u8>,
+) -> Result<(), Error> {
+ out.resize(decompressed_len, 0);
+ inflate.reset();
+ inflate.once(b, out).map_err(|err| Error::ZlibInflate {
+ source: err,
+ message: "Failed to decompress entry",
})?;
Ok(())
}
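Both the thread-local `INFLATE` and the unsafe `set_len` helper are gone: each worker now owns one `zlib::Inflate` and zero-fills its output buffer. The reuse pattern, sketched under the assumption that `entries` yields `(compressed_bytes, decompressed_len)` pairs:

    use gix_features::zlib;

    let mut inflate = zlib::Inflate::default();
    let mut out = Vec::new();
    for (compressed, decompressed_len) in entries {
        out.resize(decompressed_len, 0); // zero-fills; replaces the unsafe set_len()
        inflate.reset();
        inflate.once(compressed, &mut out)?;
        // `out` now holds the fully decompressed entry.
    }

`Vec::resize` pays for a memset, but removes the `unsafe` block and the `clippy::uninit_vec` allowance.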
diff --git a/vendor/gix-pack/src/cache/object.rs b/vendor/gix-pack/src/cache/object.rs
index 26896bf89..af1f7d0a4 100644
--- a/vendor/gix-pack/src/cache/object.rs
+++ b/vendor/gix-pack/src/cache/object.rs
@@ -43,7 +43,7 @@ mod memory {
MemoryCappedHashmap {
inner: clru::CLruCache::with_config(
clru::CLruCacheConfig::new(NonZeroUsize::new(memory_cap_in_bytes).expect("non zero"))
- .with_hasher(gix_hashtable::hash::Builder::default())
+ .with_hasher(gix_hashtable::hash::Builder)
.with_scale(CustomScale),
),
free_list: Vec::new(),
diff --git a/vendor/gix-pack/src/data/entry/decode.rs b/vendor/gix-pack/src/data/entry/decode.rs
index 79d7aecff..b81320319 100644
--- a/vendor/gix-pack/src/data/entry/decode.rs
+++ b/vendor/gix-pack/src/data/entry/decode.rs
@@ -47,16 +47,16 @@ impl data::Entry {
/// Instantiate an `Entry` from the reader `r`, providing the `pack_offset` to allow tracking the start of the entry data section.
pub fn from_read(
- mut r: impl io::Read,
+ r: &mut dyn io::Read,
pack_offset: data::Offset,
hash_len: usize,
) -> Result<data::Entry, io::Error> {
- let (type_id, size, mut consumed) = streaming_parse_header_info(&mut r)?;
+ let (type_id, size, mut consumed) = streaming_parse_header_info(r)?;
use crate::data::entry::Header::*;
let object = match type_id {
OFS_DELTA => {
- let (distance, leb_bytes) = leb64_from_read(&mut r)?;
+ let (distance, leb_bytes) = leb64_from_read(r)?;
let delta = OfsDelta {
base_distance: distance,
};
@@ -89,7 +89,7 @@ impl data::Entry {
}
#[inline]
-fn streaming_parse_header_info(mut read: impl io::Read) -> Result<(u8, u64, usize), io::Error> {
+fn streaming_parse_header_info(read: &mut dyn io::Read) -> Result<(u8, u64, usize), io::Error> {
let mut byte = [0u8; 1];
read.read_exact(&mut byte)?;
let mut c = byte[0];
diff --git a/vendor/gix-pack/src/data/entry/header.rs b/vendor/gix-pack/src/data/entry/header.rs
index 4d0dbf4d2..358bd743c 100644
--- a/vendor/gix-pack/src/data/entry/header.rs
+++ b/vendor/gix-pack/src/data/entry/header.rs
@@ -83,7 +83,7 @@ impl Header {
///
/// Returns the amount of bytes written to `out`.
/// `decompressed_size_in_bytes` is the full size in bytes of the object that this header represents
- pub fn write_to(&self, decompressed_size_in_bytes: u64, mut out: impl io::Write) -> io::Result<usize> {
+ pub fn write_to(&self, decompressed_size_in_bytes: u64, out: &mut dyn io::Write) -> io::Result<usize> {
let mut size = decompressed_size_in_bytes;
let mut written = 1;
let mut c: u8 = (self.as_type_id() << 4) | (size as u8 & 0b0000_1111);
@@ -115,7 +115,7 @@ impl Header {
/// The size of the header in bytes when serialized
pub fn size(&self, decompressed_size: u64) -> usize {
- self.write_to(decompressed_size, io::sink())
+ self.write_to(decompressed_size, &mut io::sink())
.expect("io::sink() to never fail")
}
}
diff --git a/vendor/gix-pack/src/data/file/decode/entry.rs b/vendor/gix-pack/src/data/file/decode/entry.rs
index f82e33a7b..d5dd121f8 100644
--- a/vendor/gix-pack/src/data/file/decode/entry.rs
+++ b/vendor/gix-pack/src/data/file/decode/entry.rs
@@ -75,6 +75,7 @@ impl Outcome {
/// Decompression of objects
impl File {
/// Decompress the given `entry` into `out` and return the amount of bytes read from the pack data.
+ /// Note that `inflate` will be reset before use, but not after the last use.
///
/// _Note_ that this method does not resolve deltified objects, but merely decompresses their content
/// `out` is expected to be large enough to hold `entry.size` bytes.
@@ -82,7 +83,12 @@ impl File {
/// # Panics
///
/// If `out` isn't large enough to hold the decompressed `entry`
- pub fn decompress_entry(&self, entry: &data::Entry, out: &mut [u8]) -> Result<usize, Error> {
+ pub fn decompress_entry(
+ &self,
+ entry: &data::Entry,
+ inflate: &mut zlib::Inflate,
+ out: &mut [u8],
+ ) -> Result<usize, Error> {
assert!(
out.len() as u64 >= entry.decompressed_size,
"output buffer isn't large enough to hold decompressed result, want {}, have {}",
@@ -90,7 +96,7 @@ impl File {
out.len()
);
- self.decompress_entry_from_data_offset(entry.data_offset, out)
+ self.decompress_entry_from_data_offset(entry.data_offset, inflate, out)
.map_err(Into::into)
}
@@ -121,53 +127,39 @@ impl File {
pub(crate) fn decompress_entry_from_data_offset(
&self,
data_offset: data::Offset,
+ inflate: &mut zlib::Inflate,
out: &mut [u8],
) -> Result<usize, zlib::inflate::Error> {
let offset: usize = data_offset.try_into().expect("offset representable by machine");
assert!(offset < self.data.len(), "entry offset out of bounds");
- use std::cell::RefCell;
- thread_local! {
- pub static INFLATE: RefCell<zlib::Inflate> = RefCell::new(zlib::Inflate::default());
- }
- INFLATE.with(|inflate| {
- let mut inflate = inflate.borrow_mut();
- let res = inflate
- .once(&self.data[offset..], out)
- .map(|(_status, consumed_in, _consumed_out)| consumed_in);
- inflate.reset();
- res
- })
+ inflate.reset();
+ inflate
+ .once(&self.data[offset..], out)
+ .map(|(_status, consumed_in, _consumed_out)| consumed_in)
}
/// Like `decompress_entry_from_data_offset`, but returns consumed input and output.
pub(crate) fn decompress_entry_from_data_offset_2(
&self,
data_offset: data::Offset,
+ inflate: &mut zlib::Inflate,
out: &mut [u8],
) -> Result<(usize, usize), zlib::inflate::Error> {
let offset: usize = data_offset.try_into().expect("offset representable by machine");
assert!(offset < self.data.len(), "entry offset out of bounds");
- use std::cell::RefCell;
- thread_local! {
- pub static INFLATE: RefCell<zlib::Inflate> = RefCell::new(zlib::Inflate::default());
- }
-
- INFLATE.with(|inflate| {
- let mut inflate = inflate.borrow_mut();
- let res = inflate
- .once(&self.data[offset..], out)
- .map(|(_status, consumed_in, consumed_out)| (consumed_in, consumed_out));
- inflate.reset();
- res
- })
+ inflate.reset();
+ inflate
+ .once(&self.data[offset..], out)
+ .map(|(_status, consumed_in, consumed_out)| (consumed_in, consumed_out))
}
/// Decode an entry, resolving delta's as needed, while growing the `out` vector if there is not enough
/// space to hold the result object.
///
/// The `entry` determines which object to decode, and is commonly obtained with the help of a pack index file or through pack iteration.
+ /// `inflate` will be used for decompressing entries, and will be reset before first use, but not after the last use.
///
/// `resolve` is a function to lookup objects with the given [`ObjectId`][gix_hash::ObjectId], in case the full object id is used to refer to
/// a base object, instead of an in-pack offset.
@@ -178,8 +170,9 @@ impl File {
&self,
entry: data::Entry,
out: &mut Vec<u8>,
- resolve: impl Fn(&gix_hash::oid, &mut Vec<u8>) -> Option<ResolvedBase>,
- delta_cache: &mut impl cache::DecodeEntry,
+ inflate: &mut zlib::Inflate,
+ resolve: &dyn Fn(&gix_hash::oid, &mut Vec<u8>) -> Option<ResolvedBase>,
+ delta_cache: &mut dyn cache::DecodeEntry,
) -> Result<Outcome, Error> {
use crate::data::entry::Header::*;
match entry.header {
@@ -191,15 +184,16 @@ impl File {
.expect("size representable by machine"),
0,
);
- self.decompress_entry(&entry, out.as_mut_slice()).map(|consumed_input| {
- Outcome::from_object_entry(
- entry.header.as_kind().expect("a non-delta entry"),
- &entry,
- consumed_input,
- )
- })
+ self.decompress_entry(&entry, inflate, out.as_mut_slice())
+ .map(|consumed_input| {
+ Outcome::from_object_entry(
+ entry.header.as_kind().expect("a non-delta entry"),
+ &entry,
+ consumed_input,
+ )
+ })
}
- OfsDelta { .. } | RefDelta { .. } => self.resolve_deltas(entry, resolve, out, delta_cache),
+ OfsDelta { .. } | RefDelta { .. } => self.resolve_deltas(entry, resolve, inflate, out, delta_cache),
}
}
@@ -209,9 +203,10 @@ impl File {
fn resolve_deltas(
&self,
last: data::Entry,
- resolve: impl Fn(&gix_hash::oid, &mut Vec<u8>) -> Option<ResolvedBase>,
+ resolve: &dyn Fn(&gix_hash::oid, &mut Vec<u8>) -> Option<ResolvedBase>,
+ inflate: &mut zlib::Inflate,
out: &mut Vec<u8>,
- cache: &mut impl cache::DecodeEntry,
+ cache: &mut dyn cache::DecodeEntry,
) -> Result<Outcome, Error> {
// all deltas, from the one that produces the desired object (first) to the oldest at the end of the chain
let mut chain = SmallVec::<[Delta; 10]>::default();
@@ -297,6 +292,7 @@ impl File {
for (delta_idx, delta) in chain.iter_mut().rev().enumerate() {
let consumed_from_data_offset = self.decompress_entry_from_data_offset(
delta.data_offset,
+ inflate,
&mut instructions[..delta.decompressed_size],
)?;
let is_last_delta_to_be_applied = delta_idx + 1 == chain_len;
@@ -357,7 +353,7 @@ impl File {
let base_entry = cursor;
debug_assert!(!base_entry.header.is_delta());
object_kind = base_entry.header.as_kind();
- self.decompress_entry_from_data_offset(base_entry.data_offset, out)?;
+ self.decompress_entry_from_data_offset(base_entry.data_offset, inflate, out)?;
}
(first_buffer_size, second_buffer_end)
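With the resolver as a `&dyn Fn` and the cache as a trait object, decoding an entry at a known `offset` could look like this sketch (assuming `pack` is a `gix_pack::data::File`; the resolver here declines all by-id base lookups):

    use gix_features::zlib;

    let entry = pack.entry(offset);
    let mut out = Vec::new();
    let mut inflate = zlib::Inflate::default();
    let outcome = pack.decode_entry(
        entry,
        &mut out,
        &mut inflate,
        &|_id, _buf| None, // no out-of-pack base resolution in this sketch
        &mut gix_pack::cache::Never,
    )?;
    // `outcome.kind` and `outcome.object_size` describe the object now held in `out`.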
diff --git a/vendor/gix-pack/src/data/file/decode/header.rs b/vendor/gix-pack/src/data/file/decode/header.rs
index 0afd6e52a..3a6e40f8a 100644
--- a/vendor/gix-pack/src/data/file/decode/header.rs
+++ b/vendor/gix-pack/src/data/file/decode/header.rs
@@ -2,6 +2,7 @@ use crate::{
data,
data::{delta, file::decode::Error, File},
};
+use gix_features::zlib;
/// A return value of a resolve function, which given an [`ObjectId`][gix_hash::ObjectId] determines where an object can be found.
#[derive(Debug, PartialEq, Eq, Hash, Ord, PartialOrd, Clone)]
@@ -37,13 +38,15 @@ impl File {
/// Resolve the object header information starting at `entry`, following the chain of entries as needed.
///
/// The `entry` determines which object to decode, and is commonly obtained with the help of a pack index file or through pack iteration.
+ /// `inflate` will be used for (partially) decompressing entries, and will be reset before first use, but not after the last use.
///
/// `resolve` is a function to lookup objects with the given [`ObjectId`][gix_hash::ObjectId], in case the full object id
/// is used to refer to a base object, instead of an in-pack offset.
pub fn decode_header(
&self,
mut entry: data::Entry,
- resolve: impl Fn(&gix_hash::oid) -> Option<ResolvedBase>,
+ inflate: &mut zlib::Inflate,
+ resolve: &dyn Fn(&gix_hash::oid) -> Option<ResolvedBase>,
) -> Result<Outcome, Error> {
use crate::data::entry::Header::*;
let mut num_deltas = 0;
@@ -60,14 +63,14 @@ impl File {
OfsDelta { base_distance } => {
num_deltas += 1;
if first_delta_decompressed_size.is_none() {
- first_delta_decompressed_size = Some(self.decode_delta_object_size(&entry)?);
+ first_delta_decompressed_size = Some(self.decode_delta_object_size(inflate, &entry)?);
}
entry = self.entry(entry.base_pack_offset(base_distance))
}
RefDelta { base_id } => {
num_deltas += 1;
if first_delta_decompressed_size.is_none() {
- first_delta_decompressed_size = Some(self.decode_delta_object_size(&entry)?);
+ first_delta_decompressed_size = Some(self.decode_delta_object_size(inflate, &entry)?);
}
match resolve(base_id.as_ref()) {
Some(ResolvedBase::InPack(base_entry)) => entry = base_entry,
@@ -89,9 +92,11 @@ impl File {
}
#[inline]
- fn decode_delta_object_size(&self, entry: &data::Entry) -> Result<u64, Error> {
+ fn decode_delta_object_size(&self, inflate: &mut zlib::Inflate, entry: &data::Entry) -> Result<u64, Error> {
let mut buf = [0_u8; 32];
- let used = self.decompress_entry_from_data_offset_2(entry.data_offset, &mut buf)?.1;
+ let used = self
+ .decompress_entry_from_data_offset_2(entry.data_offset, inflate, &mut buf)?
+ .1;
let buf = &buf[..used];
let (_base_size, offset) = delta::decode_header_size(buf);
let (result_size, _offset) = delta::decode_header_size(&buf[offset..]);
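Header resolution follows the same caller-owned-`Inflate` pattern; a companion sketch under the same assumptions as above:

    use gix_features::zlib;

    let mut inflate = zlib::Inflate::default();
    let entry = pack.entry(offset);
    let header = pack.decode_header(entry, &mut inflate, &|_base_id| None)?;
    // `header.kind`, `header.object_size` and `header.num_deltas` are known
    // without decompressing the whole object.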
diff --git a/vendor/gix-pack/src/data/file/verify.rs b/vendor/gix-pack/src/data/file/verify.rs
index afec20826..11cec041d 100644
--- a/vendor/gix-pack/src/data/file/verify.rs
+++ b/vendor/gix-pack/src/data/file/verify.rs
@@ -1,6 +1,5 @@
-use std::sync::atomic::AtomicBool;
-
use gix_features::progress::Progress;
+use std::sync::atomic::AtomicBool;
use crate::data::File;
@@ -27,7 +26,7 @@ impl File {
/// even more thorough integrity check.
pub fn verify_checksum(
&self,
- progress: impl Progress,
+ progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
) -> Result<gix_hash::ObjectId, checksum::Error> {
crate::verify::checksum_on_disk_or_mmap(
diff --git a/vendor/gix-pack/src/data/input/bytes_to_entries.rs b/vendor/gix-pack/src/data/input/bytes_to_entries.rs
index 995c8df2c..7450e9134 100644
--- a/vendor/gix-pack/src/data/input/bytes_to_entries.rs
+++ b/vendor/gix-pack/src/data/input/bytes_to_entries.rs
@@ -1,10 +1,6 @@
use std::{fs, io};
-use gix_features::{
- hash,
- hash::Sha1,
- zlib::{stream::inflate::ReadBoxed, Decompress},
-};
+use gix_features::{hash::Sha1, zlib::Decompress};
use gix_hash::ObjectId;
use crate::data::input;
@@ -14,7 +10,7 @@ use crate::data::input;
/// The iterator used as part of [`Bundle::write_to_directory(…)`][crate::Bundle::write_to_directory()].
pub struct BytesToEntriesIter<BR> {
read: BR,
- decompressor: Option<Box<Decompress>>,
+ decompressor: Decompress,
offset: u64,
had_error: bool,
version: crate::data::Version,
@@ -66,7 +62,7 @@ where
);
Ok(BytesToEntriesIter {
read,
- decompressor: None,
+ decompressor: Decompress::new(true),
compressed,
offset: 12,
had_error: false,
@@ -88,31 +84,25 @@ where
self.objects_left -= 1; // even an error counts as objects
// Read header
- let entry = match self.hash.take() {
+ let entry = match self.hash.as_mut() {
Some(hash) => {
let mut read = read_and_pass_to(
&mut self.read,
- hash::Write {
+ HashWrite {
inner: io::sink(),
hash,
},
);
- let res = crate::data::Entry::from_read(&mut read, self.offset, self.hash_len);
- self.hash = Some(read.write.hash);
- res
+ crate::data::Entry::from_read(&mut read, self.offset, self.hash_len)
}
None => crate::data::Entry::from_read(&mut self.read, self.offset, self.hash_len),
}
.map_err(input::Error::from)?;
// Decompress object to learn its compressed bytes
- let mut decompressor = self
- .decompressor
- .take()
- .unwrap_or_else(|| Box::new(Decompress::new(true)));
let compressed_buf = self.compressed_buf.take().unwrap_or_else(|| Vec::with_capacity(4096));
- decompressor.reset(true);
- let mut decompressed_reader = ReadBoxed {
+ self.decompressor.reset(true);
+ let mut decompressed_reader = DecompressRead {
inner: read_and_pass_to(
&mut self.read,
if self.compressed.keep() {
@@ -121,7 +111,7 @@ where
compressed_buf
},
),
- decompressor,
+ decompressor: &mut self.decompressor,
};
let bytes_copied = io::copy(&mut decompressed_reader, &mut io::sink())?;
@@ -135,7 +125,6 @@ where
let pack_offset = self.offset;
let compressed_size = decompressed_reader.decompressor.total_in();
self.offset += entry.header_size() as u64 + compressed_size;
- self.decompressor = Some(decompressed_reader.decompressor);
let mut compressed = decompressed_reader.inner.write;
debug_assert_eq!(
@@ -149,7 +138,7 @@ where
let crc32 = if self.compressed.crc32() {
let mut header_buf = [0u8; 12 + gix_hash::Kind::longest().len_in_bytes()];
- let header_len = entry.header.write_to(bytes_copied, header_buf.as_mut())?;
+ let header_len = entry.header.write_to(bytes_copied, &mut header_buf.as_mut())?;
let state = gix_features::hash::crc32_update(0, &header_buf[..header_len]);
Some(gix_features::hash::crc32_update(state, &compressed))
} else {
@@ -293,3 +282,43 @@ impl crate::data::File {
)
}
}
+
+/// A reader that decompresses bytes from `inner` using a borrowed `Decompress` instance, so the decompressor can be reused across entries.
+pub struct DecompressRead<'a, R> {
+ /// The reader from which bytes should be decompressed.
+ pub inner: R,
+ /// The decompressor doing all the work.
+ pub decompressor: &'a mut Decompress,
+}
+
+impl<'a, R> io::Read for DecompressRead<'a, R>
+where
+ R: io::BufRead,
+{
+ fn read(&mut self, into: &mut [u8]) -> io::Result<usize> {
+ gix_features::zlib::stream::inflate::read(&mut self.inner, self.decompressor, into)
+ }
+}
+
+/// A utility to automatically generate a hash while writing into an inner writer.
+pub struct HashWrite<'a, T> {
+ /// The hash implementation.
+ pub hash: &'a mut Sha1,
+ /// The inner writer.
+ pub inner: T,
+}
+
+impl<'a, T> std::io::Write for HashWrite<'a, T>
+where
+ T: std::io::Write,
+{
+ fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+ let written = self.inner.write(buf)?;
+ self.hash.update(&buf[..written]);
+ Ok(written)
+ }
+
+ fn flush(&mut self) -> std::io::Result<()> {
+ self.inner.flush()
+ }
+}
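`HashWrite` borrows its hasher rather than owning it, which is what lets the iterator keep `self.hash` as an `Option<Sha1>` without the former take-and-put-back dance. A usage sketch, assuming `Sha1::default()` constructs a fresh hasher:

    use std::io::Write as _;
    use gix_features::hash::Sha1;

    let mut hash = Sha1::default();
    {
        let mut write = HashWrite { hash: &mut hash, inner: std::io::sink() };
        write.write_all(b"pack entry bytes")?; // hashed while being forwarded
    }
    // `hash` stays with the caller and now covers everything written.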
diff --git a/vendor/gix-pack/src/data/input/entries_to_bytes.rs b/vendor/gix-pack/src/data/input/entries_to_bytes.rs
index a8c21e653..27cd04648 100644
--- a/vendor/gix-pack/src/data/input/entries_to_bytes.rs
+++ b/vendor/gix-pack/src/data/input/entries_to_bytes.rs
@@ -73,12 +73,11 @@ where
}
self.num_entries += 1;
entry.header.write_to(entry.decompressed_size, &mut self.output)?;
- std::io::copy(
- &mut entry
+ self.output.write_all(
+ entry
.compressed
.as_deref()
.expect("caller must configure generator to keep compressed bytes"),
- &mut self.output,
)?;
Ok(entry)
}
diff --git a/vendor/gix-pack/src/data/input/entry.rs b/vendor/gix-pack/src/data/input/entry.rs
index 74d4800a0..7d3d9b3cb 100644
--- a/vendor/gix-pack/src/data/input/entry.rs
+++ b/vendor/gix-pack/src/data/input/entry.rs
@@ -33,7 +33,7 @@ impl input::Entry {
let mut header_buf = [0u8; 12 + gix_hash::Kind::longest().len_in_bytes()];
let header_len = self
.header
- .write_to(self.decompressed_size, header_buf.as_mut())
+ .write_to(self.decompressed_size, &mut header_buf.as_mut())
.expect("write to memory will not fail");
let state = gix_features::hash::crc32_update(0, &header_buf[..header_len]);
gix_features::hash::crc32_update(state, self.compressed.as_ref().expect("we always set it"))
diff --git a/vendor/gix-pack/src/data/input/lookup_ref_delta_objects.rs b/vendor/gix-pack/src/data/input/lookup_ref_delta_objects.rs
index d95e6176d..60f686d3a 100644
--- a/vendor/gix-pack/src/data/input/lookup_ref_delta_objects.rs
+++ b/vendor/gix-pack/src/data/input/lookup_ref_delta_objects.rs
@@ -47,13 +47,7 @@ where
/// positive `size_change` values mean an object grew or, more commonly, was inserted. Negative values
/// mean the object shrank, usually because its header changed from ref-deltas to ofs-deltas.
- fn track_change(
- &mut self,
- shifted_pack_offset: u64,
- pack_offset: u64,
- size_change: i64,
- oid: impl Into<Option<ObjectId>>,
- ) {
+ fn track_change(&mut self, shifted_pack_offset: u64, pack_offset: u64, size_change: i64, oid: Option<ObjectId>) {
if size_change == 0 {
return;
}
@@ -61,7 +55,7 @@ where
shifted_pack_offset,
pack_offset,
size_change_in_bytes: size_change,
- oid: oid.into().unwrap_or_else(||
+ oid: oid.unwrap_or_else(||
// NOTE: this value acts as sentinel and the actual hash kind doesn't matter.
gix_hash::Kind::Sha1.null()),
});
@@ -112,7 +106,7 @@ where
entry.pack_offset,
current_pack_offset,
entry.bytes_in_pack() as i64,
- base_id,
+ Some(base_id),
);
entry
}
diff --git a/vendor/gix-pack/src/data/mod.rs b/vendor/gix-pack/src/data/mod.rs
index 36e01d779..9808ae853 100644
--- a/vendor/gix-pack/src/data/mod.rs
+++ b/vendor/gix-pack/src/data/mod.rs
@@ -37,9 +37,11 @@ pub mod init {
pub mod entry;
///
+#[cfg(feature = "streaming-input")]
pub mod input;
/// Utilities to encode pack data entries and write them to a `Write` implementation to resemble a pack data file.
+#[cfg(feature = "generate")]
pub mod output;
/// A slice into a pack file denoting a pack entry.
diff --git a/vendor/gix-pack/src/data/output/count/mod.rs b/vendor/gix-pack/src/data/output/count/mod.rs
index 0c33abd97..481ff65d3 100644
--- a/vendor/gix-pack/src/data/output/count/mod.rs
+++ b/vendor/gix-pack/src/data/output/count/mod.rs
@@ -45,5 +45,5 @@ pub use objects_impl::{objects, objects_unthreaded};
///
pub mod objects {
- pub use super::objects_impl::{Error, ObjectExpansion, Options, Outcome, Result};
+ pub use super::objects_impl::{Error, ObjectExpansion, Options, Outcome};
}
diff --git a/vendor/gix-pack/src/data/output/count/objects/mod.rs b/vendor/gix-pack/src/data/output/count/objects/mod.rs
index a13e41146..24810577c 100644
--- a/vendor/gix-pack/src/data/output/count/objects/mod.rs
+++ b/vendor/gix-pack/src/data/output/count/objects/mod.rs
@@ -1,12 +1,9 @@
-use std::{
- cell::RefCell,
- sync::{atomic::AtomicBool, Arc},
-};
+use std::{cell::RefCell, sync::atomic::AtomicBool};
-use gix_features::{parallel, progress::Progress};
+use gix_features::parallel;
use gix_hash::ObjectId;
-use crate::{data::output, find};
+use crate::data::output;
pub(in crate::data::output::count::objects_impl) mod reduce;
mod util;
@@ -16,9 +13,6 @@ pub use types::{Error, ObjectExpansion, Options, Outcome};
mod tree;
-/// The return type used by [`objects()`].
-pub type Result<E1, E2> = std::result::Result<(Vec<output::Count>, Outcome), Error<E1, E2>>;
-
/// Generate [`Count`][output::Count]s from input `objects` with object expansion based on [`options`][Options]
/// to learn which objects would constitute a pack. This step is required to know exactly how many objects would
/// be in a pack while keeping data around to minimize object database access.
@@ -29,29 +23,25 @@ pub type Result<E1, E2> = std::result::Result<(Vec<output::Count>, Outcome), Err
/// * `objects_ids`
/// * A list of object ids to add to the pack. Duplication checks are performed so no object is ever added to a pack twice.
/// * Objects may be expanded based on the provided [`options`][Options]
-/// * `progress`
-/// * a way to obtain progress information
+/// * `objects`
+/// * counts the number of objects we encounter
/// * `should_interrupt`
/// * A flag that is set to true if the operation should stop
/// * `options`
/// * more configuration
-pub fn objects<Find, Iter, IterErr, Oid>(
+pub fn objects<Find>(
db: Find,
- objects_ids: Iter,
- progress: impl Progress,
+ objects_ids: Box<dyn Iterator<Item = Result<ObjectId, Box<dyn std::error::Error + Send + Sync + 'static>>> + Send>,
+ objects: &dyn gix_features::progress::Count,
should_interrupt: &AtomicBool,
Options {
thread_limit,
input_object_expansion,
chunk_size,
}: Options,
-) -> Result<find::existing::Error<Find::Error>, IterErr>
+) -> Result<(Vec<output::Count>, Outcome), Error>
where
Find: crate::Find + Send + Clone,
- <Find as crate::Find>::Error: Send,
- Iter: Iterator<Item = std::result::Result<Oid, IterErr>> + Send,
- Oid: Into<ObjectId> + Send,
- IterErr: std::error::Error + Send,
{
let lower_bound = objects_ids.size_hint().0;
let (chunk_size, thread_limit, _) = parallel::optimize_chunk_size_and_thread_limit(
@@ -65,71 +55,59 @@ where
size: chunk_size,
};
let seen_objs = gix_hashtable::sync::ObjectIdMap::default();
- let progress = Arc::new(parking_lot::Mutex::new(progress));
+ let objects = objects.counter();
parallel::in_parallel(
chunks,
thread_limit,
{
- let progress = Arc::clone(&progress);
- move |n| {
+ move |_| {
(
Vec::new(), // object data buffer
Vec::new(), // object data buffer 2 to hold two objects at a time
- {
- let mut p = progress
- .lock()
- .add_child_with_id(format!("thread {n}"), gix_features::progress::UNKNOWN);
- p.init(None, gix_features::progress::count("objects"));
- p
- },
+ objects.clone(),
)
}
},
{
let seen_objs = &seen_objs;
- move |oids: Vec<std::result::Result<Oid, IterErr>>, (buf1, buf2, progress)| {
+ move |oids: Vec<_>, (buf1, buf2, objects)| {
expand::this(
&db,
input_object_expansion,
seen_objs,
- oids,
+ &mut oids.into_iter(),
buf1,
buf2,
- progress,
+ objects,
should_interrupt,
true, /*allow pack lookups*/
)
}
},
- reduce::Statistics::new(progress),
+ reduce::Statistics::new(),
)
}
/// Like [`objects()`] but using a single thread only to mostly save on the otherwise required overhead.
-pub fn objects_unthreaded<Find, IterErr, Oid>(
- db: Find,
- object_ids: impl Iterator<Item = std::result::Result<Oid, IterErr>>,
- mut progress: impl Progress,
+pub fn objects_unthreaded(
+ db: &dyn crate::Find,
+ object_ids: &mut dyn Iterator<Item = Result<ObjectId, Box<dyn std::error::Error + Send + Sync + 'static>>>,
+ objects: &dyn gix_features::progress::Count,
should_interrupt: &AtomicBool,
input_object_expansion: ObjectExpansion,
-) -> Result<find::existing::Error<Find::Error>, IterErr>
-where
- Find: crate::Find,
- Oid: Into<ObjectId>,
- IterErr: std::error::Error,
-{
+) -> Result<(Vec<output::Count>, Outcome), Error> {
let seen_objs = RefCell::new(gix_hashtable::HashSet::default());
let (mut buf1, mut buf2) = (Vec::new(), Vec::new());
expand::this(
- &db,
+ db,
input_object_expansion,
&seen_objs,
object_ids,
&mut buf1,
&mut buf2,
- &mut progress,
+ &objects.counter(),
should_interrupt,
false, /*allow pack lookups*/
)
@@ -138,7 +116,6 @@ where
mod expand {
use std::sync::atomic::{AtomicBool, Ordering};
- use gix_features::progress::Progress;
use gix_hash::{oid, ObjectId};
use gix_object::{CommitRefIter, TagRefIter};
@@ -149,26 +126,21 @@ mod expand {
};
use crate::{
data::{output, output::count::PackLocation},
- find, FindExt,
+ FindExt,
};
#[allow(clippy::too_many_arguments)]
- pub fn this<Find, IterErr, Oid>(
- db: &Find,
+ pub fn this(
+ db: &dyn crate::Find,
input_object_expansion: ObjectExpansion,
seen_objs: &impl util::InsertImmutable,
- oids: impl IntoIterator<Item = std::result::Result<Oid, IterErr>>,
+ oids: &mut dyn Iterator<Item = Result<ObjectId, Box<dyn std::error::Error + Send + Sync + 'static>>>,
buf1: &mut Vec<u8>,
#[allow(clippy::ptr_arg)] buf2: &mut Vec<u8>,
- progress: &mut impl Progress,
+ objects: &gix_features::progress::AtomicStep,
should_interrupt: &AtomicBool,
allow_pack_lookups: bool,
- ) -> super::Result<find::existing::Error<Find::Error>, IterErr>
- where
- Find: crate::Find,
- Oid: Into<ObjectId>,
- IterErr: std::error::Error,
- {
+ ) -> Result<(Vec<output::Count>, Outcome), Error> {
use ObjectExpansion::*;
let mut out = Vec::new();
@@ -180,13 +152,13 @@ mod expand {
let mut outcome = Outcome::default();
let stats = &mut outcome;
- for id in oids.into_iter() {
+ for id in oids {
if should_interrupt.load(Ordering::Relaxed) {
return Err(Error::Interrupted);
}
- let id = id.map(|oid| oid.into()).map_err(Error::InputIteration)?;
- let (obj, location) = db.find(id, buf1)?;
+ let id = id.map_err(Error::InputIteration)?;
+ let (obj, location) = db.find(&id, buf1)?;
stats.input_objects += 1;
match input_object_expansion {
TreeAdditionsComparedToAncestor => {
@@ -196,14 +168,14 @@ mod expand {
let mut id = id.to_owned();
loop {
- push_obj_count_unique(&mut out, seen_objs, &id, location, progress, stats, false);
+ push_obj_count_unique(&mut out, seen_objs, &id, location, objects, stats, false);
match obj.kind {
Tree | Blob => break,
Tag => {
id = TagRefIter::from_bytes(obj.data)
.target_id()
.expect("every tag has a target");
- let tmp = db.find(id, buf1)?;
+ let tmp = db.find(&id, buf1)?;
obj = tmp.0;
location = tmp.1;
@@ -225,14 +197,14 @@ mod expand {
Err(err) => return Err(Error::CommitDecode(err)),
}
}
- let (obj, location) = db.find(tree_id, buf1)?;
+ let (obj, location) = db.find(&tree_id, buf1)?;
push_obj_count_unique(
- &mut out, seen_objs, &tree_id, location, progress, stats, true,
+ &mut out, seen_objs, &tree_id, location, objects, stats, true,
);
gix_object::TreeRefIter::from_bytes(obj.data)
};
- let objects = if parent_commit_ids.is_empty() {
+ let objects_ref = if parent_commit_ids.is_empty() {
traverse_delegate.clear();
gix_traverse::tree::breadthfirst(
current_tree_iter,
@@ -241,7 +213,7 @@ mod expand {
stats.decoded_objects += 1;
match db.find(oid, buf).ok() {
Some((obj, location)) => {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
stats.expanded_objects += 1;
out.push(output::Count::from_data(oid, location));
obj.try_into_tree_iter()
@@ -259,20 +231,20 @@ mod expand {
let (parent_commit_obj, location) = db.find(commit_id, buf2)?;
push_obj_count_unique(
- &mut out, seen_objs, commit_id, location, progress, stats, true,
+ &mut out, seen_objs, commit_id, location, objects, stats, true,
);
CommitRefIter::from_bytes(parent_commit_obj.data)
.tree_id()
.expect("every commit has a tree")
};
let parent_tree = {
- let (parent_tree_obj, location) = db.find(parent_tree_id, buf2)?;
+ let (parent_tree_obj, location) = db.find(&parent_tree_id, buf2)?;
push_obj_count_unique(
&mut out,
seen_objs,
&parent_tree_id,
location,
- progress,
+ objects,
stats,
true,
);
@@ -294,8 +266,8 @@ mod expand {
}
&changes_delegate.objects
};
- for id in objects.iter() {
- out.push(id_to_count(db, buf2, id, progress, stats, allow_pack_lookups));
+ for id in objects_ref.iter() {
+ out.push(id_to_count(db, buf2, id, objects, stats, allow_pack_lookups));
}
break;
}
@@ -307,7 +279,7 @@ mod expand {
let mut id = id;
let mut obj = (obj, location);
loop {
- push_obj_count_unique(&mut out, seen_objs, &id, obj.1.clone(), progress, stats, false);
+ push_obj_count_unique(&mut out, seen_objs, &id, obj.1.clone(), objects, stats, false);
match obj.0.kind {
Tree => {
traverse_delegate.clear();
@@ -318,7 +290,7 @@ mod expand {
stats.decoded_objects += 1;
match db.find(oid, buf).ok() {
Some((obj, location)) => {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
stats.expanded_objects += 1;
out.push(output::Count::from_data(oid, location));
obj.try_into_tree_iter()
@@ -330,7 +302,7 @@ mod expand {
)
.map_err(Error::TreeTraverse)?;
for id in &traverse_delegate.non_trees {
- out.push(id_to_count(db, buf1, id, progress, stats, allow_pack_lookups));
+ out.push(id_to_count(db, buf1, id, objects, stats, allow_pack_lookups));
}
break;
}
@@ -339,7 +311,7 @@ mod expand {
.tree_id()
.expect("every commit has a tree");
stats.expanded_objects += 1;
- obj = db.find(id, buf1)?;
+ obj = db.find(&id, buf1)?;
continue;
}
Blob => break,
@@ -348,13 +320,13 @@ mod expand {
.target_id()
.expect("every tag has a target");
stats.expanded_objects += 1;
- obj = db.find(id, buf1)?;
+ obj = db.find(&id, buf1)?;
continue;
}
}
}
}
- AsIs => push_obj_count_unique(&mut out, seen_objs, &id, location, progress, stats, false),
+ AsIs => push_obj_count_unique(&mut out, seen_objs, &id, location, objects, stats, false),
}
}
outcome.total_objects = out.len();
@@ -367,13 +339,13 @@ mod expand {
all_seen: &impl util::InsertImmutable,
id: &oid,
location: Option<crate::data::entry::Location>,
- progress: &mut impl Progress,
+ objects: &gix_features::progress::AtomicStep,
statistics: &mut Outcome,
count_expanded: bool,
) {
let inserted = all_seen.insert(id.to_owned());
if inserted {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
statistics.decoded_objects += 1;
if count_expanded {
statistics.expanded_objects += 1;
@@ -383,15 +355,15 @@ mod expand {
}
#[inline]
- fn id_to_count<Find: crate::Find>(
- db: &Find,
+ fn id_to_count(
+ db: &dyn crate::Find,
buf: &mut Vec<u8>,
id: &oid,
- progress: &mut impl Progress,
+ objects: &gix_features::progress::AtomicStep,
statistics: &mut Outcome,
allow_pack_lookups: bool,
) -> output::Count {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
statistics.expanded_objects += 1;
output::Count {
id: id.to_owned(),
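Input ids are now plain `ObjectId`s with boxed errors, and progress shrank to a `Count` trait object. A sketch of the single-threaded entry point under those signatures, with `db` implementing `gix_pack::Find` and `ids` an iterator of `gix_hash::ObjectId`:

    use std::sync::atomic::AtomicBool;
    use gix_features::progress;
    use gix_pack::data::output::count;

    let should_interrupt = AtomicBool::new(false);
    // Errors, if any, must be boxed by the caller to match the new item type.
    let mut ids = ids.map(Ok::<_, Box<dyn std::error::Error + Send + Sync>>);
    let (counts, outcome) = count::objects_unthreaded(
        &db,
        &mut ids,
        &progress::Discard, // any `Count` implementation; only a counter is needed
        &should_interrupt,
        count::objects::ObjectExpansion::AsIs,
    )?;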
diff --git a/vendor/gix-pack/src/data/output/count/objects/reduce.rs b/vendor/gix-pack/src/data/output/count/objects/reduce.rs
index c6a61d467..03144b60f 100644
--- a/vendor/gix-pack/src/data/output/count/objects/reduce.rs
+++ b/vendor/gix-pack/src/data/output/count/objects/reduce.rs
@@ -1,35 +1,27 @@
-use std::{marker::PhantomData, sync::Arc};
+use std::marker::PhantomData;
-use gix_features::{parallel, progress::Progress};
+use gix_features::parallel;
use super::Outcome;
use crate::data::output;
-pub struct Statistics<E, P> {
+pub struct Statistics<E> {
total: Outcome,
counts: Vec<output::Count>,
- progress: Arc<parking_lot::Mutex<P>>,
_err: PhantomData<E>,
}
-impl<E, P> Statistics<E, P>
-where
- P: Progress,
-{
- pub fn new(progress: Arc<parking_lot::Mutex<P>>) -> Self {
+impl<E> Statistics<E> {
+ pub fn new() -> Self {
Statistics {
total: Default::default(),
counts: Default::default(),
- progress,
- _err: PhantomData::default(),
+ _err: PhantomData,
}
}
}
-impl<E, P> parallel::Reduce for Statistics<E, P>
-where
- P: Progress,
-{
+impl<E> parallel::Reduce for Statistics<E> {
type Input = Result<(Vec<output::Count>, Outcome), E>;
type FeedProduce = ();
type Output = (Vec<output::Count>, Outcome);
@@ -38,7 +30,6 @@ where
fn feed(&mut self, item: Self::Input) -> Result<Self::FeedProduce, Self::Error> {
let (counts, stats) = item?;
self.total.aggregate(stats);
- self.progress.lock().inc_by(counts.len());
self.counts.extend(counts);
Ok(())
}
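
For context, a self-contained sketch of the `parallel::Reduce` shape this file implements, with a simplified stand-in trait since the real one lives in `gix_features::parallel`. The point of the change above: the reducer now only aggregates, and progress is reported elsewhere.

// Stand-in for gix_features::parallel::Reduce (an assumption, simplified).
trait Reduce {
    type Input;
    type Output;
    type Error;
    fn feed(&mut self, item: Self::Input) -> Result<(), Self::Error>;
    fn finalize(self) -> Result<Self::Output, Self::Error>;
}

#[derive(Default)]
struct Statistics {
    total: usize,
    counts: Vec<u32>,
}

impl Reduce for Statistics {
    type Input = Result<Vec<u32>, String>;
    type Output = (Vec<u32>, usize);
    type Error = String;

    fn feed(&mut self, item: Self::Input) -> Result<(), Self::Error> {
        let counts = item?; // propagate worker errors
        self.total += counts.len();
        self.counts.extend(counts); // aggregation is all that's left to do
        Ok(())
    }

    fn finalize(self) -> Result<Self::Output, Self::Error> {
        Ok((self.counts, self.total))
    }
}

fn main() {
    let mut reducer = Statistics::default();
    reducer.feed(Ok(vec![1, 2, 3])).unwrap();
    reducer.feed(Ok(vec![4])).unwrap();
    let (counts, total) = reducer.finalize().unwrap();
    assert_eq!((counts.len(), total), (4, 4));
}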
diff --git a/vendor/gix-pack/src/data/output/count/objects/types.rs b/vendor/gix-pack/src/data/output/count/objects/types.rs
index f39a24ee4..4b9ecea20 100644
--- a/vendor/gix-pack/src/data/output/count/objects/types.rs
+++ b/vendor/gix-pack/src/data/output/count/objects/types.rs
@@ -80,17 +80,13 @@ impl Default for Options {
/// The error returned by the object-counting functions, e.g. [`count::objects()`][crate::data::output::count::objects()].
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
-pub enum Error<FindErr, IterErr>
-where
- FindErr: std::error::Error + 'static,
- IterErr: std::error::Error + 'static,
-{
+pub enum Error {
#[error(transparent)]
CommitDecode(gix_object::decode::Error),
#[error(transparent)]
- FindExisting(#[from] FindErr),
+ FindExisting(#[from] crate::find::existing::Error),
#[error(transparent)]
- InputIteration(IterErr),
+ InputIteration(Box<dyn std::error::Error + Send + Sync + 'static>),
#[error(transparent)]
TreeTraverse(gix_traverse::tree::breadthfirst::Error),
#[error(transparent)]
diff --git a/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs b/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs
index dbe8b0b95..2bebf5b20 100644
--- a/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs
+++ b/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs
@@ -1,6 +1,7 @@
pub(crate) mod function {
use std::{cmp::Ordering, sync::Arc};
+ use gix_features::progress::prodash::{Count, DynNestedProgress};
use gix_features::{parallel, parallel::SequenceId, progress::Progress};
use super::{reduce, util, Error, Mode, Options, Outcome, ProgressId};
@@ -38,7 +39,7 @@ pub(crate) mod function {
pub fn iter_from_counts<Find>(
mut counts: Vec<output::Count>,
db: Find,
- mut progress: impl Progress + 'static,
+ mut progress: Box<dyn DynNestedProgress + 'static>,
Options {
version,
mode,
@@ -46,11 +47,10 @@ pub(crate) mod function {
thread_limit,
chunk_size,
}: Options,
- ) -> impl Iterator<Item = Result<(SequenceId, Vec<output::Entry>), Error<Find::Error>>>
- + parallel::reduce::Finalize<Reduce = reduce::Statistics<Error<Find::Error>>>
+ ) -> impl Iterator<Item = Result<(SequenceId, Vec<output::Entry>), Error>>
+ + parallel::reduce::Finalize<Reduce = reduce::Statistics<Error>>
where
Find: crate::Find + Send + Clone + 'static,
- <Find as crate::Find>::Error: Send,
{
assert!(
matches!(version, crate::data::Version::V2),
@@ -60,7 +60,7 @@ pub(crate) mod function {
parallel::optimize_chunk_size_and_thread_limit(chunk_size, Some(counts.len()), thread_limit, None);
{
let progress = Arc::new(parking_lot::Mutex::new(
- progress.add_child_with_id("resolving", ProgressId::ResolveCounts.into()),
+ progress.add_child_with_id("resolving".into(), ProgressId::ResolveCounts.into()),
));
progress.lock().init(None, gix_features::progress::count("counts"));
let enough_counts_present = counts.len() > 4_000;
@@ -79,7 +79,7 @@ pub(crate) mod function {
use crate::data::output::count::PackLocation::*;
match count.entry_pack_location {
LookedUp(_) => continue,
- NotLookedUp => count.entry_pack_location = LookedUp(db.location_by_oid(count.id, buf)),
+ NotLookedUp => count.entry_pack_location = LookedUp(db.location_by_oid(&count.id, buf)),
}
}
progress.lock().inc_by(chunk_size);
@@ -93,7 +93,7 @@ pub(crate) mod function {
}
let counts_range_by_pack_id = match mode {
Mode::PackCopyAndBaseObjects => {
- let mut progress = progress.add_child_with_id("sorting", ProgressId::SortEntries.into());
+ let mut progress = progress.add_child_with_id("sorting".into(), ProgressId::SortEntries.into());
progress.init(Some(counts.len()), gix_features::progress::count("counts"));
let start = std::time::Instant::now();
@@ -204,7 +204,7 @@ pub(crate) mod function {
stats.objects_copied_from_pack += 1;
entry
}
- None => match db.try_find(count.id, buf).map_err(Error::FindExisting)? {
+ None => match db.try_find(&count.id, buf).map_err(Error::FindExisting)? {
Some((obj, _location)) => {
stats.decoded_and_recompressed_objects += 1;
output::Entry::from_data(count, &obj)
@@ -216,7 +216,7 @@ pub(crate) mod function {
},
}
}
- None => match db.try_find(count.id, buf).map_err(Error::FindExisting)? {
+ None => match db.try_find(&count.id, buf).map_err(Error::FindExisting)? {
Some((obj, _location)) => {
stats.decoded_and_recompressed_objects += 1;
output::Entry::from_data(count, &obj)
@@ -288,7 +288,7 @@ mod reduce {
fn default() -> Self {
Statistics {
total: Default::default(),
- _err: PhantomData::default(),
+ _err: PhantomData,
}
}
}
@@ -395,12 +395,9 @@ mod types {
/// The error returned by the pack generation function [`iter_from_counts()`][crate::data::output::entry::iter_from_counts()].
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
- pub enum Error<FindErr>
- where
- FindErr: std::error::Error + 'static,
- {
+ pub enum Error {
#[error(transparent)]
- FindExisting(FindErr),
+ FindExisting(crate::find::Error),
#[error(transparent)]
NewEntry(#[from] entry::Error),
}
diff --git a/vendor/gix-pack/src/data/output/entry/mod.rs b/vendor/gix-pack/src/data/output/entry/mod.rs
index a94720047..4ab4879eb 100644
--- a/vendor/gix-pack/src/data/output/entry/mod.rs
+++ b/vendor/gix-pack/src/data/output/entry/mod.rs
@@ -66,15 +66,14 @@ impl output::Entry {
potential_bases: &[output::Count],
bases_index_offset: usize,
pack_offset_to_oid: Option<impl FnMut(u32, u64) -> Option<ObjectId>>,
- target_version: crate::data::Version,
+ target_version: data::Version,
) -> Option<Result<Self, Error>> {
if entry.version != target_version {
return None;
};
let pack_offset_must_be_zero = 0;
- let pack_entry =
- crate::data::Entry::from_bytes(&entry.data, pack_offset_must_be_zero, count.id.as_slice().len());
+ let pack_entry = data::Entry::from_bytes(&entry.data, pack_offset_must_be_zero, count.id.as_slice().len());
use crate::data::entry::Header::*;
match pack_entry.header {
@@ -153,9 +152,9 @@ impl output::Entry {
/// This information is known to the one calling the method.
pub fn to_entry_header(
&self,
- version: crate::data::Version,
+ version: data::Version,
index_to_base_distance: impl FnOnce(usize) -> u64,
- ) -> crate::data::entry::Header {
+ ) -> data::entry::Header {
assert!(
matches!(version, data::Version::V2),
"we can only write V2 pack entries for now"
diff --git a/vendor/gix-pack/src/find.rs b/vendor/gix-pack/src/find.rs
index 2908669a2..b049d4d78 100644
--- a/vendor/gix-pack/src/find.rs
+++ b/vendor/gix-pack/src/find.rs
@@ -1,13 +1,16 @@
+/// The error returned by methods of the [Find](crate::Find) trait.
+pub type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
+
///
pub mod existing {
use gix_hash::ObjectId;
- /// The error returned by the [`find(…)`][crate::FindExt::find()] trait methods.
+ /// The error returned by the [`find(…)`](crate::FindExt::find()) trait methods.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
- pub enum Error<T: std::error::Error + 'static> {
+ pub enum Error {
#[error(transparent)]
- Find(T),
+ Find(crate::find::Error),
#[error("An object with id {} could not be found", .oid)]
NotFound { oid: ObjectId },
}
@@ -17,12 +20,12 @@ pub mod existing {
pub mod existing_object {
use gix_hash::ObjectId;
- /// The error returned by the various [`find_*`][crate::FindExt::find_commit()] trait methods.
+ /// The error returned by the various [`find_*`](crate::FindExt::find_commit()) trait methods.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
- pub enum Error<T: std::error::Error + 'static> {
+ pub enum Error {
#[error(transparent)]
- Find(T),
+ Find(crate::find::Error),
#[error(transparent)]
Decode(gix_object::decode::Error),
#[error("An object with id {} could not be found", .oid)]
@@ -36,12 +39,12 @@ pub mod existing_object {
pub mod existing_iter {
use gix_hash::ObjectId;
- /// The error returned by the various [`find_*`][crate::FindExt::find_commit()] trait methods.
+ /// The error returned by the various [`find_*`](crate::FindExt::find_commit()) trait methods.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
- pub enum Error<T: std::error::Error + 'static> {
+ pub enum Error {
#[error(transparent)]
- Find(T),
+ Find(crate::find::Error),
#[error("An object with id {} could not be found", .oid)]
NotFound { oid: ObjectId },
#[error("Expected object of kind {} something else", .expected)]
diff --git a/vendor/gix-pack/src/find_traits.rs b/vendor/gix-pack/src/find_traits.rs
index 6f828afbf..7c4821d81 100644
--- a/vendor/gix-pack/src/find_traits.rs
+++ b/vendor/gix-pack/src/find_traits.rs
@@ -12,11 +12,8 @@ use crate::{data, find};
///
/// [issue]: https://github.com/rust-lang/rust/issues/44265
pub trait Find {
- /// The error returned by [`try_find()`][Find::try_find()]
- type Error: std::error::Error + Send + Sync + 'static;
-
/// Returns true if the object exists in the database.
- fn contains(&self, id: impl AsRef<gix_hash::oid>) -> bool;
+ fn contains(&self, id: &gix_hash::oid) -> bool;
/// Find an object matching `id` in the database while placing its raw, decoded data into `buffer`.
/// A `pack_cache` can be used to speed up subsequent lookups, set it to [`crate::cache::Never`] if the
@@ -26,9 +23,9 @@ pub trait Find {
/// or the error that occurred during lookup or object retrieval.
fn try_find<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
buffer: &'a mut Vec<u8>,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error> {
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, find::Error> {
self.try_find_cached(id, buffer, &mut crate::cache::Never)
}
@@ -40,16 +37,16 @@ pub trait Find {
/// or the error that occurred during lookup or object retrieval.
fn try_find_cached<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
buffer: &'a mut Vec<u8>,
- pack_cache: &mut impl crate::cache::DecodeEntry,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error>;
+ pack_cache: &mut dyn crate::cache::DecodeEntry,
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, find::Error>;
/// Find the packs location where an object with `id` can be found in the database, or `None` if there is no pack
/// holding the object.
///
    /// _Note_ that this is always None if the object isn't packed even though it exists as a loose object.
- fn location_by_oid(&self, id: impl AsRef<gix_hash::oid>, buf: &mut Vec<u8>) -> Option<data::entry::Location>;
+ fn location_by_oid(&self, id: &gix_hash::oid, buf: &mut Vec<u8>) -> Option<data::entry::Location>;
/// Obtain a vector of all offsets, in index order, along with their object id.
fn pack_offsets_and_oid(&self, pack_id: u32) -> Option<Vec<(data::Offset, gix_hash::ObjectId)>>;
@@ -77,10 +74,9 @@ mod ext {
/// while returning the desired object type.
fn $method<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
buffer: &'a mut Vec<u8>,
- ) -> Result<($object_type, Option<crate::data::entry::Location>), find::existing_object::Error<Self::Error>>
- {
+ ) -> Result<($object_type, Option<crate::data::entry::Location>), find::existing_object::Error> {
let id = id.as_ref();
self.try_find(id, buffer)
.map_err(find::existing_object::Error::Find)?
@@ -108,9 +104,9 @@ mod ext {
/// while returning the desired iterator type.
fn $method<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
buffer: &'a mut Vec<u8>,
- ) -> Result<($object_type, Option<crate::data::entry::Location>), find::existing_iter::Error<Self::Error>> {
+ ) -> Result<($object_type, Option<crate::data::entry::Location>), find::existing_iter::Error> {
let id = id.as_ref();
self.try_find(id, buffer)
.map_err(find::existing_iter::Error::Find)?
@@ -133,11 +129,9 @@ mod ext {
        /// Like [`try_find(…)`][super::Find::try_find()], but flattens the `Result<Option<_>>` into a single `Result`, making a non-existing object an error.
fn find<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
buffer: &'a mut Vec<u8>,
- ) -> Result<(gix_object::Data<'a>, Option<crate::data::entry::Location>), find::existing::Error<Self::Error>>
- {
- let id = id.as_ref();
+ ) -> Result<(gix_object::Data<'a>, Option<crate::data::entry::Location>), find::existing::Error> {
self.try_find(id, buffer)
.map_err(find::existing::Error::Find)?
.ok_or_else(|| find::existing::Error::NotFound {
@@ -154,7 +148,7 @@ mod ext {
make_iter_lookup!(find_tag_iter, Kind::Tag, TagRefIter<'a>, try_into_tag_iter);
}
- impl<T: super::Find> FindExt for T {}
+ impl<T: super::Find + ?Sized> FindExt for T {}
}
pub use ext::FindExt;
@@ -169,22 +163,20 @@ mod find_impls {
where
T: crate::Find,
{
- type Error = T::Error;
-
- fn contains(&self, id: impl AsRef<oid>) -> bool {
+ fn contains(&self, id: &oid) -> bool {
(*self).contains(id)
}
fn try_find_cached<'a>(
&self,
- id: impl AsRef<oid>,
+ id: &oid,
buffer: &'a mut Vec<u8>,
- pack_cache: &mut impl crate::cache::DecodeEntry,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error> {
+ pack_cache: &mut dyn crate::cache::DecodeEntry,
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, crate::find::Error> {
(*self).try_find_cached(id, buffer, pack_cache)
}
- fn location_by_oid(&self, id: impl AsRef<oid>, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
+ fn location_by_oid(&self, id: &oid, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
(*self).location_by_oid(id, buf)
}
@@ -201,22 +193,20 @@ mod find_impls {
where
T: super::Find,
{
- type Error = T::Error;
-
- fn contains(&self, id: impl AsRef<oid>) -> bool {
+ fn contains(&self, id: &oid) -> bool {
self.deref().contains(id)
}
fn try_find_cached<'a>(
&self,
- id: impl AsRef<oid>,
+ id: &oid,
buffer: &'a mut Vec<u8>,
- pack_cache: &mut impl crate::cache::DecodeEntry,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error> {
+ pack_cache: &mut dyn crate::cache::DecodeEntry,
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, find::Error> {
self.deref().try_find_cached(id, buffer, pack_cache)
}
- fn location_by_oid(&self, id: impl AsRef<oid>, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
+ fn location_by_oid(&self, id: &oid, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
self.deref().location_by_oid(id, buf)
}
@@ -233,22 +223,20 @@ mod find_impls {
where
T: super::Find,
{
- type Error = T::Error;
-
- fn contains(&self, id: impl AsRef<oid>) -> bool {
+ fn contains(&self, id: &oid) -> bool {
self.deref().contains(id)
}
fn try_find_cached<'a>(
&self,
- id: impl AsRef<oid>,
+ id: &oid,
buffer: &'a mut Vec<u8>,
- pack_cache: &mut impl crate::cache::DecodeEntry,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error> {
+ pack_cache: &mut dyn crate::cache::DecodeEntry,
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, find::Error> {
self.deref().try_find_cached(id, buffer, pack_cache)
}
- fn location_by_oid(&self, id: impl AsRef<oid>, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
+ fn location_by_oid(&self, id: &oid, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
self.deref().location_by_oid(id, buf)
}
@@ -265,22 +253,20 @@ mod find_impls {
where
T: super::Find,
{
- type Error = T::Error;
-
- fn contains(&self, id: impl AsRef<oid>) -> bool {
+ fn contains(&self, id: &oid) -> bool {
self.deref().contains(id)
}
fn try_find_cached<'a>(
&self,
- id: impl AsRef<oid>,
+ id: &oid,
buffer: &'a mut Vec<u8>,
- pack_cache: &mut impl crate::cache::DecodeEntry,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error> {
+ pack_cache: &mut dyn crate::cache::DecodeEntry,
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, find::Error> {
self.deref().try_find_cached(id, buffer, pack_cache)
}
- fn location_by_oid(&self, id: impl AsRef<oid>, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
+ fn location_by_oid(&self, id: &oid, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
self.deref().location_by_oid(id, buf)
}
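
All four blanket impls above drop the associated `Error` type and the `impl AsRef<gix_hash::oid>` parameters for the same reason: generic methods and associated types keep a trait from being usable as `dyn Trait`. A hedged illustration with stand-in types, not the real gix-pack API:

type Error = Box<dyn std::error::Error + Send + Sync + 'static>;

// Object-safe: concrete argument types, erased error, no associated types.
trait Find {
    fn contains(&self, id: &str) -> bool;
    fn try_find(&self, id: &str) -> Result<Option<Vec<u8>>, Error>;
}

struct Store;

impl Find for Store {
    fn contains(&self, id: &str) -> bool {
        id == "abc"
    }
    fn try_find(&self, id: &str) -> Result<Option<Vec<u8>>, Error> {
        Ok(self.contains(id).then(|| b"data".to_vec()))
    }
}

// This signature is the payoff: callers can now pass `&dyn Find`,
// as `id_to_count(db: &dyn crate::Find, ...)` does earlier in this diff.
fn use_dyn(db: &dyn Find) -> Result<(), Error> {
    assert!(db.try_find("abc")?.is_some());
    assert!(!db.contains("def"));
    Ok(())
}

fn main() -> Result<(), Error> {
    use_dyn(&Store)
}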
diff --git a/vendor/gix-pack/src/index/access.rs b/vendor/gix-pack/src/index/access.rs
index 18fb70e2a..3b748e110 100644
--- a/vendor/gix-pack/src/index/access.rs
+++ b/vendor/gix-pack/src/index/access.rs
@@ -119,7 +119,7 @@ impl index::File {
// NOTE: pretty much the same things as in `multi_index::File::lookup`, change things there
// as well.
pub fn lookup(&self, id: impl AsRef<gix_hash::oid>) -> Option<EntryIndex> {
- lookup(id, &self.fan, |idx| self.oid_at_index(idx))
+ lookup(id.as_ref(), &self.fan, &|idx| self.oid_at_index(idx))
}
/// Given a `prefix`, find an object that matches it uniquely within this index and return `Some(Ok(entry_index))`.
@@ -141,7 +141,7 @@ impl index::File {
prefix,
candidates,
&self.fan,
- |idx| self.oid_at_index(idx),
+ &|idx| self.oid_at_index(idx),
self.num_objects,
)
}
@@ -206,7 +206,7 @@ pub(crate) fn lookup_prefix<'a>(
prefix: gix_hash::Prefix,
candidates: Option<&mut Range<EntryIndex>>,
fan: &[u32; FAN_LEN],
- oid_at_index: impl Fn(EntryIndex) -> &'a gix_hash::oid,
+ oid_at_index: &dyn Fn(EntryIndex) -> &'a gix_hash::oid,
num_objects: u32,
) -> Option<PrefixLookupResult> {
let first_byte = prefix.as_oid().first_byte() as usize;
@@ -266,11 +266,10 @@ pub(crate) fn lookup_prefix<'a>(
}
pub(crate) fn lookup<'a>(
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
fan: &[u32; FAN_LEN],
- oid_at_index: impl Fn(EntryIndex) -> &'a gix_hash::oid,
+ oid_at_index: &dyn Fn(EntryIndex) -> &'a gix_hash::oid,
) -> Option<EntryIndex> {
- let id = id.as_ref();
let first_byte = id.first_byte() as usize;
let mut upper_bound = fan[first_byte];
let mut lower_bound = if first_byte != 0 { fan[first_byte - 1] } else { 0 };
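
The fan table drives the search bounds here: `fan[b]` holds the number of objects whose first byte is `<= b`, so all candidates for an id live in `fan[b-1]..fan[b]` and a binary search over that bucket finds it. A self-contained sketch with 3-byte stand-in ids (real ids are 20 or 32 bytes):

use std::cmp::Ordering;

fn lookup(id: &[u8], fan: &[u32; 256], oid_at_index: &dyn Fn(u32) -> &'static [u8]) -> Option<u32> {
    let first_byte = id[0] as usize;
    let mut upper_bound = fan[first_byte];
    let mut lower_bound = if first_byte != 0 { fan[first_byte - 1] } else { 0 };
    // Plain binary search, but only within this first-byte bucket.
    while lower_bound < upper_bound {
        let mid = lower_bound + (upper_bound - lower_bound) / 2;
        match oid_at_index(mid).cmp(id) {
            Ordering::Less => lower_bound = mid + 1,
            Ordering::Greater => upper_bound = mid,
            Ordering::Equal => return Some(mid),
        }
    }
    None
}

fn main() {
    static OIDS: [&[u8]; 3] = [b"\x00aa", b"\x01bb", b"\x01cc"]; // sorted
    let mut fan = [3u32; 256]; // bytes 0x01.. already cover all 3 ids
    fan[0] = 1; // exactly one id starts with 0x00
    let oid_at_index = |idx: u32| OIDS[idx as usize];
    assert_eq!(lookup(b"\x01cc", &fan, &oid_at_index), Some(2));
    assert_eq!(lookup(b"\x01zz", &fan, &oid_at_index), None);
}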
diff --git a/vendor/gix-pack/src/index/encode.rs b/vendor/gix-pack/src/index/encode.rs
new file mode 100644
index 000000000..d9dad68ce
--- /dev/null
+++ b/vendor/gix-pack/src/index/encode.rs
@@ -0,0 +1,158 @@
+use std::cmp::Ordering;
+
+pub(crate) const LARGE_OFFSET_THRESHOLD: u64 = 0x7fff_ffff;
+pub(crate) const HIGH_BIT: u32 = 0x8000_0000;
+
+pub(crate) fn fanout(iter: &mut dyn ExactSizeIterator<Item = u8>) -> [u32; 256] {
+ let mut fan_out = [0u32; 256];
+ let entries_len = iter.len() as u32;
+ let mut iter = iter.enumerate();
+ let mut idx_and_entry = iter.next();
+ let mut upper_bound = 0;
+
+ for (offset_be, byte) in fan_out.iter_mut().zip(0u8..=255) {
+ *offset_be = match idx_and_entry.as_ref() {
+ Some((_idx, first_byte)) => match first_byte.cmp(&byte) {
+ Ordering::Less => unreachable!("ids should be ordered, and we make sure to keep ahead with them"),
+ Ordering::Greater => upper_bound,
+ Ordering::Equal => {
+ if byte == 255 {
+ entries_len
+ } else {
+ idx_and_entry = iter.find(|(_, first_byte)| *first_byte != byte);
+ upper_bound = idx_and_entry.as_ref().map_or(entries_len, |(idx, _)| *idx as u32);
+ upper_bound
+ }
+ }
+ },
+ None => entries_len,
+ };
+ }
+
+ fan_out
+}
+
+#[cfg(feature = "streaming-input")]
+mod function {
+ use gix_features::{
+ hash,
+ progress::{self, DynNestedProgress},
+ };
+ use std::io;
+
+ use super::{fanout, HIGH_BIT, LARGE_OFFSET_THRESHOLD};
+
+ use crate::index::V2_SIGNATURE;
+
+ struct Count<W> {
+ bytes: u64,
+ inner: W,
+ }
+
+ impl<W> Count<W> {
+ fn new(inner: W) -> Self {
+ Count { bytes: 0, inner }
+ }
+ }
+
+ impl<W> io::Write for Count<W>
+ where
+ W: io::Write,
+ {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let written = self.inner.write(buf)?;
+ self.bytes += written as u64;
+ Ok(written)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.flush()
+ }
+ }
+
+ pub(crate) fn write_to(
+ out: &mut dyn io::Write,
+ entries_sorted_by_oid: Vec<crate::cache::delta::Item<crate::index::write::TreeEntry>>,
+ pack_hash: &gix_hash::ObjectId,
+ kind: crate::index::Version,
+ progress: &mut dyn DynNestedProgress,
+ ) -> io::Result<gix_hash::ObjectId> {
+ use io::Write;
+ assert_eq!(kind, crate::index::Version::V2, "Can only write V2 packs right now");
+ assert!(
+ entries_sorted_by_oid.len() <= u32::MAX as usize,
+ "a pack cannot have more than u32::MAX objects"
+ );
+
+ // Write header
+ let mut out = Count::new(std::io::BufWriter::with_capacity(
+ 8 * 4096,
+ hash::Write::new(out, kind.hash()),
+ ));
+ out.write_all(V2_SIGNATURE)?;
+ out.write_all(&(kind as u32).to_be_bytes())?;
+
+ progress.init(Some(4), progress::steps());
+ let start = std::time::Instant::now();
+ let _info = progress.add_child_with_id("writing fan-out table".into(), gix_features::progress::UNKNOWN);
+ let fan_out = fanout(&mut entries_sorted_by_oid.iter().map(|e| e.data.id.first_byte()));
+
+ for value in fan_out.iter() {
+ out.write_all(&value.to_be_bytes())?;
+ }
+
+ progress.inc();
+ let _info = progress.add_child_with_id("writing ids".into(), gix_features::progress::UNKNOWN);
+ for entry in &entries_sorted_by_oid {
+ out.write_all(entry.data.id.as_slice())?;
+ }
+
+ progress.inc();
+ let _info = progress.add_child_with_id("writing crc32".into(), gix_features::progress::UNKNOWN);
+ for entry in &entries_sorted_by_oid {
+ out.write_all(&entry.data.crc32.to_be_bytes())?;
+ }
+
+ progress.inc();
+ let _info = progress.add_child_with_id("writing offsets".into(), gix_features::progress::UNKNOWN);
+ {
+ let mut offsets64 = Vec::<u64>::new();
+ for entry in &entries_sorted_by_oid {
+ let offset: u32 = if entry.offset > LARGE_OFFSET_THRESHOLD {
+ assert!(
+ offsets64.len() < LARGE_OFFSET_THRESHOLD as usize,
+ "Encoding breakdown - way too many 64bit offsets"
+ );
+ offsets64.push(entry.offset);
+ ((offsets64.len() - 1) as u32) | HIGH_BIT
+ } else {
+ entry.offset as u32
+ };
+ out.write_all(&offset.to_be_bytes())?;
+ }
+ for value in offsets64 {
+ out.write_all(&value.to_be_bytes())?;
+ }
+ }
+
+ out.write_all(pack_hash.as_slice())?;
+
+ let bytes_written_without_trailer = out.bytes;
+ let out = out.inner.into_inner()?;
+ let index_hash: gix_hash::ObjectId = out.hash.digest().into();
+ out.inner.write_all(index_hash.as_slice())?;
+ out.inner.flush()?;
+
+ progress.inc();
+ progress.show_throughput_with(
+ start,
+ (bytes_written_without_trailer + 20) as usize,
+ progress::bytes().expect("unit always set"),
+ progress::MessageLevel::Success,
+ );
+
+ Ok(index_hash)
+ }
+}
+#[cfg(feature = "streaming-input")]
+pub(crate) use function::write_to;
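
To make the offset scheme in `write_to` above concrete, here is a standalone round-trip of the v2 encoding: offsets that fit in 31 bits are stored inline, anything larger stores an index into the trailing 64-bit table, with the high bit set as the marker.

const LARGE_OFFSET_THRESHOLD: u64 = 0x7fff_ffff;
const HIGH_BIT: u32 = 0x8000_0000;

fn encode(offsets: &[u64]) -> (Vec<u32>, Vec<u64>) {
    let mut small = Vec::new();
    let mut large = Vec::new();
    for &offset in offsets {
        small.push(if offset > LARGE_OFFSET_THRESHOLD {
            large.push(offset);
            // store the index into the 64-bit table, flagged by the high bit
            ((large.len() - 1) as u32) | HIGH_BIT
        } else {
            offset as u32
        });
    }
    (small, large)
}

fn decode(slot: u32, large: &[u64]) -> u64 {
    if slot & HIGH_BIT != 0 {
        large[(slot & !HIGH_BIT) as usize]
    } else {
        u64::from(slot)
    }
}

fn main() {
    let (small, large) = encode(&[42, 0x1_0000_0000]);
    assert_eq!(decode(small[0], &large), 42);
    assert_eq!(decode(small[1], &large), 0x1_0000_0000);
}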
diff --git a/vendor/gix-pack/src/index/mod.rs b/vendor/gix-pack/src/index/mod.rs
index 36be2d429..8d8807442 100644
--- a/vendor/gix-pack/src/index/mod.rs
+++ b/vendor/gix-pack/src/index/mod.rs
@@ -141,10 +141,12 @@ pub mod init;
pub(crate) mod access;
pub use access::Entry;
+pub(crate) mod encode;
///
pub mod traverse;
mod util;
///
pub mod verify;
///
+#[cfg(feature = "streaming-input")]
pub mod write;
diff --git a/vendor/gix-pack/src/index/traverse/mod.rs b/vendor/gix-pack/src/index/traverse/mod.rs
index 83173f904..1edf0b1d5 100644
--- a/vendor/gix-pack/src/index/traverse/mod.rs
+++ b/vendor/gix-pack/src/index/traverse/mod.rs
@@ -1,9 +1,6 @@
use std::sync::atomic::AtomicBool;
-use gix_features::{
- parallel,
- progress::{Progress, RawProgress},
-};
+use gix_features::{parallel, progress::Progress, zlib};
use crate::index;
@@ -16,6 +13,7 @@ use reduce::Reducer;
mod error;
pub use error::Error;
+use gix_features::progress::DynNestedProgress;
mod types;
pub use types::{Algorithm, ProgressId, SafetyCheck, Statistics};
@@ -46,13 +44,11 @@ impl Default for Options<fn() -> crate::cache::Never> {
}
/// The outcome of the [`traverse()`][index::File::traverse()] method.
-pub struct Outcome<P> {
+pub struct Outcome {
/// The checksum obtained when hashing the file, which matched the checksum contained within the file.
pub actual_index_checksum: gix_hash::ObjectId,
/// The statistics obtained during traversal.
pub statistics: Statistics,
- /// The input progress to allow reuse.
- pub progress: P,
}
/// Traversal of pack data files using an index file
@@ -77,10 +73,10 @@ impl index::File {
///
/// Use [`thread_limit`][Options::thread_limit] to further control parallelism and [`check`][SafetyCheck] to define how much the passed
/// objects shall be verified beforehand.
- pub fn traverse<P, C, Processor, E, F>(
+ pub fn traverse<C, Processor, E, F>(
&self,
pack: &crate::data::File,
- progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
processor: Processor,
Options {
@@ -89,12 +85,11 @@ impl index::File {
check,
make_pack_lookup_cache,
}: Options<F>,
- ) -> Result<Outcome<P>, Error<E>>
+ ) -> Result<Outcome, Error<E>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
E: std::error::Error + Send + Sync + 'static,
- Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn RawProgress) -> Result<(), E> + Send + Clone,
+ Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn Progress) -> Result<(), E> + Send + Clone,
F: Fn() -> C + Send + Clone,
{
match traversal {
@@ -123,8 +118,8 @@ impl index::File {
&self,
pack: &crate::data::File,
check: SafetyCheck,
- pack_progress: impl Progress,
- index_progress: impl Progress,
+ pack_progress: &mut dyn Progress,
+ index_progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
) -> Result<gix_hash::ObjectId, Error<E>>
where
@@ -155,9 +150,10 @@ impl index::File {
pack: &crate::data::File,
cache: &mut C,
buf: &mut Vec<u8>,
- progress: &mut dyn RawProgress,
+ inflate: &mut zlib::Inflate,
+ progress: &mut dyn Progress,
index_entry: &index::Entry,
- processor: &mut impl FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn RawProgress) -> Result<(), E>,
+ processor: &mut impl FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn Progress) -> Result<(), E>,
) -> Result<crate::data::decode::entry::Outcome, Error<E>>
where
C: crate::cache::DecodeEntry,
@@ -169,7 +165,8 @@ impl index::File {
.decode_entry(
pack_entry,
buf,
- |id, _| {
+ inflate,
+ &|id, _| {
self.lookup(id).map(|index| {
crate::data::decode::entry::ResolvedBase::InPack(pack.entry(self.pack_offset_at_index(index)))
})
@@ -205,8 +202,8 @@ fn process_entry<E>(
decompressed: &[u8],
index_entry: &index::Entry,
pack_entry_crc32: impl FnOnce() -> u32,
- progress: &dyn RawProgress,
- processor: &mut impl FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn RawProgress) -> Result<(), E>,
+ progress: &dyn Progress,
+ processor: &mut impl FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn Progress) -> Result<(), E>,
) -> Result<(), Error<E>>
where
E: std::error::Error + Send + Sync + 'static,
diff --git a/vendor/gix-pack/src/index/traverse/with_index.rs b/vendor/gix-pack/src/index/traverse/with_index.rs
index 884277c9d..91382034c 100644
--- a/vendor/gix-pack/src/index/traverse/with_index.rs
+++ b/vendor/gix-pack/src/index/traverse/with_index.rs
@@ -1,6 +1,7 @@
use std::sync::atomic::{AtomicBool, Ordering};
-use gix_features::{parallel, progress::Progress};
+use gix_features::parallel;
+use gix_features::progress::DynNestedProgress;
use super::Error;
use crate::{
@@ -56,31 +57,30 @@ impl index::File {
/// at the cost of memory.
///
/// For more details, see the documentation on the [`traverse()`][index::File::traverse()] method.
- pub fn traverse_with_index<P, Processor, E>(
+ pub fn traverse_with_index<Processor, E>(
&self,
pack: &crate::data::File,
mut processor: Processor,
- mut progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
Options { check, thread_limit }: Options,
- ) -> Result<Outcome<P>, Error<E>>
+ ) -> Result<Outcome, Error<E>>
where
- P: Progress,
- Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn gix_features::progress::RawProgress) -> Result<(), E>
+ Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn gix_features::progress::Progress) -> Result<(), E>
+ Send
+ Clone,
E: std::error::Error + Send + Sync + 'static,
{
let (verify_result, traversal_result) = parallel::join(
{
- let pack_progress = progress.add_child_with_id(
+ let mut pack_progress = progress.add_child_with_id(
format!(
"Hash of pack '{}'",
pack.path().file_name().expect("pack has filename").to_string_lossy()
),
ProgressId::HashPackDataBytes.into(),
);
- let index_progress = progress.add_child_with_id(
+ let mut index_progress = progress.add_child_with_id(
format!(
"Hash of index '{}'",
self.path.file_name().expect("index has filename").to_string_lossy()
@@ -88,7 +88,8 @@ impl index::File {
ProgressId::HashPackIndexBytes.into(),
);
move || {
- let res = self.possibly_verify(pack, check, pack_progress, index_progress, should_interrupt);
+ let res =
+ self.possibly_verify(pack, check, &mut pack_progress, &mut index_progress, should_interrupt);
if res.is_err() {
should_interrupt.store(true, Ordering::SeqCst);
}
@@ -98,14 +99,17 @@ impl index::File {
|| -> Result<_, Error<_>> {
let sorted_entries = index_entries_sorted_by_offset_ascending(
self,
- progress.add_child_with_id("collecting sorted index", ProgressId::CollectSortedIndexEntries.into()),
+ &mut progress.add_child_with_id(
+ "collecting sorted index".into(),
+ ProgressId::CollectSortedIndexEntries.into(),
+ ),
); /* Pack Traverse Collect sorted Entries */
let tree = crate::cache::delta::Tree::from_offsets_in_pack(
pack.path(),
sorted_entries.into_iter().map(Entry::from),
- |e| e.index_entry.pack_offset,
- |id| self.lookup(id).map(|idx| self.pack_offset_at_index(idx)),
- progress.add_child_with_id("indexing", ProgressId::TreeFromOffsetsObjects.into()),
+ &|e| e.index_entry.pack_offset,
+ &|id| self.lookup(id).map(|idx| self.pack_offset_at_index(idx)),
+ &mut progress.add_child_with_id("indexing".into(), ProgressId::TreeFromOffsetsObjects.into()),
should_interrupt,
self.object_hash,
)?;
@@ -153,8 +157,11 @@ impl index::File {
}
},
traverse::Options {
- object_progress: progress.add_child_with_id("Resolving", ProgressId::DecodedObjects.into()),
- size_progress: progress.add_child_with_id("Decoding", ProgressId::DecodedBytes.into()),
+ object_progress: Box::new(
+ progress.add_child_with_id("Resolving".into(), ProgressId::DecodedObjects.into()),
+ ),
+ size_progress:
+ &mut progress.add_child_with_id("Decoding".into(), ProgressId::DecodedBytes.into()),
thread_limit,
should_interrupt,
object_hash: self.object_hash,
@@ -167,7 +174,6 @@ impl index::File {
Ok(Outcome {
actual_index_checksum: verify_result?,
statistics: traversal_result?,
- progress,
})
}
}
diff --git a/vendor/gix-pack/src/index/traverse/with_lookup.rs b/vendor/gix-pack/src/index/traverse/with_lookup.rs
index 0165e4e01..3759dae5e 100644
--- a/vendor/gix-pack/src/index/traverse/with_lookup.rs
+++ b/vendor/gix-pack/src/index/traverse/with_lookup.rs
@@ -1,9 +1,11 @@
use std::sync::atomic::{AtomicBool, Ordering};
+use gix_features::progress::{Count, DynNestedProgress};
use gix_features::{
parallel::{self, in_parallel_if},
progress::{self, Progress},
threading::{lock, Mutable, OwnShared},
+ zlib,
};
use super::{Error, Reducer};
@@ -65,37 +67,34 @@ impl index::File {
/// waste while decoding objects.
///
/// For more details, see the documentation on the [`traverse()`][index::File::traverse()] method.
- pub fn traverse_with_lookup<P, C, Processor, E, F>(
+ pub fn traverse_with_lookup<C, Processor, E, F>(
&self,
mut processor: Processor,
pack: &data::File,
- mut progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
Options {
thread_limit,
check,
make_pack_lookup_cache,
}: Options<F>,
- ) -> Result<Outcome<P>, Error<E>>
+ ) -> Result<Outcome, Error<E>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
E: std::error::Error + Send + Sync + 'static,
- Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn gix_features::progress::RawProgress) -> Result<(), E>
- + Send
- + Clone,
+ Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn Progress) -> Result<(), E> + Send + Clone,
F: Fn() -> C + Send + Clone,
{
let (verify_result, traversal_result) = parallel::join(
{
- let pack_progress = progress.add_child_with_id(
+ let mut pack_progress = progress.add_child_with_id(
format!(
"Hash of pack '{}'",
pack.path().file_name().expect("pack has filename").to_string_lossy()
),
ProgressId::HashPackDataBytes.into(),
);
- let index_progress = progress.add_child_with_id(
+ let mut index_progress = progress.add_child_with_id(
format!(
"Hash of index '{}'",
self.path.file_name().expect("index has filename").to_string_lossy()
@@ -103,7 +102,8 @@ impl index::File {
ProgressId::HashPackIndexBytes.into(),
);
move || {
- let res = self.possibly_verify(pack, check, pack_progress, index_progress, should_interrupt);
+ let res =
+ self.possibly_verify(pack, check, &mut pack_progress, &mut index_progress, should_interrupt);
if res.is_err() {
should_interrupt.store(true, Ordering::SeqCst);
}
@@ -113,7 +113,10 @@ impl index::File {
|| {
let index_entries = util::index_entries_sorted_by_offset_ascending(
self,
- progress.add_child_with_id("collecting sorted index", ProgressId::CollectSortedIndexEntries.into()),
+ &mut progress.add_child_with_id(
+ "collecting sorted index".into(),
+ ProgressId::CollectSortedIndexEntries.into(),
+ ),
);
let (chunk_size, thread_limit, available_cores) =
@@ -121,7 +124,7 @@ impl index::File {
let there_are_enough_entries_to_process = || index_entries.len() > chunk_size * available_cores;
let input_chunks = index_entries.chunks(chunk_size.max(chunk_size));
let reduce_progress = OwnShared::new(Mutable::new({
- let mut p = progress.add_child_with_id("Traversing", ProgressId::DecodedObjects.into());
+ let mut p = progress.add_child_with_id("Traversing".into(), ProgressId::DecodedObjects.into());
p.init(Some(self.num_objects() as usize), progress::count("objects"));
p
}));
@@ -131,6 +134,7 @@ impl index::File {
(
make_pack_lookup_cache(),
Vec::with_capacity(2048), // decode buffer
+ zlib::Inflate::default(),
lock(&reduce_progress)
.add_child_with_id(format!("thread {index}"), gix_features::progress::UNKNOWN), // per thread progress
)
@@ -143,7 +147,7 @@ impl index::File {
thread_limit,
state_per_thread,
move |entries: &[index::Entry],
- (cache, buf, progress)|
+ (cache, buf, inflate, progress)|
-> Result<Vec<data::decode::entry::Outcome>, Error<_>> {
progress.init(
Some(entries.len()),
@@ -157,6 +161,7 @@ impl index::File {
pack,
cache,
buf,
+ inflate,
progress,
index_entry,
&mut processor,
@@ -183,7 +188,6 @@ impl index::File {
Ok(Outcome {
actual_index_checksum: verify_result?,
statistics: traversal_result?,
- progress,
})
}
}
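
The per-thread state tuple above gains a reusable `zlib::Inflate`, so no decompressor is constructed per object. A std-only sketch of that shape, with plain arithmetic standing in for decoding:

use std::thread;

fn main() {
    let work: Vec<Vec<u32>> = vec![vec![1, 2], vec![3], vec![4, 5, 6], vec![7]];
    let totals: Vec<u32> = thread::scope(|s| {
        let handles: Vec<_> = work
            .chunks(2) // each worker takes a slice of chunks
            .map(|chunks| {
                s.spawn(move || {
                    // Per-thread state, built once and reused for every chunk,
                    // mirroring the (cache, buf, inflate, progress) tuple above.
                    let mut buf: Vec<u8> = Vec::with_capacity(2048);
                    let mut total = 0u32;
                    for chunk in chunks {
                        buf.clear(); // reuse the allocation, as with the decode buffer
                        total += chunk.iter().sum::<u32>();
                    }
                    total
                })
            })
            .collect();
        handles.into_iter().map(|h| h.join().unwrap()).collect()
    });
    assert_eq!(totals.iter().sum::<u32>(), 28);
}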
diff --git a/vendor/gix-pack/src/index/util.rs b/vendor/gix-pack/src/index/util.rs
index 284ee6158..2549429f9 100644
--- a/vendor/gix-pack/src/index/util.rs
+++ b/vendor/gix-pack/src/index/util.rs
@@ -1,10 +1,10 @@
-use std::{io, time::Instant};
+use std::time::Instant;
use gix_features::progress::{self, Progress};
pub(crate) fn index_entries_sorted_by_offset_ascending(
idx: &crate::index::File,
- mut progress: impl Progress,
+ progress: &mut dyn Progress,
) -> Vec<crate::index::Entry> {
progress.init(Some(idx.num_objects as usize), progress::count("entries"));
let start = Instant::now();
@@ -19,29 +19,3 @@ pub(crate) fn index_entries_sorted_by_offset_ascending(
progress.show_throughput(start);
v
}
-
-pub(crate) struct Count<W> {
- pub bytes: u64,
- pub inner: W,
-}
-
-impl<W> Count<W> {
- pub fn new(inner: W) -> Self {
- Count { bytes: 0, inner }
- }
-}
-
-impl<W> io::Write for Count<W>
-where
- W: io::Write,
-{
- fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- let written = self.inner.write(buf)?;
- self.bytes += written as u64;
- Ok(written)
- }
-
- fn flush(&mut self) -> io::Result<()> {
- self.inner.flush()
- }
-}
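
The `Count` writer removed here moves verbatim into `index/encode.rs` (shown in full above). A short usage sketch of the pattern, wrapping any `io::Write` to track how many bytes went through:

use std::io::{self, Write};

struct Count<W> {
    bytes: u64,
    inner: W,
}

impl<W: Write> Write for Count<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let written = self.inner.write(buf)?;
        self.bytes += written as u64; // count what was actually accepted
        Ok(written)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}

fn main() -> io::Result<()> {
    let mut out = Count { bytes: 0, inner: Vec::new() };
    out.write_all(b"PACK")?;
    out.flush()?;
    assert_eq!(out.bytes, 4);
    assert_eq!(out.inner.len(), 4);
    Ok(())
}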
diff --git a/vendor/gix-pack/src/index/verify.rs b/vendor/gix-pack/src/index/verify.rs
index 6af352ac9..d259a6a38 100644
--- a/vendor/gix-pack/src/index/verify.rs
+++ b/vendor/gix-pack/src/index/verify.rs
@@ -1,6 +1,6 @@
use std::sync::atomic::AtomicBool;
-use gix_features::progress::Progress;
+use gix_features::progress::{DynNestedProgress, Progress};
use gix_object::{bstr::ByteSlice, WriteTo};
use crate::index;
@@ -15,6 +15,8 @@ pub mod integrity {
#[derive(thiserror::Error, Debug)]
#[allow(missing_docs)]
pub enum Error {
+ #[error("Reserialization of an object failed")]
+ Io(#[from] std::io::Error),
#[error("The fan at index {index} is out of order as it's larger then the following value.")]
Fan { index: usize },
#[error("{kind} object {id} could not be decoded")]
@@ -33,13 +35,11 @@ pub mod integrity {
}
/// Returned by [`index::File::verify_integrity()`][crate::index::File::verify_integrity()].
- pub struct Outcome<P> {
+ pub struct Outcome {
/// The computed checksum of the index which matched the stored one.
pub actual_index_checksum: gix_hash::ObjectId,
/// The packs traversal outcome, if one was provided
pub pack_traverse_statistics: Option<crate::index::traverse::Statistics>,
- /// The provided progress instance.
- pub progress: P,
}
/// Additional options to define how the integrity should be verified.
@@ -136,7 +136,7 @@ impl index::File {
/// of this index file, and return it if it does.
pub fn verify_checksum(
&self,
- progress: impl Progress,
+ progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
) -> Result<gix_hash::ObjectId, checksum::Error> {
crate::verify::checksum_on_disk_or_mmap(
@@ -166,14 +166,13 @@ impl index::File {
///
/// The given `progress` is inevitably consumed if there is an error, which is a tradeoff chosen to easily allow using `?` in the
/// error case.
- pub fn verify_integrity<P, C, F>(
+ pub fn verify_integrity<C, F>(
&self,
pack: Option<PackContext<'_, F>>,
- mut progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
- ) -> Result<integrity::Outcome<P>, index::traverse::Error<index::verify::integrity::Error>>
+ ) -> Result<integrity::Outcome, index::traverse::Error<index::verify::integrity::Error>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
F: Fn() -> C + Send + Clone,
{
@@ -214,18 +213,17 @@ impl index::File {
.map(|o| integrity::Outcome {
actual_index_checksum: o.actual_index_checksum,
pack_traverse_statistics: Some(o.statistics),
- progress: o.progress,
}),
None => self
.verify_checksum(
- progress.add_child_with_id("Sha1 of index", integrity::ProgressId::ChecksumBytes.into()),
+ &mut progress
+ .add_child_with_id("Sha1 of index".into(), integrity::ProgressId::ChecksumBytes.into()),
should_interrupt,
)
.map_err(Into::into)
.map(|id| integrity::Outcome {
actual_index_checksum: id,
pack_traverse_statistics: None,
- progress,
}),
}
}
@@ -237,7 +235,7 @@ impl index::File {
object_kind: gix_object::Kind,
buf: &[u8],
index_entry: &index::Entry,
- progress: &dyn gix_features::progress::RawProgress,
+ progress: &dyn gix_features::progress::Progress,
) -> Result<(), integrity::Error> {
if let Mode::HashCrc32Decode | Mode::HashCrc32DecodeEncode = verify_mode {
use gix_object::Kind::*;
@@ -252,9 +250,7 @@ impl index::File {
})?;
if let Mode::HashCrc32DecodeEncode = verify_mode {
encode_buf.clear();
- object
- .write_to(&mut *encode_buf)
- .expect("writing to a memory buffer never fails");
+ object.write_to(&mut *encode_buf)?;
if encode_buf.as_slice() != buf {
let mut should_return_error = true;
if let Tree = object_kind {
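
The last hunk swaps an `.expect("writing to a memory buffer never fails")` for `?`, backed by the new `Io` variant added to `integrity::Error` above. A hand-rolled sketch of that plumbing; the real code derives the conversion with thiserror's `#[from]`:

#[derive(Debug)]
enum Error {
    Io(std::io::Error),
}

impl From<std::io::Error> for Error {
    fn from(err: std::io::Error) -> Self {
        Error::Io(err)
    }
}

fn reserialize(object: &[u8], encode_buf: &mut Vec<u8>) -> Result<(), Error> {
    use std::io::Write;
    encode_buf.clear();
    encode_buf.write_all(object)?; // the io::Error converts via From
    Ok(())
}

fn main() {
    let mut buf = Vec::new();
    reserialize(b"tree 0\0", &mut buf).unwrap();
    assert_eq!(buf, b"tree 0\0");
}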
diff --git a/vendor/gix-pack/src/index/write/encode.rs b/vendor/gix-pack/src/index/write/encode.rs
deleted file mode 100644
index f1195875c..000000000
--- a/vendor/gix-pack/src/index/write/encode.rs
+++ /dev/null
@@ -1,124 +0,0 @@
-use std::{cmp::Ordering, io};
-
-pub(crate) const LARGE_OFFSET_THRESHOLD: u64 = 0x7fff_ffff;
-pub(crate) const HIGH_BIT: u32 = 0x8000_0000;
-
-use gix_features::{
- hash,
- progress::{self, Progress},
-};
-
-use crate::index::{util::Count, V2_SIGNATURE};
-
-pub(crate) fn write_to(
- out: impl io::Write,
- entries_sorted_by_oid: Vec<crate::cache::delta::Item<crate::index::write::TreeEntry>>,
- pack_hash: &gix_hash::ObjectId,
- kind: crate::index::Version,
- mut progress: impl Progress,
-) -> io::Result<gix_hash::ObjectId> {
- use io::Write;
- assert_eq!(kind, crate::index::Version::V2, "Can only write V2 packs right now");
- assert!(
- entries_sorted_by_oid.len() <= u32::MAX as usize,
- "a pack cannot have more than u32::MAX objects"
- );
-
- // Write header
- let mut out = Count::new(std::io::BufWriter::with_capacity(
- 8 * 4096,
- hash::Write::new(out, kind.hash()),
- ));
- out.write_all(V2_SIGNATURE)?;
- out.write_all(&(kind as u32).to_be_bytes())?;
-
- progress.init(Some(4), progress::steps());
- let start = std::time::Instant::now();
- let _info = progress.add_child_with_id("writing fan-out table", gix_features::progress::UNKNOWN);
- let fan_out = fanout(entries_sorted_by_oid.iter().map(|e| e.data.id.first_byte()));
-
- for value in fan_out.iter() {
- out.write_all(&value.to_be_bytes())?;
- }
-
- progress.inc();
- let _info = progress.add_child_with_id("writing ids", gix_features::progress::UNKNOWN);
- for entry in &entries_sorted_by_oid {
- out.write_all(entry.data.id.as_slice())?;
- }
-
- progress.inc();
- let _info = progress.add_child_with_id("writing crc32", gix_features::progress::UNKNOWN);
- for entry in &entries_sorted_by_oid {
- out.write_all(&entry.data.crc32.to_be_bytes())?;
- }
-
- progress.inc();
- let _info = progress.add_child_with_id("writing offsets", gix_features::progress::UNKNOWN);
- {
- let mut offsets64 = Vec::<u64>::new();
- for entry in &entries_sorted_by_oid {
- let offset: u32 = if entry.offset > LARGE_OFFSET_THRESHOLD {
- assert!(
- offsets64.len() < LARGE_OFFSET_THRESHOLD as usize,
- "Encoding breakdown - way too many 64bit offsets"
- );
- offsets64.push(entry.offset);
- ((offsets64.len() - 1) as u32) | HIGH_BIT
- } else {
- entry.offset as u32
- };
- out.write_all(&offset.to_be_bytes())?;
- }
- for value in offsets64 {
- out.write_all(&value.to_be_bytes())?;
- }
- }
-
- out.write_all(pack_hash.as_slice())?;
-
- let bytes_written_without_trailer = out.bytes;
- let mut out = out.inner.into_inner()?;
- let index_hash: gix_hash::ObjectId = out.hash.digest().into();
- out.inner.write_all(index_hash.as_slice())?;
- out.inner.flush()?;
-
- progress.inc();
- progress.show_throughput_with(
- start,
- (bytes_written_without_trailer + 20) as usize,
- progress::bytes().expect("unit always set"),
- progress::MessageLevel::Success,
- );
-
- Ok(index_hash)
-}
-
-pub(crate) fn fanout(iter: impl ExactSizeIterator<Item = u8>) -> [u32; 256] {
- let mut fan_out = [0u32; 256];
- let entries_len = iter.len() as u32;
- let mut iter = iter.enumerate();
- let mut idx_and_entry = iter.next();
- let mut upper_bound = 0;
-
- for (offset_be, byte) in fan_out.iter_mut().zip(0u8..=255) {
- *offset_be = match idx_and_entry.as_ref() {
- Some((_idx, first_byte)) => match first_byte.cmp(&byte) {
- Ordering::Less => unreachable!("ids should be ordered, and we make sure to keep ahead with them"),
- Ordering::Greater => upper_bound,
- Ordering::Equal => {
- if byte == 255 {
- entries_len
- } else {
- idx_and_entry = iter.find(|(_, first_byte)| *first_byte != byte);
- upper_bound = idx_and_entry.as_ref().map_or(entries_len, |(idx, _)| *idx as u32);
- upper_bound
- }
- }
- },
- None => entries_len,
- };
- }
-
- fan_out
-}
diff --git a/vendor/gix-pack/src/index/write/mod.rs b/vendor/gix-pack/src/index/write/mod.rs
index 72a076a85..d1402fa86 100644
--- a/vendor/gix-pack/src/index/write/mod.rs
+++ b/vendor/gix-pack/src/index/write/mod.rs
@@ -1,11 +1,11 @@
use std::{convert::TryInto, io, sync::atomic::AtomicBool};
pub use error::Error;
-use gix_features::progress::{self, Progress};
+use gix_features::progress::prodash::DynNestedProgress;
+use gix_features::progress::{self, Count, Progress};
use crate::cache::delta::{traverse, Tree};
-pub(crate) mod encode;
mod error;
pub(crate) struct TreeEntry {
@@ -83,13 +83,13 @@ impl crate::index::File {
/// It should return `None` if the entry cannot be resolved from the pack that produced the `entries` iterator, causing
/// the write operation to fail.
#[allow(clippy::too_many_arguments)]
- pub fn write_data_iter_to_stream<F, F2, R, P>(
+ pub fn write_data_iter_to_stream<F, F2, R>(
version: crate::index::Version,
make_resolver: F,
- entries: impl Iterator<Item = Result<crate::data::input::Entry, crate::data::input::Error>>,
+ entries: &mut dyn Iterator<Item = Result<crate::data::input::Entry, crate::data::input::Error>>,
thread_limit: Option<usize>,
- mut root_progress: P,
- out: impl io::Write,
+ root_progress: &mut dyn DynNestedProgress,
+ out: &mut dyn io::Write,
should_interrupt: &AtomicBool,
object_hash: gix_hash::Kind,
pack_version: crate::data::Version,
@@ -98,7 +98,6 @@ impl crate::index::File {
F: FnOnce() -> io::Result<(F2, R)>,
R: Send + Sync,
F2: for<'r> Fn(crate::data::EntryRange, &'r R) -> Option<&'r [u8]> + Send + Clone,
- P: Progress,
{
if version != crate::index::Version::default() {
return Err(Error::Unsupported(version));
@@ -111,10 +110,10 @@ impl crate::index::File {
let indexing_start = std::time::Instant::now();
root_progress.init(Some(4), progress::steps());
- let mut objects_progress = root_progress.add_child_with_id("indexing", ProgressId::IndexObjects.into());
+ let mut objects_progress = root_progress.add_child_with_id("indexing".into(), ProgressId::IndexObjects.into());
objects_progress.init(Some(anticipated_num_objects), progress::count("objects"));
let mut decompressed_progress =
- root_progress.add_child_with_id("decompressing", ProgressId::DecompressedBytes.into());
+ root_progress.add_child_with_id("decompressing".into(), ProgressId::DecompressedBytes.into());
decompressed_progress.init(None, progress::bytes());
let mut pack_entries_end: u64 = 0;
@@ -199,8 +198,11 @@ impl crate::index::File {
Ok::<_, Error>(())
},
traverse::Options {
- object_progress: root_progress.add_child_with_id("Resolving", ProgressId::ResolveObjects.into()),
- size_progress: root_progress.add_child_with_id("Decoding", ProgressId::DecodedBytes.into()),
+ object_progress: Box::new(
+ root_progress.add_child_with_id("Resolving".into(), ProgressId::ResolveObjects.into()),
+ ),
+ size_progress: &mut root_progress
+ .add_child_with_id("Decoding".into(), ProgressId::DecodedBytes.into()),
thread_limit,
should_interrupt,
object_hash,
@@ -211,7 +213,8 @@ impl crate::index::File {
let mut items = roots;
items.extend(children);
{
- let _progress = root_progress.add_child_with_id("sorting by id", gix_features::progress::UNKNOWN);
+ let _progress =
+ root_progress.add_child_with_id("sorting by id".into(), gix_features::progress::UNKNOWN);
items.sort_by_key(|e| e.data.id);
}
@@ -229,12 +232,12 @@ impl crate::index::File {
}
None => return Err(Error::IteratorInvariantTrailer),
};
- let index_hash = encode::write_to(
+ let index_hash = crate::index::encode::write_to(
out,
sorted_pack_offsets_by_oid,
&pack_hash,
version,
- root_progress.add_child_with_id("writing index file", ProgressId::IndexBytesWritten.into()),
+ &mut root_progress.add_child_with_id("writing index file".into(), ProgressId::IndexBytesWritten.into()),
)?;
root_progress.show_throughput_with(
indexing_start,
diff --git a/vendor/gix-pack/src/multi_index/access.rs b/vendor/gix-pack/src/multi_index/access.rs
index 488f996d3..0150c7206 100644
--- a/vendor/gix-pack/src/multi_index/access.rs
+++ b/vendor/gix-pack/src/multi_index/access.rs
@@ -89,7 +89,7 @@ impl File {
prefix,
candidates,
&self.fan,
- |idx| self.oid_at_index(idx),
+ &|idx| self.oid_at_index(idx),
self.num_objects,
)
}
@@ -98,7 +98,7 @@ impl File {
///
/// Use this index for finding additional information via [`File::pack_id_and_pack_offset_at_index()`].
pub fn lookup(&self, id: impl AsRef<gix_hash::oid>) -> Option<EntryIndex> {
- crate::index::access::lookup(id, &self.fan, |idx| self.oid_at_index(idx))
+ crate::index::access::lookup(id.as_ref(), &self.fan, &|idx| self.oid_at_index(idx))
}
/// Given the `index` ranging from 0 to [File::num_objects()], return the pack index and its absolute offset into the pack.
diff --git a/vendor/gix-pack/src/multi_index/chunk.rs b/vendor/gix-pack/src/multi_index/chunk.rs
index 48a003ca0..86e43714d 100644
--- a/vendor/gix-pack/src/multi_index/chunk.rs
+++ b/vendor/gix-pack/src/multi_index/chunk.rs
@@ -82,7 +82,7 @@ pub mod index_names {
/// Write all `paths` in order to `out`, including padding.
pub fn write(
paths: impl IntoIterator<Item = impl AsRef<Path>>,
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
) -> std::io::Result<()> {
let mut written_bytes = 0;
for path in paths {
@@ -130,9 +130,9 @@ pub mod fanout {
/// Write the fanout for the given entries, which must be sorted by oid
pub(crate) fn write(
sorted_entries: &[multi_index::write::Entry],
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
) -> std::io::Result<()> {
- let fanout = crate::index::write::encode::fanout(sorted_entries.iter().map(|e| e.id.first_byte()));
+ let fanout = crate::index::encode::fanout(&mut sorted_entries.iter().map(|e| e.id.first_byte()));
for value in fanout.iter() {
out.write_all(&value.to_be_bytes())?;
@@ -157,7 +157,7 @@ pub mod lookup {
pub(crate) fn write(
sorted_entries: &[multi_index::write::Entry],
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
) -> std::io::Result<()> {
for entry in sorted_entries {
out.write_all(entry.id.as_slice())?;
@@ -188,9 +188,9 @@ pub mod offsets {
pub(crate) fn write(
sorted_entries: &[multi_index::write::Entry],
large_offsets_needed: bool,
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
) -> std::io::Result<()> {
- use crate::index::write::encode::{HIGH_BIT, LARGE_OFFSET_THRESHOLD};
+ use crate::index::encode::{HIGH_BIT, LARGE_OFFSET_THRESHOLD};
let mut num_large_offsets = 0u32;
for entry in sorted_entries {
@@ -226,7 +226,7 @@ pub mod offsets {
pub mod large_offsets {
use std::ops::Range;
- use crate::{index::write::encode::LARGE_OFFSET_THRESHOLD, multi_index};
+ use crate::{index::encode::LARGE_OFFSET_THRESHOLD, multi_index};
/// The id uniquely identifying the large offsets table (with 64 bit offsets)
pub const ID: gix_chunk::Id = *b"LOFF";
@@ -254,7 +254,7 @@ pub mod large_offsets {
pub(crate) fn write(
sorted_entries: &[multi_index::write::Entry],
mut num_large_offsets: usize,
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
) -> std::io::Result<()> {
for offset in sorted_entries
.iter()
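
A recurring signature change in this file, and throughout the diff, is `mut out: impl std::io::Write` becoming `out: &mut dyn std::io::Write`. A minimal sketch of why all call sites still work, with one compiled copy of the function instead of one per writer type:

use std::io::Write;

fn write_fanout(fanout: &[u32; 4], out: &mut dyn Write) -> std::io::Result<()> {
    for value in fanout {
        out.write_all(&value.to_be_bytes())?; // big-endian, as in the chunk writers
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    let mut buf = Vec::new();
    write_fanout(&[1, 2, 3, 4], &mut buf)?; // Vec<u8> coerces to &mut dyn Write
    assert_eq!(buf.len(), 16);
    write_fanout(&[1, 2, 3, 4], &mut std::io::sink())?; // so does any other writer
    Ok(())
}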
diff --git a/vendor/gix-pack/src/multi_index/verify.rs b/vendor/gix-pack/src/multi_index/verify.rs
index 856a48501..0903b3568 100644
--- a/vendor/gix-pack/src/multi_index/verify.rs
+++ b/vendor/gix-pack/src/multi_index/verify.rs
@@ -1,6 +1,6 @@
use std::{cmp::Ordering, sync::atomic::AtomicBool, time::Instant};
-use gix_features::progress::Progress;
+use gix_features::progress::{Count, DynNestedProgress, Progress};
use crate::{index, multi_index::File};
@@ -39,13 +39,11 @@ pub mod integrity {
}
/// Returned by [`multi_index::File::verify_integrity()`][crate::multi_index::File::verify_integrity()].
- pub struct Outcome<P> {
+ pub struct Outcome {
/// The computed checksum of the multi-index which matched the stored one.
pub actual_index_checksum: gix_hash::ObjectId,
        /// The pack traversal outcome corresponding to each entry in [`index_names()`][super::File::index_names()].
pub pack_traverse_statistics: Vec<crate::index::traverse::Statistics>,
- /// The provided progress instance.
- pub progress: P,
}
/// The progress ids used in [`multi_index::File::verify_integrity()`][crate::multi_index::File::verify_integrity()].
@@ -80,7 +78,7 @@ impl File {
/// of this index file, and return it if it does.
pub fn verify_checksum(
&self,
- progress: impl Progress,
+ progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
) -> Result<gix_hash::ObjectId, checksum::Error> {
crate::verify::checksum_on_disk_or_mmap(
@@ -96,14 +94,11 @@ impl File {
/// Similar to [`verify_integrity()`][File::verify_integrity()] but without any deep inspection of objects.
///
/// Instead we only validate the contents of the multi-index itself.
- pub fn verify_integrity_fast<P>(
+ pub fn verify_integrity_fast(
&self,
- progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
- ) -> Result<(gix_hash::ObjectId, P), integrity::Error>
- where
- P: Progress,
- {
+ ) -> Result<gix_hash::ObjectId, integrity::Error> {
self.verify_integrity_inner(
progress,
should_interrupt,
@@ -114,35 +109,33 @@ impl File {
index::traverse::Error::Processor(err) => err,
_ => unreachable!("BUG: no other error type is possible"),
})
- .map(|o| (o.actual_index_checksum, o.progress))
+ .map(|o| o.actual_index_checksum)
}
/// Similar to [`crate::Bundle::verify_integrity()`] but checks all contained indices and their packs.
///
/// Note that it's considered a failure if an index doesn't have a corresponding pack.
- pub fn verify_integrity<C, P, F>(
+ pub fn verify_integrity<C, F>(
&self,
- progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
options: index::verify::integrity::Options<F>,
- ) -> Result<integrity::Outcome<P>, index::traverse::Error<integrity::Error>>
+ ) -> Result<integrity::Outcome, index::traverse::Error<integrity::Error>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
F: Fn() -> C + Send + Clone,
{
self.verify_integrity_inner(progress, should_interrupt, true, options)
}
- fn verify_integrity_inner<C, P, F>(
+ fn verify_integrity_inner<C, F>(
&self,
- mut progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
deep_check: bool,
options: index::verify::integrity::Options<F>,
- ) -> Result<integrity::Outcome<P>, index::traverse::Error<integrity::Error>>
+ ) -> Result<integrity::Outcome, index::traverse::Error<integrity::Error>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
F: Fn() -> C + Send + Clone,
{
@@ -150,7 +143,7 @@ impl File {
let actual_index_checksum = self
.verify_checksum(
- progress.add_child_with_id(
+ &mut progress.add_child_with_id(
format!("{}: checksum", self.path.display()),
integrity::ProgressId::ChecksumBytes.into(),
),
@@ -176,7 +169,7 @@ impl File {
let mut pack_ids_and_offsets = Vec::with_capacity(self.num_objects as usize);
{
let order_start = Instant::now();
- let mut progress = progress.add_child_with_id("checking oid order", gix_features::progress::UNKNOWN);
+ let mut progress = progress.add_child_with_id("checking oid order".into(), gix_features::progress::UNKNOWN);
progress.init(
Some(self.num_objects as usize),
gix_features::progress::count("objects"),
@@ -238,8 +231,10 @@ impl File {
let multi_index_entries_to_check = &pack_ids_slice[..slice_end];
{
let offset_start = Instant::now();
- let mut offsets_progress =
- progress.add_child_with_id("verify object offsets", integrity::ProgressId::ObjectOffsets.into());
+ let mut offsets_progress = progress.add_child_with_id(
+ "verify object offsets".into(),
+ integrity::ProgressId::ObjectOffsets.into(),
+ );
offsets_progress.init(
Some(pack_ids_and_offsets.len()),
gix_features::progress::count("objects"),
@@ -278,7 +273,6 @@ impl File {
let crate::bundle::verify::integrity::Outcome {
actual_index_checksum: _,
pack_traverse_outcome,
- progress: returned_progress,
} = bundle
.verify_integrity(progress, should_interrupt, options.clone())
.map_err(|err| {
@@ -315,7 +309,6 @@ impl File {
Interrupted => Interrupted,
}
})?;
- progress = returned_progress;
pack_traverse_statistics.push(pack_traverse_outcome);
}
}
@@ -325,13 +318,12 @@ impl File {
"BUG: our slicing should allow to visit all objects"
);
- progress.set_name("Validating multi-pack");
+ progress.set_name("Validating multi-pack".into());
progress.show_throughput(operation_start);
Ok(integrity::Outcome {
actual_index_checksum,
pack_traverse_statistics,
- progress,
})
}
}
diff --git a/vendor/gix-pack/src/multi_index/write.rs b/vendor/gix-pack/src/multi_index/write.rs
index 9002af9eb..881033091 100644
--- a/vendor/gix-pack/src/multi_index/write.rs
+++ b/vendor/gix-pack/src/multi_index/write.rs
@@ -5,7 +5,7 @@ use std::{
time::{Instant, SystemTime},
};
-use gix_features::progress::Progress;
+use gix_features::progress::{Count, DynNestedProgress, Progress};
use crate::multi_index;
@@ -40,11 +40,9 @@ pub struct Options {
}
/// The result of [`multi_index::File::write_from_index_paths()`].
-pub struct Outcome<P> {
+pub struct Outcome {
/// The calculated multi-index checksum of the file at `multi_index_path`.
pub multi_index_checksum: gix_hash::ObjectId,
- /// The input progress
- pub progress: P,
}
/// The progress ids used in [`write_from_index_paths()`][multi_index::File::write_from_index_paths()].
@@ -79,16 +77,13 @@ impl multi_index::File {
/// Create a new multi-index file for writing to `out` from the pack index files at `index_paths`.
///
/// Progress is sent to `progress` and interruptions checked via `should_interrupt`.
- pub fn write_from_index_paths<P>(
+ pub fn write_from_index_paths(
mut index_paths: Vec<PathBuf>,
- out: impl std::io::Write,
- mut progress: P,
+ out: &mut dyn std::io::Write,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
Options { object_hash }: Options,
- ) -> Result<Outcome<P>, Error>
- where
- P: Progress,
- {
+ ) -> Result<Outcome, Error> {
let out = gix_features::hash::Write::new(out, object_hash);
let (index_paths_sorted, index_filenames_sorted) = {
index_paths.sort();
@@ -102,8 +97,10 @@ impl multi_index::File {
let entries = {
let mut entries = Vec::new();
let start = Instant::now();
- let mut progress =
- progress.add_child_with_id("Collecting entries", ProgressId::FromPathsCollectingEntries.into());
+ let mut progress = progress.add_child_with_id(
+ "Collecting entries".into(),
+ ProgressId::FromPathsCollectingEntries.into(),
+ );
progress.init(Some(index_paths_sorted.len()), gix_features::progress::count("indices"));
// This could be parallelized… but it's probably not worth it unless you have 500mio objects.
@@ -129,7 +126,7 @@ impl multi_index::File {
progress.show_throughput(start);
let start = Instant::now();
- progress.set_name("Deduplicate");
+ progress.set_name("Deduplicate".into());
progress.init(Some(entries.len()), gix_features::progress::count("entries"));
entries.sort_by(|l, r| {
l.id.cmp(&r.id)
@@ -168,7 +165,8 @@ impl multi_index::File {
);
}
- let mut write_progress = progress.add_child_with_id("Writing multi-index", ProgressId::BytesWritten.into());
+ let mut write_progress =
+ progress.add_child_with_id("Writing multi-index".into(), ProgressId::BytesWritten.into());
let write_start = Instant::now();
write_progress.init(
Some(cf.planned_storage_size() as usize + Self::HEADER_LEN),
@@ -187,7 +185,7 @@ impl multi_index::File {
)?;
{
- progress.set_name("Writing chunks");
+ progress.set_name("Writing chunks".into());
progress.init(Some(cf.num_chunks()), gix_features::progress::count("chunks"));
let mut chunk_write = cf.into_write(&mut out, bytes_written)?;
@@ -220,14 +218,11 @@ impl multi_index::File {
out.inner.inner.write_all(multi_index_checksum.as_slice())?;
out.progress.show_throughput(write_start);
- Ok(Outcome {
- multi_index_checksum,
- progress,
- })
+ Ok(Outcome { multi_index_checksum })
}
fn write_header(
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
num_chunks: u8,
num_indices: u32,
object_hash: gix_hash::Kind,
diff --git a/vendor/gix-pack/src/verify.rs b/vendor/gix-pack/src/verify.rs
index f985c8657..d502ada38 100644
--- a/vendor/gix-pack/src/verify.rs
+++ b/vendor/gix-pack/src/verify.rs
@@ -33,7 +33,7 @@ pub fn checksum_on_disk_or_mmap(
data: &[u8],
expected: gix_hash::ObjectId,
object_hash: gix_hash::Kind,
- mut progress: impl Progress,
+ progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
) -> Result<gix_hash::ObjectId, checksum::Error> {
let data_len_without_trailer = data.len() - object_hash.len_in_bytes();
@@ -41,7 +41,7 @@ pub fn checksum_on_disk_or_mmap(
data_path,
data_len_without_trailer,
object_hash,
- &mut progress,
+ progress,
should_interrupt,
) {
Ok(id) => id,