author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
commit    c23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree      2772049aaf84b5c9d0ed12ec8d86812f7a7904b6 /vendor/gix-pack/src/data/output
parent    Releasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
download  rustc-c23a457e72abe608715ac76f076f47dc42af07a5.tar.xz
          rustc-c23a457e72abe608715ac76f076f47dc42af07a5.zip
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/gix-pack/src/data/output')
-rw-r--r--  vendor/gix-pack/src/data/output/count/mod.rs               |   2
-rw-r--r--  vendor/gix-pack/src/data/output/count/objects/mod.rs       | 138
-rw-r--r--  vendor/gix-pack/src/data/output/count/objects/reduce.rs    |  23
-rw-r--r--  vendor/gix-pack/src/data/output/count/objects/types.rs     |  10
-rw-r--r--  vendor/gix-pack/src/data/output/entry/iter_from_counts.rs  |  27
-rw-r--r--  vendor/gix-pack/src/data/output/entry/mod.rs               |   9
6 files changed, 82 insertions(+), 127 deletions(-)
diff --git a/vendor/gix-pack/src/data/output/count/mod.rs b/vendor/gix-pack/src/data/output/count/mod.rs
index 0c33abd97..481ff65d3 100644
--- a/vendor/gix-pack/src/data/output/count/mod.rs
+++ b/vendor/gix-pack/src/data/output/count/mod.rs
@@ -45,5 +45,5 @@ pub use objects_impl::{objects, objects_unthreaded};
///
pub mod objects {
- pub use super::objects_impl::{Error, ObjectExpansion, Options, Outcome, Result};
+ pub use super::objects_impl::{Error, ObjectExpansion, Options, Outcome};
}
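
The hunk above removes the re-exported `Result` alias, so downstream code now spells out the concrete return type itself. A minimal sketch of the adjustment, assuming only the re-exports shown above (the local `CountResult` name is hypothetical):

    use gix_pack::data::output::{self, count::objects::{Error, Outcome}};

    // Hypothetical stand-in for the removed `count::objects::Result<E1, E2>`:
    // the (counts, statistics) pair combined with the now non-generic error.
    type CountResult = Result<(Vec<output::Count>, Outcome), Error>;
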
diff --git a/vendor/gix-pack/src/data/output/count/objects/mod.rs b/vendor/gix-pack/src/data/output/count/objects/mod.rs
index a13e41146..24810577c 100644
--- a/vendor/gix-pack/src/data/output/count/objects/mod.rs
+++ b/vendor/gix-pack/src/data/output/count/objects/mod.rs
@@ -1,12 +1,9 @@
-use std::{
- cell::RefCell,
- sync::{atomic::AtomicBool, Arc},
-};
+use std::{cell::RefCell, sync::atomic::AtomicBool};
-use gix_features::{parallel, progress::Progress};
+use gix_features::parallel;
use gix_hash::ObjectId;
-use crate::{data::output, find};
+use crate::data::output;
pub(in crate::data::output::count::objects_impl) mod reduce;
mod util;
@@ -16,9 +13,6 @@ pub use types::{Error, ObjectExpansion, Options, Outcome};
mod tree;
-/// The return type used by [`objects()`].
-pub type Result<E1, E2> = std::result::Result<(Vec<output::Count>, Outcome), Error<E1, E2>>;
-
/// Generate [`Count`][output::Count]s from input `objects` with object expansion based on [`options`][Options]
/// to learn which objects would constitute a pack. This step is required to know exactly how many objects would
/// be in a pack while keeping data around to minimize object database access.
@@ -29,29 +23,25 @@ pub type Result<E1, E2> = std::result::Result<(Vec<output::Count>, Outcome), Err
/// * `objects_ids`
/// * A list of objects ids to add to the pack. Duplication checks are performed so no object is ever added to a pack twice.
/// * Objects may be expanded based on the provided [`options`][Options]
-/// * `progress`
-///   * a way to obtain progress information
+/// * `objects`
+///   * counts the number of objects we encounter
/// * `should_interrupt`
/// * A flag that is set to true if the operation should stop
/// * `options`
/// * more configuration
-pub fn objects<Find, Iter, IterErr, Oid>(
+pub fn objects<Find>(
db: Find,
- objects_ids: Iter,
- progress: impl Progress,
+ objects_ids: Box<dyn Iterator<Item = Result<ObjectId, Box<dyn std::error::Error + Send + Sync + 'static>>> + Send>,
+ objects: &dyn gix_features::progress::Count,
should_interrupt: &AtomicBool,
Options {
thread_limit,
input_object_expansion,
chunk_size,
}: Options,
-) -> Result<find::existing::Error<Find::Error>, IterErr>
+) -> Result<(Vec<output::Count>, Outcome), Error>
where
Find: crate::Find + Send + Clone,
- <Find as crate::Find>::Error: Send,
- Iter: Iterator<Item = std::result::Result<Oid, IterErr>> + Send,
- Oid: Into<ObjectId> + Send,
- IterErr: std::error::Error + Send,
{
let lower_bound = objects_ids.size_hint().0;
let (chunk_size, thread_limit, _) = parallel::optimize_chunk_size_and_thread_limit(
@@ -65,71 +55,59 @@ where
size: chunk_size,
};
let seen_objs = gix_hashtable::sync::ObjectIdMap::default();
- let progress = Arc::new(parking_lot::Mutex::new(progress));
+ let objects = objects.counter();
parallel::in_parallel(
chunks,
thread_limit,
{
- let progress = Arc::clone(&progress);
- move |n| {
+ move |_| {
(
Vec::new(), // object data buffer
Vec::new(), // object data buffer 2 to hold two objects at a time
- {
- let mut p = progress
- .lock()
- .add_child_with_id(format!("thread {n}"), gix_features::progress::UNKNOWN);
- p.init(None, gix_features::progress::count("objects"));
- p
- },
+ objects.clone(),
)
}
},
{
let seen_objs = &seen_objs;
- move |oids: Vec<std::result::Result<Oid, IterErr>>, (buf1, buf2, progress)| {
+ move |oids: Vec<_>, (buf1, buf2, objects)| {
expand::this(
&db,
input_object_expansion,
seen_objs,
- oids,
+ &mut oids.into_iter(),
buf1,
buf2,
- progress,
+ objects,
should_interrupt,
true, /*allow pack lookups*/
)
}
},
- reduce::Statistics::new(progress),
+ reduce::Statistics::new(),
)
}
/// Like [`objects()`], but using only a single thread to save on the otherwise required overhead.
-pub fn objects_unthreaded<Find, IterErr, Oid>(
- db: Find,
- object_ids: impl Iterator<Item = std::result::Result<Oid, IterErr>>,
- mut progress: impl Progress,
+pub fn objects_unthreaded(
+ db: &dyn crate::Find,
+ object_ids: &mut dyn Iterator<Item = Result<ObjectId, Box<dyn std::error::Error + Send + Sync + 'static>>>,
+ objects: &dyn gix_features::progress::Count,
should_interrupt: &AtomicBool,
input_object_expansion: ObjectExpansion,
-) -> Result<find::existing::Error<Find::Error>, IterErr>
-where
- Find: crate::Find,
- Oid: Into<ObjectId>,
- IterErr: std::error::Error,
-{
+) -> Result<(Vec<output::Count>, Outcome), Error> {
let seen_objs = RefCell::new(gix_hashtable::HashSet::default());
let (mut buf1, mut buf2) = (Vec::new(), Vec::new());
expand::this(
- &db,
+ db,
input_object_expansion,
&seen_objs,
object_ids,
&mut buf1,
&mut buf2,
- &mut progress,
+ &objects.counter(),
should_interrupt,
false, /*allow pack lookups*/
)
@@ -138,7 +116,6 @@ where
mod expand {
use std::sync::atomic::{AtomicBool, Ordering};
- use gix_features::progress::Progress;
use gix_hash::{oid, ObjectId};
use gix_object::{CommitRefIter, TagRefIter};
@@ -149,26 +126,21 @@ mod expand {
};
use crate::{
data::{output, output::count::PackLocation},
- find, FindExt,
+ FindExt,
};
#[allow(clippy::too_many_arguments)]
- pub fn this<Find, IterErr, Oid>(
- db: &Find,
+ pub fn this(
+ db: &dyn crate::Find,
input_object_expansion: ObjectExpansion,
seen_objs: &impl util::InsertImmutable,
- oids: impl IntoIterator<Item = std::result::Result<Oid, IterErr>>,
+ oids: &mut dyn Iterator<Item = Result<ObjectId, Box<dyn std::error::Error + Send + Sync + 'static>>>,
buf1: &mut Vec<u8>,
#[allow(clippy::ptr_arg)] buf2: &mut Vec<u8>,
- progress: &mut impl Progress,
+ objects: &gix_features::progress::AtomicStep,
should_interrupt: &AtomicBool,
allow_pack_lookups: bool,
- ) -> super::Result<find::existing::Error<Find::Error>, IterErr>
- where
- Find: crate::Find,
- Oid: Into<ObjectId>,
- IterErr: std::error::Error,
- {
+ ) -> Result<(Vec<output::Count>, Outcome), Error> {
use ObjectExpansion::*;
let mut out = Vec::new();
@@ -180,13 +152,13 @@ mod expand {
let mut outcome = Outcome::default();
let stats = &mut outcome;
- for id in oids.into_iter() {
+ for id in oids {
if should_interrupt.load(Ordering::Relaxed) {
return Err(Error::Interrupted);
}
- let id = id.map(|oid| oid.into()).map_err(Error::InputIteration)?;
- let (obj, location) = db.find(id, buf1)?;
+ let id = id.map_err(Error::InputIteration)?;
+ let (obj, location) = db.find(&id, buf1)?;
stats.input_objects += 1;
match input_object_expansion {
TreeAdditionsComparedToAncestor => {
@@ -196,14 +168,14 @@ mod expand {
let mut id = id.to_owned();
loop {
- push_obj_count_unique(&mut out, seen_objs, &id, location, progress, stats, false);
+ push_obj_count_unique(&mut out, seen_objs, &id, location, objects, stats, false);
match obj.kind {
Tree | Blob => break,
Tag => {
id = TagRefIter::from_bytes(obj.data)
.target_id()
.expect("every tag has a target");
- let tmp = db.find(id, buf1)?;
+ let tmp = db.find(&id, buf1)?;
obj = tmp.0;
location = tmp.1;
@@ -225,14 +197,14 @@ mod expand {
Err(err) => return Err(Error::CommitDecode(err)),
}
}
- let (obj, location) = db.find(tree_id, buf1)?;
+ let (obj, location) = db.find(&tree_id, buf1)?;
push_obj_count_unique(
- &mut out, seen_objs, &tree_id, location, progress, stats, true,
+ &mut out, seen_objs, &tree_id, location, objects, stats, true,
);
gix_object::TreeRefIter::from_bytes(obj.data)
};
- let objects = if parent_commit_ids.is_empty() {
+ let objects_ref = if parent_commit_ids.is_empty() {
traverse_delegate.clear();
gix_traverse::tree::breadthfirst(
current_tree_iter,
@@ -241,7 +213,7 @@ mod expand {
stats.decoded_objects += 1;
match db.find(oid, buf).ok() {
Some((obj, location)) => {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
stats.expanded_objects += 1;
out.push(output::Count::from_data(oid, location));
obj.try_into_tree_iter()
@@ -259,20 +231,20 @@ mod expand {
let (parent_commit_obj, location) = db.find(commit_id, buf2)?;
push_obj_count_unique(
- &mut out, seen_objs, commit_id, location, progress, stats, true,
+ &mut out, seen_objs, commit_id, location, objects, stats, true,
);
CommitRefIter::from_bytes(parent_commit_obj.data)
.tree_id()
.expect("every commit has a tree")
};
let parent_tree = {
- let (parent_tree_obj, location) = db.find(parent_tree_id, buf2)?;
+ let (parent_tree_obj, location) = db.find(&parent_tree_id, buf2)?;
push_obj_count_unique(
&mut out,
seen_objs,
&parent_tree_id,
location,
- progress,
+ objects,
stats,
true,
);
@@ -294,8 +266,8 @@ mod expand {
}
&changes_delegate.objects
};
- for id in objects.iter() {
- out.push(id_to_count(db, buf2, id, progress, stats, allow_pack_lookups));
+ for id in objects_ref.iter() {
+ out.push(id_to_count(db, buf2, id, objects, stats, allow_pack_lookups));
}
break;
}
@@ -307,7 +279,7 @@ mod expand {
let mut id = id;
let mut obj = (obj, location);
loop {
- push_obj_count_unique(&mut out, seen_objs, &id, obj.1.clone(), progress, stats, false);
+ push_obj_count_unique(&mut out, seen_objs, &id, obj.1.clone(), objects, stats, false);
match obj.0.kind {
Tree => {
traverse_delegate.clear();
@@ -318,7 +290,7 @@ mod expand {
stats.decoded_objects += 1;
match db.find(oid, buf).ok() {
Some((obj, location)) => {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
stats.expanded_objects += 1;
out.push(output::Count::from_data(oid, location));
obj.try_into_tree_iter()
@@ -330,7 +302,7 @@ mod expand {
)
.map_err(Error::TreeTraverse)?;
for id in &traverse_delegate.non_trees {
- out.push(id_to_count(db, buf1, id, progress, stats, allow_pack_lookups));
+ out.push(id_to_count(db, buf1, id, objects, stats, allow_pack_lookups));
}
break;
}
@@ -339,7 +311,7 @@ mod expand {
.tree_id()
.expect("every commit has a tree");
stats.expanded_objects += 1;
- obj = db.find(id, buf1)?;
+ obj = db.find(&id, buf1)?;
continue;
}
Blob => break,
@@ -348,13 +320,13 @@ mod expand {
.target_id()
.expect("every tag has a target");
stats.expanded_objects += 1;
- obj = db.find(id, buf1)?;
+ obj = db.find(&id, buf1)?;
continue;
}
}
}
}
- AsIs => push_obj_count_unique(&mut out, seen_objs, &id, location, progress, stats, false),
+ AsIs => push_obj_count_unique(&mut out, seen_objs, &id, location, objects, stats, false),
}
}
outcome.total_objects = out.len();
@@ -367,13 +339,13 @@ mod expand {
all_seen: &impl util::InsertImmutable,
id: &oid,
location: Option<crate::data::entry::Location>,
- progress: &mut impl Progress,
+ objects: &gix_features::progress::AtomicStep,
statistics: &mut Outcome,
count_expanded: bool,
) {
let inserted = all_seen.insert(id.to_owned());
if inserted {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
statistics.decoded_objects += 1;
if count_expanded {
statistics.expanded_objects += 1;
@@ -383,15 +355,15 @@ mod expand {
}
#[inline]
- fn id_to_count<Find: crate::Find>(
- db: &Find,
+ fn id_to_count(
+ db: &dyn crate::Find,
buf: &mut Vec<u8>,
id: &oid,
- progress: &mut impl Progress,
+ objects: &gix_features::progress::AtomicStep,
statistics: &mut Outcome,
allow_pack_lookups: bool,
) -> output::Count {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
statistics.expanded_objects += 1;
output::Count {
id: id.to_owned(),
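
The hunks above replace the generic `Iter`/`Oid`/`IterErr` parameters with a boxed, type-erased input iterator and trade the per-thread `Progress` instances for a shared atomic counter obtained via `Count::counter()`. A sketch of how a caller might adapt, based only on the signatures visible in this patch; the `count_objects` wrapper and the choice of `Discard` are illustrative:

    use std::sync::atomic::AtomicBool;

    use gix_hash::ObjectId;
    use gix_pack::data::output;

    fn count_objects(
        db: impl gix_pack::Find + Send + Clone,
        ids: Vec<ObjectId>,
    ) -> Result<(Vec<output::Count>, output::count::objects::Outcome), output::count::objects::Error> {
        let should_interrupt = AtomicBool::new(false);
        // The input is now a boxed iterator over `Result<ObjectId, Box<dyn Error + Send + Sync>>`
        // instead of a generic `Iter: Iterator<Item = Result<Oid, IterErr>>`.
        let input: Box<
            dyn Iterator<Item = Result<ObjectId, Box<dyn std::error::Error + Send + Sync + 'static>>> + Send,
        > = Box::new(ids.into_iter().map(Ok));
        output::count::objects(
            db,
            input,
            &gix_features::progress::Discard, // any `Count` implementor works; `Discard` drops all updates
            &should_interrupt,
            Default::default(), // `Options { thread_limit, input_object_expansion, chunk_size }`
        )
    }
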
diff --git a/vendor/gix-pack/src/data/output/count/objects/reduce.rs b/vendor/gix-pack/src/data/output/count/objects/reduce.rs
index c6a61d467..03144b60f 100644
--- a/vendor/gix-pack/src/data/output/count/objects/reduce.rs
+++ b/vendor/gix-pack/src/data/output/count/objects/reduce.rs
@@ -1,35 +1,27 @@
-use std::{marker::PhantomData, sync::Arc};
+use std::marker::PhantomData;
-use gix_features::{parallel, progress::Progress};
+use gix_features::parallel;
use super::Outcome;
use crate::data::output;
-pub struct Statistics<E, P> {
+pub struct Statistics<E> {
total: Outcome,
counts: Vec<output::Count>,
- progress: Arc<parking_lot::Mutex<P>>,
_err: PhantomData<E>,
}
-impl<E, P> Statistics<E, P>
-where
- P: Progress,
-{
- pub fn new(progress: Arc<parking_lot::Mutex<P>>) -> Self {
+impl<E> Statistics<E> {
+ pub fn new() -> Self {
Statistics {
total: Default::default(),
counts: Default::default(),
- progress,
- _err: PhantomData::default(),
+ _err: PhantomData,
}
}
}
-impl<E, P> parallel::Reduce for Statistics<E, P>
-where
- P: Progress,
-{
+impl<E> parallel::Reduce for Statistics<E> {
type Input = Result<(Vec<output::Count>, Outcome), E>;
type FeedProduce = ();
type Output = (Vec<output::Count>, Outcome);
@@ -38,7 +30,6 @@ where
fn feed(&mut self, item: Self::Input) -> Result<Self::FeedProduce, Self::Error> {
let (counts, stats) = item?;
self.total.aggregate(stats);
- self.progress.lock().inc_by(counts.len());
self.counts.extend(counts);
Ok(())
}
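
After this change the reducer no longer owns an `Arc<parking_lot::Mutex<P>>` progress handle; `feed()` only aggregates, since objects are counted in the worker threads through the shared atomic step. A stand-alone sketch of the same shape, assuming the `feed`/`finalize` contract of `gix_features::parallel::Reduce` (the `CountsReducer` type is illustrative; `finalize` is not shown in the hunk above):

    use std::marker::PhantomData;

    use gix_features::parallel;
    use gix_pack::data::output;

    struct CountsReducer<E> {
        counts: Vec<output::Count>,
        _err: PhantomData<E>,
    }

    impl<E> parallel::Reduce for CountsReducer<E> {
        type Input = Result<Vec<output::Count>, E>;
        type FeedProduce = ();
        type Output = Vec<output::Count>;
        type Error = E;

        fn feed(&mut self, item: Self::Input) -> Result<Self::FeedProduce, Self::Error> {
            self.counts.extend(item?); // aggregate only; no progress updates here anymore
            Ok(())
        }

        fn finalize(self) -> Result<Self::Output, Self::Error> {
            Ok(self.counts)
        }
    }
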
diff --git a/vendor/gix-pack/src/data/output/count/objects/types.rs b/vendor/gix-pack/src/data/output/count/objects/types.rs
index f39a24ee4..4b9ecea20 100644
--- a/vendor/gix-pack/src/data/output/count/objects/types.rs
+++ b/vendor/gix-pack/src/data/output/count/objects/types.rs
@@ -80,17 +80,13 @@ impl Default for Options {
/// The error returned by the pack generation iterator [`bytes::FromEntriesIter`][crate::data::output::bytes::FromEntriesIter].
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
-pub enum Error<FindErr, IterErr>
-where
- FindErr: std::error::Error + 'static,
- IterErr: std::error::Error + 'static,
-{
+pub enum Error {
#[error(transparent)]
CommitDecode(gix_object::decode::Error),
#[error(transparent)]
- FindExisting(#[from] FindErr),
+ FindExisting(#[from] crate::find::existing::Error),
#[error(transparent)]
- InputIteration(IterErr),
+ InputIteration(Box<dyn std::error::Error + Send + Sync + 'static>),
#[error(transparent)]
TreeTraverse(gix_traverse::tree::breadthfirst::Error),
#[error(transparent)]
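
With both type parameters gone, `Error` is a plain enum that callers can name and match on directly. A small sketch; the `describe` helper is hypothetical, and the catch-all arm covers variants not visible in this hunk:

    use gix_pack::data::output::count::objects::Error;

    fn describe(err: &Error) -> &'static str {
        match err {
            Error::CommitDecode(_) => "failed to decode a commit",
            Error::FindExisting(_) => "object lookup failed",
            Error::InputIteration(_) => "the input object stream failed",
            Error::TreeTraverse(_) => "tree traversal failed",
            _ => "interrupted or other failure",
        }
    }
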
diff --git a/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs b/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs
index dbe8b0b95..2bebf5b20 100644
--- a/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs
+++ b/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs
@@ -1,6 +1,7 @@
pub(crate) mod function {
use std::{cmp::Ordering, sync::Arc};
+ use gix_features::progress::prodash::{Count, DynNestedProgress};
use gix_features::{parallel, parallel::SequenceId, progress::Progress};
use super::{reduce, util, Error, Mode, Options, Outcome, ProgressId};
@@ -38,7 +39,7 @@ pub(crate) mod function {
pub fn iter_from_counts<Find>(
mut counts: Vec<output::Count>,
db: Find,
- mut progress: impl Progress + 'static,
+ mut progress: Box<dyn DynNestedProgress + 'static>,
Options {
version,
mode,
@@ -46,11 +47,10 @@ pub(crate) mod function {
thread_limit,
chunk_size,
}: Options,
- ) -> impl Iterator<Item = Result<(SequenceId, Vec<output::Entry>), Error<Find::Error>>>
- + parallel::reduce::Finalize<Reduce = reduce::Statistics<Error<Find::Error>>>
+ ) -> impl Iterator<Item = Result<(SequenceId, Vec<output::Entry>), Error>>
+ + parallel::reduce::Finalize<Reduce = reduce::Statistics<Error>>
where
Find: crate::Find + Send + Clone + 'static,
- <Find as crate::Find>::Error: Send,
{
assert!(
matches!(version, crate::data::Version::V2),
@@ -60,7 +60,7 @@ pub(crate) mod function {
parallel::optimize_chunk_size_and_thread_limit(chunk_size, Some(counts.len()), thread_limit, None);
{
let progress = Arc::new(parking_lot::Mutex::new(
- progress.add_child_with_id("resolving", ProgressId::ResolveCounts.into()),
+ progress.add_child_with_id("resolving".into(), ProgressId::ResolveCounts.into()),
));
progress.lock().init(None, gix_features::progress::count("counts"));
let enough_counts_present = counts.len() > 4_000;
@@ -79,7 +79,7 @@ pub(crate) mod function {
use crate::data::output::count::PackLocation::*;
match count.entry_pack_location {
LookedUp(_) => continue,
- NotLookedUp => count.entry_pack_location = LookedUp(db.location_by_oid(count.id, buf)),
+ NotLookedUp => count.entry_pack_location = LookedUp(db.location_by_oid(&count.id, buf)),
}
}
progress.lock().inc_by(chunk_size);
@@ -93,7 +93,7 @@ pub(crate) mod function {
}
let counts_range_by_pack_id = match mode {
Mode::PackCopyAndBaseObjects => {
- let mut progress = progress.add_child_with_id("sorting", ProgressId::SortEntries.into());
+ let mut progress = progress.add_child_with_id("sorting".into(), ProgressId::SortEntries.into());
progress.init(Some(counts.len()), gix_features::progress::count("counts"));
let start = std::time::Instant::now();
@@ -204,7 +204,7 @@ pub(crate) mod function {
stats.objects_copied_from_pack += 1;
entry
}
- None => match db.try_find(count.id, buf).map_err(Error::FindExisting)? {
+ None => match db.try_find(&count.id, buf).map_err(Error::FindExisting)? {
Some((obj, _location)) => {
stats.decoded_and_recompressed_objects += 1;
output::Entry::from_data(count, &obj)
@@ -216,7 +216,7 @@ pub(crate) mod function {
},
}
}
- None => match db.try_find(count.id, buf).map_err(Error::FindExisting)? {
+ None => match db.try_find(&count.id, buf).map_err(Error::FindExisting)? {
Some((obj, _location)) => {
stats.decoded_and_recompressed_objects += 1;
output::Entry::from_data(count, &obj)
@@ -288,7 +288,7 @@ mod reduce {
fn default() -> Self {
Statistics {
total: Default::default(),
- _err: PhantomData::default(),
+ _err: PhantomData,
}
}
}
@@ -395,12 +395,9 @@ mod types {
/// The error returned by the pack generation function [`iter_from_counts()`][crate::data::output::entry::iter_from_counts()].
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
- pub enum Error<FindErr>
- where
- FindErr: std::error::Error + 'static,
- {
+ pub enum Error {
#[error(transparent)]
- FindExisting(FindErr),
+ FindExisting(crate::find::Error),
#[error(transparent)]
NewEntry(#[from] entry::Error),
}
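
Here `iter_from_counts()` switches from `impl Progress` to a boxed `DynNestedProgress` trait object. A sketch of satisfying the new parameter, assuming `Discard` implements the trait via prodash's blanket implementation for nested-progress types:

    use gix_features::progress::{self, prodash::DynNestedProgress};

    fn no_progress() -> Box<dyn DynNestedProgress + 'static> {
        // `Discard` ignores every update; boxing it matches the new parameter type.
        Box::new(progress::Discard)
    }
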
diff --git a/vendor/gix-pack/src/data/output/entry/mod.rs b/vendor/gix-pack/src/data/output/entry/mod.rs
index a94720047..4ab4879eb 100644
--- a/vendor/gix-pack/src/data/output/entry/mod.rs
+++ b/vendor/gix-pack/src/data/output/entry/mod.rs
@@ -66,15 +66,14 @@ impl output::Entry {
potential_bases: &[output::Count],
bases_index_offset: usize,
pack_offset_to_oid: Option<impl FnMut(u32, u64) -> Option<ObjectId>>,
- target_version: crate::data::Version,
+ target_version: data::Version,
) -> Option<Result<Self, Error>> {
if entry.version != target_version {
return None;
};
let pack_offset_must_be_zero = 0;
- let pack_entry =
- crate::data::Entry::from_bytes(&entry.data, pack_offset_must_be_zero, count.id.as_slice().len());
+ let pack_entry = data::Entry::from_bytes(&entry.data, pack_offset_must_be_zero, count.id.as_slice().len());
use crate::data::entry::Header::*;
match pack_entry.header {
@@ -153,9 +152,9 @@ impl output::Entry {
/// This information is known to the one calling the method.
pub fn to_entry_header(
&self,
- version: crate::data::Version,
+ version: data::Version,
index_to_base_distance: impl FnOnce(usize) -> u64,
- ) -> crate::data::entry::Header {
+ ) -> data::entry::Header {
assert!(
matches!(version, data::Version::V2),
"we can only write V2 pack entries for now"