summary refs log tree commit diff stats
path: root/vendor/gix-pack/src/bundle
diff options
context:
space:
mode:
author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-04 12:41:41 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-04 12:41:41 +0000
commit 10ee2acdd26a7f1298c6f6d6b7af9b469fe29b87 (patch)
tree bdffd5d80c26cf4a7a518281a204be1ace85b4c1 /vendor/gix-pack/src/bundle
parent Releasing progress-linux version 1.70.0+dfsg1-9~progress7.99u1. (diff)
download rustc-10ee2acdd26a7f1298c6f6d6b7af9b469fe29b87.tar.xz
rustc-10ee2acdd26a7f1298c6f6d6b7af9b469fe29b87.zip
Merging upstream version 1.70.0+dfsg2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/gix-pack/src/bundle')
-rw-r--r--vendor/gix-pack/src/bundle/find.rs63
-rw-r--r--vendor/gix-pack/src/bundle/init.rs46
-rw-r--r--vendor/gix-pack/src/bundle/mod.rs60
-rw-r--r--vendor/gix-pack/src/bundle/write/error.rs17
-rw-r--r--vendor/gix-pack/src/bundle/write/mod.rs378
-rw-r--r--vendor/gix-pack/src/bundle/write/types.rs120
6 files changed, 684 insertions, 0 deletions
diff --git a/vendor/gix-pack/src/bundle/find.rs b/vendor/gix-pack/src/bundle/find.rs
new file mode 100644
index 000000000..d39ed49a9
--- /dev/null
+++ b/vendor/gix-pack/src/bundle/find.rs
@@ -0,0 +1,63 @@
+impl crate::Bundle {
+ /// Find an object with the given [`ObjectId`][gix_hash::ObjectId] and place its data into `out`.
+ ///
+ /// [`cache`][crate::cache::DecodeEntry] is used to accelerate the lookup.
+ ///
+ /// **Note** that ref deltas are automatically resolved within this pack only, which makes this implementation unusable
+ /// for thin packs, which by now are expected to be resolved already.
+ pub fn find<'a>(
+ &self,
+ id: impl AsRef<gix_hash::oid>,
+ out: &'a mut Vec<u8>,
+ cache: &mut impl crate::cache::DecodeEntry,
+ ) -> Result<Option<(gix_object::Data<'a>, crate::data::entry::Location)>, crate::data::decode::Error> {
+ // A miss in this pack's index means "not found", which is not an error.
+ let idx = match self.index.lookup(id) {
+ Some(idx) => idx,
+ None => return Ok(None),
+ };
+ self.get_object_by_index(idx, out, cache).map(Some)
+ }
+
+ /// Special-use function to get an object given an index previously returned from
+ /// internal_find_pack_index.
+ ///
+ /// # Panics
+ ///
+ /// If `index` is out of bounds.
+ pub fn get_object_by_index<'a>(
+ &self,
+ idx: u32,
+ out: &'a mut Vec<u8>,
+ cache: &mut impl crate::cache::DecodeEntry,
+ ) -> Result<(gix_object::Data<'a>, crate::data::entry::Location), crate::data::decode::Error> {
+ let ofs = self.index.pack_offset_at_index(idx);
+ let pack_entry = self.pack.entry(ofs);
+ let header_size = pack_entry.header_size();
+ self.pack
+ .decode_entry(
+ pack_entry,
+ out,
+ // Resolve ref-delta bases by locating them within this same pack via its index.
+ |id, _out| {
+ self.index.lookup(id).map(|idx| {
+ crate::data::decode::entry::ResolvedBase::InPack(
+ self.pack.entry(self.index.pack_offset_at_index(idx)),
+ )
+ })
+ },
+ cache,
+ )
+ .map(move |r| {
+ (
+ gix_object::Data {
+ kind: r.kind,
+ data: out.as_slice(),
+ },
+ crate::data::entry::Location {
+ pack_id: self.pack.id,
+ pack_offset: ofs,
+ // The entry's total on-disk size: its header plus the compressed payload.
+ entry_size: r.compressed_size + header_size,
+ },
+ )
+ })
+ }
+}
diff --git a/vendor/gix-pack/src/bundle/init.rs b/vendor/gix-pack/src/bundle/init.rs
new file mode 100644
index 000000000..3ba5257ed
--- /dev/null
+++ b/vendor/gix-pack/src/bundle/init.rs
@@ -0,0 +1,46 @@
+use std::path::{Path, PathBuf};
+
+use crate::Bundle;
+
+/// Returned by [`Bundle::at()`]
+#[derive(thiserror::Error, Debug)]
+#[allow(missing_docs)]
+pub enum Error {
+ /// The path had neither an `.idx` nor a `.pack` extension.
+ #[error("An 'idx' extension is expected of an index file: '{0}'")]
+ InvalidPath(PathBuf),
+ /// The pack data file's header could not be decoded.
+ #[error(transparent)]
+ Pack(#[from] crate::data::header::decode::Error),
+ /// The index file could not be opened or decoded.
+ #[error(transparent)]
+ Index(#[from] crate::index::init::Error),
+}
+
+/// Initialization
+impl Bundle {
+ /// Create a `Bundle` from `path`, which is either a pack file _(*.pack)_ or an index file _(*.idx)_.
+ ///
+ /// The corresponding complementary file is expected to be present.
+ ///
+ /// The `object_hash` is a way to read (and write) the same file format with different hashes, as the hash kind
+ /// isn't stored within the file format itself.
+ pub fn at(path: impl AsRef<Path>, object_hash: gix_hash::Kind) -> Result<Self, Error> {
+ Self::at_inner(path.as_ref(), object_hash)
+ }
+
+ // Non-generic worker for the generic `at()` above.
+ fn at_inner(path: &Path, object_hash: gix_hash::Kind) -> Result<Self, Error> {
+ let ext = path
+ .extension()
+ .and_then(|e| e.to_str())
+ .ok_or_else(|| Error::InvalidPath(path.to_owned()))?;
+ // Dispatch on the extension; the complementary file is derived by swapping it.
+ Ok(match ext {
+ "idx" => Self {
+ index: crate::index::File::at(path, object_hash)?,
+ pack: crate::data::File::at(path.with_extension("pack"), object_hash)?,
+ },
+ "pack" => Self {
+ pack: crate::data::File::at(path, object_hash)?,
+ index: crate::index::File::at(path.with_extension("idx"), object_hash)?,
+ },
+ _ => return Err(Error::InvalidPath(path.to_owned())),
+ })
+ }
+}
diff --git a/vendor/gix-pack/src/bundle/mod.rs b/vendor/gix-pack/src/bundle/mod.rs
new file mode 100644
index 000000000..076b355d9
--- /dev/null
+++ b/vendor/gix-pack/src/bundle/mod.rs
@@ -0,0 +1,60 @@
+///
+pub mod init;
+
+mod find;
+///
+#[cfg(not(feature = "wasm"))]
+pub mod write;
+
+/// Integrity verification of a bundle's index and pack, via [`Bundle::verify_integrity()`][crate::Bundle::verify_integrity()].
+pub mod verify {
+ use std::sync::atomic::AtomicBool;
+
+ use gix_features::progress::Progress;
+
+ /// Types returned by [`Bundle::verify_integrity()`][crate::Bundle::verify_integrity()].
+ pub mod integrity {
+ /// Returned by [`Bundle::verify_integrity()`][crate::Bundle::verify_integrity()].
+ pub struct Outcome<P> {
+ /// The computed checksum of the index which matched the stored one.
+ pub actual_index_checksum: gix_hash::ObjectId,
+ /// The packs traversal outcome
+ pub pack_traverse_outcome: crate::index::traverse::Statistics,
+ /// The provided progress instance.
+ pub progress: P,
+ }
+ }
+
+ use crate::Bundle;
+
+ impl Bundle {
+ /// Similar to [`crate::index::File::verify_integrity()`] but more convenient to call as the presence of the
+ /// pack file is a given.
+ ///
+ /// `C` is the decode-cache type produced by the `F` factory carried within `options`.
+ pub fn verify_integrity<C, P, F>(
+ &self,
+ progress: P,
+ should_interrupt: &AtomicBool,
+ options: crate::index::verify::integrity::Options<F>,
+ ) -> Result<integrity::Outcome<P>, crate::index::traverse::Error<crate::index::verify::integrity::Error>>
+ where
+ P: Progress,
+ C: crate::cache::DecodeEntry,
+ F: Fn() -> C + Send + Clone,
+ {
+ // Delegate to the index, passing the pack so traversal statistics are produced as well.
+ self.index
+ .verify_integrity(
+ Some(crate::index::verify::PackContext {
+ data: &self.pack,
+ options,
+ }),
+ progress,
+ should_interrupt,
+ )
+ .map(|o| integrity::Outcome {
+ actual_index_checksum: o.actual_index_checksum,
+ // Statistics are always produced when a pack context was provided above.
+ pack_traverse_outcome: o.pack_traverse_statistics.expect("pack is set"),
+ progress: o.progress,
+ })
+ }
+ }
+}
diff --git a/vendor/gix-pack/src/bundle/write/error.rs b/vendor/gix-pack/src/bundle/write/error.rs
new file mode 100644
index 000000000..883c34029
--- /dev/null
+++ b/vendor/gix-pack/src/bundle/write/error.rs
@@ -0,0 +1,17 @@
+use std::io;
+
+use gix_tempfile::handle::Writable;
+
+/// The error returned by [`Bundle::write_to_directory()`][crate::Bundle::write_to_directory()]
+#[derive(thiserror::Error, Debug)]
+#[allow(missing_docs)]
+pub enum Error {
+ #[error("An IO error occurred when reading the pack or creating a temporary file")]
+ Io(#[from] io::Error),
+ /// An entry of the incoming pack stream could not be read or decoded.
+ #[error(transparent)]
+ PackIter(#[from] crate::data::input::Error),
+ #[error("Could not move a temporary file into its desired place")]
+ Persist(#[from] gix_tempfile::handle::persist::Error<Writable>),
+ /// Writing the accompanying index file failed.
+ #[error(transparent)]
+ IndexWrite(#[from] crate::index::write::Error),
+}
diff --git a/vendor/gix-pack/src/bundle/write/mod.rs b/vendor/gix-pack/src/bundle/write/mod.rs
new file mode 100644
index 000000000..fc0284b53
--- /dev/null
+++ b/vendor/gix-pack/src/bundle/write/mod.rs
@@ -0,0 +1,378 @@
+use std::{
+ io,
+ io::Write,
+ marker::PhantomData,
+ path::{Path, PathBuf},
+ sync::{atomic::AtomicBool, Arc},
+};
+
+use gix_features::{interrupt, progress, progress::Progress};
+use gix_tempfile::{AutoRemove, ContainingDirectory};
+
+use crate::data;
+
+mod error;
+pub use error::Error;
+
+mod types;
+use types::{LockWriter, PassThrough};
+pub use types::{Options, Outcome};
+
+use crate::bundle::write::types::SharedTempFile;
+
+// Lookup of thin-pack base objects: given an id and a scratch buffer, return the object's data if available.
+type ThinPackLookupFn = Box<dyn for<'a> FnMut(gix_hash::ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>>>;
+// Same as above, but `Send + 'static` so it can move to the reading thread of the eager variant.
+type ThinPackLookupFnSend =
+ Box<dyn for<'a> FnMut(gix_hash::ObjectId, &'a mut Vec<u8>) -> Option<gix_object::Data<'a>> + Send + 'static>;
+
+/// The progress ids used in [`write_to_directory()`][crate::Bundle::write_to_directory()].
+///
+/// Use this information to selectively extract the progress of interest in case the parent application has custom visualization.
+#[derive(Debug, Copy, Clone)]
+pub enum ProgressId {
+ /// The amount of bytes read from the input pack data file.
+ ReadPackBytes,
+ /// A root progress counting logical steps towards an index file on disk.
+ ///
+ /// Underneath will be more progress information related to actually producing the index.
+ IndexingSteps(PhantomData<crate::index::write::ProgressId>),
+}
+
+impl From<ProgressId> for gix_features::progress::Id {
+ fn from(v: ProgressId) -> Self {
+ // 4-byte ids: "BWRB" = Bundle-Write Read-pack Bytes, "BWCI" = Bundle-Write Create Index.
+ match v {
+ ProgressId::ReadPackBytes => *b"BWRB",
+ ProgressId::IndexingSteps(_) => *b"BWCI",
+ }
+ }
+}
+
+impl crate::Bundle {
+ /// Given a `pack` data stream, write it along with a generated index into the `directory` if `Some` or discard all output if `None`.
+ ///
+ /// In the latter case, the functionality provided here is more a kind of pack data stream validation.
+ ///
+ /// * `progress` provides detailed progress information which can be discarded with [`gix_features::progress::Discard`].
+ /// * `should_interrupt` is checked regularly and when true, the whole operation will stop.
+ /// * `thin_pack_base_object_lookup_fn` If set, we expect to see a thin-pack with objects that reference their base object by object id which is
+ /// expected to exist in the object database the bundle is contained within.
+ /// `options` further configure how the task is performed.
+ ///
+ /// # Note
+ ///
+ /// * the resulting pack may be empty, that is, contains zero objects in some situations. This is a valid reply by a server and should
+ /// be accounted for.
+ /// - Empty packs always have the same name and not handling this case will result in at most one superfluous pack.
+ pub fn write_to_directory<P>(
+ pack: impl io::BufRead,
+ directory: Option<impl AsRef<Path>>,
+ mut progress: P,
+ should_interrupt: &AtomicBool,
+ thin_pack_base_object_lookup_fn: Option<ThinPackLookupFn>,
+ options: Options,
+ ) -> Result<Outcome, Error>
+ where
+ P: Progress,
+ {
+ // Count bytes as they are read; throughput is reported when the reader is dropped.
+ let mut read_progress = progress.add_child_with_id("read pack", ProgressId::ReadPackBytes.into());
+ read_progress.init(None, progress::bytes());
+ let pack = progress::Read {
+ inner: pack,
+ progress: progress::ThroughputOnDrop::new(read_progress),
+ };
+
+ let object_hash = options.object_hash;
+ // Stage pack bytes in a tempfile that is auto-removed unless persisted later.
+ let data_file = Arc::new(parking_lot::Mutex::new(io::BufWriter::with_capacity(
+ 64 * 1024,
+ match directory.as_ref() {
+ Some(directory) => gix_tempfile::new(directory, ContainingDirectory::Exists, AutoRemove::Tempfile)?,
+ None => gix_tempfile::new(std::env::temp_dir(), ContainingDirectory::Exists, AutoRemove::Tempfile)?,
+ },
+ )));
+ let (pack_entries_iter, pack_version): (
+ Box<dyn Iterator<Item = Result<data::input::Entry, data::input::Error>>>,
+ _,
+ ) = match thin_pack_base_object_lookup_fn {
+ Some(thin_pack_lookup_fn) => {
+ let pack = interrupt::Read {
+ inner: pack,
+ should_interrupt,
+ };
+ let buffered_pack = io::BufReader::new(pack);
+ let pack_entries_iter = data::input::LookupRefDeltaObjectsIter::new(
+ data::input::BytesToEntriesIter::new_from_header(
+ buffered_pack,
+ options.iteration_mode,
+ data::input::EntryDataMode::KeepAndCrc32,
+ object_hash,
+ )?,
+ thin_pack_lookup_fn,
+ );
+ let pack_version = pack_entries_iter.inner.version();
+ // Thin packs gain injected base objects, so the final entries are re-serialized
+ // into the tempfile instead of copying the input bytes verbatim.
+ let pack_entries_iter = data::input::EntriesToBytesIter::new(
+ pack_entries_iter,
+ LockWriter {
+ writer: data_file.clone(),
+ },
+ pack_version,
+ gix_hash::Kind::Sha1, // Thin packs imply a pack being transported, and there we only ever know SHA1 at the moment.
+ );
+ (Box::new(pack_entries_iter), pack_version)
+ }
+ None => {
+ // Non-thin packs are teed into the tempfile unchanged while being read.
+ let pack = PassThrough {
+ reader: interrupt::Read {
+ inner: pack,
+ should_interrupt,
+ },
+ writer: Some(data_file.clone()),
+ };
+ // This buf-reader is required to assure we call 'read()' in order to fill the (extra) buffer. Otherwise all the counting
+ // we do with the wrapped pack reader doesn't work as it does not expect anyone to call BufRead functions directly.
+ // However, this is exactly what's happening in the ZipReader implementation that is eventually used.
+ // The performance impact of this is probably negligible, compared to all the other work that is done anyway :D.
+ let buffered_pack = io::BufReader::new(pack);
+ let pack_entries_iter = data::input::BytesToEntriesIter::new_from_header(
+ buffered_pack,
+ options.iteration_mode,
+ data::input::EntryDataMode::Crc32,
+ object_hash,
+ )?;
+ let pack_version = pack_entries_iter.version();
+ (Box::new(pack_entries_iter), pack_version)
+ }
+ };
+ // Build the index, and persist pack/index/keep files if a directory was given.
+ let WriteOutcome {
+ outcome,
+ data_path,
+ index_path,
+ keep_path,
+ } = crate::Bundle::inner_write(
+ directory,
+ progress,
+ options,
+ data_file,
+ pack_entries_iter,
+ should_interrupt,
+ pack_version,
+ )?;
+
+ Ok(Outcome {
+ index: outcome,
+ object_hash,
+ pack_version,
+ data_path,
+ index_path,
+ keep_path,
+ })
+ }
+
+ /// Equivalent to [`write_to_directory()`][crate::Bundle::write_to_directory()] but offloads reading of the pack into its own thread, hence the `Send + 'static'` bounds.
+ ///
+ /// # Note
+ ///
+ /// As it sends portions of the input to a thread it requires the 'static lifetime for the interrupt flags. This can only
+ /// be satisfied by a static AtomicBool which is only suitable for programs that only run one of these operations at a time
+ /// or don't mind that all of them abort when the flag is set.
+ pub fn write_to_directory_eagerly<P>(
+ pack: impl io::Read + Send + 'static,
+ pack_size: Option<u64>,
+ directory: Option<impl AsRef<Path>>,
+ mut progress: P,
+ should_interrupt: &'static AtomicBool,
+ thin_pack_base_object_lookup_fn: Option<ThinPackLookupFnSend>,
+ options: Options,
+ ) -> Result<Outcome, Error>
+ where
+ P: Progress,
+ P::SubProgress: 'static,
+ {
+ let mut read_progress = progress.add_child_with_id("read pack", ProgressId::ReadPackBytes.into()); /* Bundle Write Read pack Bytes*/
+ // Unlike the non-eager variant, the total pack size may be known upfront.
+ read_progress.init(pack_size.map(|s| s as usize), progress::bytes());
+ let pack = progress::Read {
+ inner: pack,
+ progress: progress::ThroughputOnDrop::new(read_progress),
+ };
+
+ let data_file = Arc::new(parking_lot::Mutex::new(io::BufWriter::new(match directory.as_ref() {
+ Some(directory) => gix_tempfile::new(directory, ContainingDirectory::Exists, AutoRemove::Tempfile)?,
+ None => gix_tempfile::new(std::env::temp_dir(), ContainingDirectory::Exists, AutoRemove::Tempfile)?,
+ })));
+ let object_hash = options.object_hash;
+ // 32 kB read buffer.
+ let eight_pages = 4096 * 8;
+ let (pack_entries_iter, pack_version): (
+ Box<dyn Iterator<Item = Result<data::input::Entry, data::input::Error>> + Send + 'static>,
+ _,
+ ) = match thin_pack_base_object_lookup_fn {
+ Some(thin_pack_lookup_fn) => {
+ let pack = interrupt::Read {
+ inner: pack,
+ should_interrupt,
+ };
+ let buffered_pack = io::BufReader::with_capacity(eight_pages, pack);
+ let pack_entries_iter = data::input::LookupRefDeltaObjectsIter::new(
+ data::input::BytesToEntriesIter::new_from_header(
+ buffered_pack,
+ options.iteration_mode,
+ data::input::EntryDataMode::KeepAndCrc32,
+ object_hash,
+ )?,
+ thin_pack_lookup_fn,
+ );
+ let pack_kind = pack_entries_iter.inner.version();
+ // NOTE(review): unlike `write_to_directory()`, this branch neither tees the input into
+ // `data_file` nor re-serializes the resolved entries into it — confirm the staged pack
+ // file actually receives data on this path.
+ (Box::new(pack_entries_iter), pack_kind)
+ }
+ None => {
+ let pack = PassThrough {
+ reader: interrupt::Read {
+ inner: pack,
+ should_interrupt,
+ },
+ writer: Some(data_file.clone()),
+ };
+ let buffered_pack = io::BufReader::with_capacity(eight_pages, pack);
+ let pack_entries_iter = data::input::BytesToEntriesIter::new_from_header(
+ buffered_pack,
+ options.iteration_mode,
+ data::input::EntryDataMode::Crc32,
+ object_hash,
+ )?;
+ let pack_kind = pack_entries_iter.version();
+ (Box::new(pack_entries_iter), pack_kind)
+ }
+ };
+ let num_objects = pack_entries_iter.size_hint().0;
+ // For big packs, read ahead eagerly on a separate thread (the reason for the `Send + 'static` bounds).
+ let pack_entries_iter =
+ gix_features::parallel::EagerIterIf::new(move || num_objects > 25_000, pack_entries_iter, 5_000, 5);
+
+ let WriteOutcome {
+ outcome,
+ data_path,
+ index_path,
+ keep_path,
+ } = crate::Bundle::inner_write(
+ directory,
+ progress,
+ options,
+ data_file,
+ pack_entries_iter,
+ should_interrupt,
+ pack_version,
+ )?;
+
+ Ok(Outcome {
+ index: outcome,
+ object_hash,
+ pack_version,
+ data_path,
+ index_path,
+ keep_path,
+ })
+ }
+
+ // Shared tail of both `write_to_directory*` variants: build the index from `pack_entries_iter`
+ // and, if `directory` is set, persist the pack, index and `.keep` files under their final names.
+ fn inner_write(
+ directory: Option<impl AsRef<Path>>,
+ mut progress: impl Progress,
+ Options {
+ thread_limit,
+ iteration_mode: _,
+ index_version: index_kind,
+ object_hash,
+ }: Options,
+ data_file: SharedTempFile,
+ pack_entries_iter: impl Iterator<Item = Result<data::input::Entry, data::input::Error>>,
+ should_interrupt: &AtomicBool,
+ pack_version: data::Version,
+ ) -> Result<WriteOutcome, Error> {
+ let indexing_progress = progress.add_child_with_id(
+ "create index file",
+ ProgressId::IndexingSteps(Default::default()).into(),
+ );
+ Ok(match directory {
+ Some(directory) => {
+ let directory = directory.as_ref();
+ let mut index_file = gix_tempfile::new(directory, ContainingDirectory::Exists, AutoRemove::Tempfile)?;
+
+ let outcome = crate::index::File::write_data_iter_to_stream(
+ index_kind,
+ {
+ let data_file = Arc::clone(&data_file);
+ move || new_pack_file_resolver(data_file)
+ },
+ pack_entries_iter,
+ thread_limit,
+ indexing_progress,
+ &mut index_file,
+ should_interrupt,
+ object_hash,
+ pack_version,
+ )?;
+
+ // Final names are derived from the hash over the pack's data.
+ let data_path = directory.join(format!("pack-{}.pack", outcome.data_hash.to_hex()));
+ let index_path = data_path.with_extension("idx");
+ let keep_path = data_path.with_extension("keep");
+
+ // Write the `.keep` file first: it prevents collection of the new pack until
+ // the caller removes it (see `Outcome::keep_path`).
+ std::fs::write(&keep_path, b"")?;
+ // Unwrap the buffered writer from its `Arc<Mutex<..>>` and move the tempfile into place.
+ Arc::try_unwrap(data_file)
+ .expect("only one handle left after pack was consumed")
+ .into_inner()
+ .into_inner()
+ .map_err(|err| Error::from(err.into_error()))?
+ .persist(&data_path)?;
+ index_file
+ .persist(&index_path)
+ .map_err(|err| {
+ progress.info(format!(
+ "pack file at {} is retained despite failing to move the index file into place. You can use plumbing to make it usable.",
+ data_path.display()
+ ));
+ err
+ })?;
+ WriteOutcome {
+ outcome,
+ data_path: Some(data_path),
+ index_path: Some(index_path),
+ keep_path: Some(keep_path),
+ }
+ }
+ // Without a directory, the index is produced purely for validation and discarded.
+ None => WriteOutcome {
+ outcome: crate::index::File::write_data_iter_to_stream(
+ index_kind,
+ move || new_pack_file_resolver(data_file),
+ pack_entries_iter,
+ thread_limit,
+ indexing_progress,
+ io::sink(),
+ should_interrupt,
+ object_hash,
+ pack_version,
+ )?,
+ data_path: None,
+ index_path: None,
+ keep_path: None,
+ },
+ })
+ }
+}
+
+// Flush the staged pack and memory-map it, returning a resolver that copies entry byte ranges out of the map.
+fn new_pack_file_resolver(
+ data_file: SharedTempFile,
+) -> io::Result<impl Fn(data::EntryRange, &mut Vec<u8>) -> Option<()> + Send + Clone> {
+ let mut guard = data_file.lock();
+ // Flush buffered writes so the memory map sees all pack bytes written so far.
+ guard.flush()?;
+ let mapped_file = Arc::new(crate::mmap::read_only(
+ &guard.get_mut().with_mut(|f| f.path().to_owned())?,
+ )?);
+ // NOTE(review): `copy_from_slice` panics unless `out.len()` equals the range's length —
+ // callers are expected to size `out` beforehand; confirm.
+ let pack_data_lookup = move |range: std::ops::Range<u64>, out: &mut Vec<u8>| -> Option<()> {
+ mapped_file
+ .get(range.start as usize..range.end as usize)
+ .map(|pack_entry| out.copy_from_slice(pack_entry))
+ };
+ Ok(pack_data_lookup)
+}
+
+// Internal result of `inner_write()`; paths are `None` when no output directory was provided.
+struct WriteOutcome {
+ outcome: crate::index::write::Outcome,
+ data_path: Option<PathBuf>,
+ index_path: Option<PathBuf>,
+ keep_path: Option<PathBuf>,
+}
diff --git a/vendor/gix-pack/src/bundle/write/types.rs b/vendor/gix-pack/src/bundle/write/types.rs
new file mode 100644
index 000000000..56c14ac59
--- /dev/null
+++ b/vendor/gix-pack/src/bundle/write/types.rs
@@ -0,0 +1,120 @@
+use std::{hash::Hash, io, io::SeekFrom, path::PathBuf, sync::Arc};
+
+use gix_tempfile::handle::Writable;
+
+/// Configuration for [write_to_directory][crate::Bundle::write_to_directory()] or
+/// [write_to_directory_eagerly][crate::Bundle::write_to_directory_eagerly()]
+#[derive(Debug, Clone)]
+pub struct Options {
+ /// The amount of threads to use at most when resolving the pack. If `None`, all logical cores are used.
+ pub thread_limit: Option<usize>,
+ /// Determine how much processing to spend on protecting against corruption or recovering from errors.
+ pub iteration_mode: crate::data::input::Mode,
+ /// The version of pack index to write, should be [`crate::index::Version::default()`]
+ pub index_version: crate::index::Version,
+ /// The kind of hash to use when writing the bundle.
+ pub object_hash: gix_hash::Kind,
+}
+
+impl Default for Options {
+ /// Options which favor speed and correctness and write the most commonly supported index file.
+ fn default() -> Self {
+ Options {
+ thread_limit: None,
+ // Verify the input while iterating it, favoring correctness.
+ iteration_mode: crate::data::input::Mode::Verify,
+ index_version: Default::default(),
+ object_hash: Default::default(),
+ }
+ }
+}
+
+/// Returned by [write_to_directory][crate::Bundle::write_to_directory()] or
+/// [write_to_directory_eagerly][crate::Bundle::write_to_directory_eagerly()]
+#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone)]
+#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
+pub struct Outcome {
+ /// The successful result of the index write operation
+ pub index: crate::index::write::Outcome,
+ /// The version of the pack
+ pub pack_version: crate::data::Version,
+ /// The kind of hash stored within the pack and indices
+ pub object_hash: gix_hash::Kind,
+
+ /// The path to the pack index file
+ pub index_path: Option<PathBuf>,
+ /// The path to the pack data file
+ pub data_path: Option<PathBuf>,
+ /// The path to the `.keep` file to prevent collection of the newly written pack until refs are pointing to it.
+ ///
+ /// The file is created right before moving the pack data and index data into place (i.e. `data_path` and `index_path`)
+ /// and is expected to be removed by the caller when ready.
+ pub keep_path: Option<PathBuf>,
+}
+
+impl Outcome {
+ /// Instantiate a bundle from the newly written index and data file that are represented by this `Outcome`
+ ///
+ /// Returns `None` if no files were written, i.e. no output directory was provided.
+ pub fn to_bundle(&self) -> Option<Result<crate::Bundle, crate::bundle::init::Error>> {
+ self.index_path
+ .as_ref()
+ .map(|path| crate::Bundle::at(path, self.object_hash))
+ }
+}
+
+// A buffered writer over the pack tempfile, shareable across threads behind a mutex.
+pub(crate) type SharedTempFile = Arc<parking_lot::Mutex<std::io::BufWriter<gix_tempfile::Handle<Writable>>>>;
+
+// A reader that tees every byte read from `reader` into `writer`, if present.
+pub(crate) struct PassThrough<R> {
+ pub reader: R,
+ pub writer: Option<SharedTempFile>,
+}
+
+impl<R> io::Read for PassThrough<R>
+where
+ R: io::Read,
+{
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let bytes_read = self.reader.read(buf)?;
+ // Copy what was just read into the staging file before handing it to the caller.
+ if let Some(writer) = self.writer.as_mut() {
+ use std::io::Write;
+ writer.lock().write_all(&buf[..bytes_read])?;
+ }
+ Ok(bytes_read)
+ }
+}
+// NOTE(review): data consumed through the `BufRead` interface is *not* teed into `writer`;
+// callers wrap `PassThrough` in a `BufReader` so all input flows through `read()` above — confirm.
+impl<R> io::BufRead for PassThrough<R>
+where
+ R: io::BufRead,
+{
+ fn fill_buf(&mut self) -> io::Result<&[u8]> {
+ self.reader.fill_buf()
+ }
+
+ fn consume(&mut self, amt: usize) {
+ self.reader.consume(amt)
+ }
+}
+
+// Adapts the shared tempfile to `Write`/`Read`/`Seek` by taking the mutex per operation.
+pub(crate) struct LockWriter {
+ pub writer: SharedTempFile,
+}
+
+impl io::Write for LockWriter {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.writer.lock().write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.writer.lock().flush()
+ }
+}
+
+impl io::Read for LockWriter {
+ // NOTE(review): reads go straight to the underlying file via `get_mut()`, bypassing any
+ // unflushed `BufWriter` data — callers should flush before reading; confirm.
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.writer.lock().get_mut().read(buf)
+ }
+}
+
+impl io::Seek for LockWriter {
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ self.writer.lock().seek(pos)
+ }
+}