author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 12:41:41 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 12:41:41 +0000
commit    10ee2acdd26a7f1298c6f6d6b7af9b469fe29b87 (patch)
tree      bdffd5d80c26cf4a7a518281a204be1ace85b4c1 /vendor/gix/src/clone
parent    Releasing progress-linux version 1.70.0+dfsg1-9~progress7.99u1. (diff)
Merging upstream version 1.70.0+dfsg2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/gix/src/clone')
-rw-r--r--  vendor/gix/src/clone/checkout.rs    161
-rw-r--r--  vendor/gix/src/clone/fetch/mod.rs   212
-rw-r--r--  vendor/gix/src/clone/fetch/util.rs  229
-rw-r--r--  vendor/gix/src/clone/mod.rs         118
4 files changed, 720 insertions, 0 deletions
diff --git a/vendor/gix/src/clone/checkout.rs b/vendor/gix/src/clone/checkout.rs
new file mode 100644
index 000000000..50d235f13
--- /dev/null
+++ b/vendor/gix/src/clone/checkout.rs
@@ -0,0 +1,161 @@
+use crate::{clone::PrepareCheckout, Repository};
+
+///
+pub mod main_worktree {
+ use std::{path::PathBuf, sync::atomic::AtomicBool};
+
+ use gix_odb::FindExt;
+
+ use crate::{clone::PrepareCheckout, Progress, Repository};
+
+ /// The error returned by [`PrepareCheckout::main_worktree()`].
+ #[derive(Debug, thiserror::Error)]
+ #[allow(missing_docs)]
+ pub enum Error {
+ #[error("Repository at \"{}\" is a bare repository and cannot have a main worktree checkout", git_dir.display())]
+ BareRepository { git_dir: PathBuf },
+ #[error("The object pointed to by HEAD is not a treeish")]
+ NoHeadTree(#[from] crate::object::peel::to_kind::Error),
+ #[error("Could not create index from tree at {id}")]
+ IndexFromTree {
+ id: gix_hash::ObjectId,
+ source: gix_traverse::tree::breadthfirst::Error,
+ },
+ #[error(transparent)]
+ WriteIndex(#[from] gix_index::file::write::Error),
+ #[error(transparent)]
+ CheckoutOptions(#[from] crate::config::checkout_options::Error),
+ #[error(transparent)]
+ IndexCheckout(
+ #[from]
+ gix_worktree::index::checkout::Error<gix_odb::find::existing_object::Error<gix_odb::store::find::Error>>,
+ ),
+ #[error("Failed to reopen object database as Arc (only if thread-safety wasn't compiled in)")]
+ OpenArcOdb(#[from] std::io::Error),
+ #[error("The HEAD reference could not be located")]
+ FindHead(#[from] crate::reference::find::existing::Error),
+ #[error("The HEAD reference could not be located")]
+ PeelHeadToId(#[from] crate::head::peel::Error),
+ }
+
+ /// The progress ids used in [`PrepareCheckout::main_worktree()`].
+ ///
+ /// Use this information to selectively extract the progress of interest in case the parent application has custom visualization.
+ #[derive(Debug, Copy, Clone)]
+ pub enum ProgressId {
+ /// The number of files checked out thus far.
+ CheckoutFiles,
+ /// The number of bytes written in total, i.e. the aggregate size of the content of all files written thus far.
+ BytesWritten,
+ }
+
+ impl From<ProgressId> for gix_features::progress::Id {
+ fn from(v: ProgressId) -> Self {
+ match v {
+ ProgressId::CheckoutFiles => *b"CLCF",
+ ProgressId::BytesWritten => *b"CLCB",
+ }
+ }
+ }
+
+ /// Modification
+ impl PrepareCheckout {
+ /// Checkout the main worktree, determining how many threads to use by looking at `checkout.workers`, defaulting to using
+ /// one thread per logical core.
+ ///
+ /// Note that this is a no-op if the remote was empty, leaving this repository empty as well. This can be detected by checking
+ /// whether the `head()` of the returned repository is unborn.
+ pub fn main_worktree(
+ &mut self,
+ mut progress: impl crate::Progress,
+ should_interrupt: &AtomicBool,
+ ) -> Result<(Repository, gix_worktree::index::checkout::Outcome), Error> {
+ let repo = self
+ .repo
+ .as_ref()
+ .expect("still present as we never succeeded the worktree checkout yet");
+ let workdir = repo.work_dir().ok_or_else(|| Error::BareRepository {
+ git_dir: repo.git_dir().to_owned(),
+ })?;
+ let root_tree = match repo.head()?.peel_to_id_in_place().transpose()? {
+ Some(id) => id.object().expect("downloaded from remote").peel_to_tree()?.id,
+ None => {
+ return Ok((
+ self.repo.take().expect("still present"),
+ gix_worktree::index::checkout::Outcome::default(),
+ ))
+ }
+ };
+ let index = gix_index::State::from_tree(&root_tree, |oid, buf| repo.objects.find_tree_iter(oid, buf).ok())
+ .map_err(|err| Error::IndexFromTree {
+ id: root_tree,
+ source: err,
+ })?;
+ let mut index = gix_index::File::from_state(index, repo.index_path());
+
+ let mut opts = repo.config.checkout_options(repo.git_dir())?;
+ opts.destination_is_initially_empty = true;
+
+ let mut files = progress.add_child_with_id("checkout", ProgressId::CheckoutFiles.into());
+ let mut bytes = progress.add_child_with_id("writing", ProgressId::BytesWritten.into());
+
+ files.init(Some(index.entries().len()), crate::progress::count("files"));
+ bytes.init(None, crate::progress::bytes());
+
+ let start = std::time::Instant::now();
+ let outcome = gix_worktree::index::checkout(
+ &mut index,
+ workdir,
+ {
+ let objects = repo.objects.clone().into_arc()?;
+ move |oid, buf| objects.find_blob(oid, buf)
+ },
+ &mut files,
+ &mut bytes,
+ should_interrupt,
+ opts,
+ )?;
+ files.show_throughput(start);
+ bytes.show_throughput(start);
+
+ index.write(Default::default())?;
+ Ok((self.repo.take().expect("still present"), outcome))
+ }
+ }
+}
+
+/// Access
+impl PrepareCheckout {
+ /// Get access to the repository while the checkout isn't yet completed.
+ ///
+ /// # Panics
+ ///
+ /// If the checkout is completed and the [`Repository`] was already passed on to the caller.
+ pub fn repo(&self) -> &Repository {
+ self.repo
+ .as_ref()
+ .expect("present as checkout operation isn't complete")
+ }
+}
+
+/// Consumption
+impl PrepareCheckout {
+ /// Persist the contained repository as is even if an error may have occurred when checking out the main working tree.
+ pub fn persist(mut self) -> Repository {
+ self.repo.take().expect("present and consumed once")
+ }
+}
+
+impl Drop for PrepareCheckout {
+ fn drop(&mut self) {
+ if let Some(repo) = self.repo.take() {
+ std::fs::remove_dir_all(repo.work_dir().unwrap_or_else(|| repo.path())).ok();
+ }
+ }
+}
+
+impl From<PrepareCheckout> for Repository {
+ fn from(prep: PrepareCheckout) -> Self {
+ prep.persist()
+ }
+}
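
A short usage sketch of `main_worktree()` together with the `persist()`/`Drop` semantics above. It assumes a `PrepareCheckout` obtained from `PrepareFetch::fetch_then_checkout()` (see fetch/mod.rs below) and that `gix::progress::Discard` is available as a no-op `Progress` implementation; neither is part of this patch.

    fn checkout_main_worktree(
        mut checkout: gix::clone::PrepareCheckout,
    ) -> Result<gix::Repository, Box<dyn std::error::Error>> {
        let should_interrupt = std::sync::atomic::AtomicBool::new(false);
        // No-op progress; a real caller would pass its own `gix::Progress` implementation.
        match checkout.main_worktree(gix::progress::Discard, &should_interrupt) {
            Ok((repo, _outcome)) => Ok(repo),
            Err(err) => {
                // Without `persist()` the Drop impl above would delete the freshly cloned directory.
                let _kept = checkout.persist();
                Err(err.into())
            }
        }
    }
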
diff --git a/vendor/gix/src/clone/fetch/mod.rs b/vendor/gix/src/clone/fetch/mod.rs
new file mode 100644
index 000000000..d663b47ea
--- /dev/null
+++ b/vendor/gix/src/clone/fetch/mod.rs
@@ -0,0 +1,212 @@
+use crate::{bstr::BString, clone::PrepareFetch, Repository};
+
+/// The error returned by [`PrepareFetch::fetch_only()`].
+#[derive(Debug, thiserror::Error)]
+#[allow(missing_docs)]
+#[cfg(feature = "blocking-network-client")]
+pub enum Error {
+ #[error(transparent)]
+ Connect(#[from] crate::remote::connect::Error),
+ #[error(transparent)]
+ PrepareFetch(#[from] crate::remote::fetch::prepare::Error),
+ #[error(transparent)]
+ Fetch(#[from] crate::remote::fetch::Error),
+ #[error(transparent)]
+ RemoteInit(#[from] crate::remote::init::Error),
+ #[error("Custom configuration of remote to clone from failed")]
+ RemoteConfiguration(#[source] Box<dyn std::error::Error + Send + Sync>),
+ #[error(transparent)]
+ RemoteName(#[from] crate::config::remote::symbolic_name::Error),
+ #[error("Failed to load repo-local git configuration before writing")]
+ LoadConfig(#[from] gix_config::file::init::from_paths::Error),
+ #[error("Failed to store configured remote in memory")]
+ SaveConfig(#[from] crate::remote::save::AsError),
+ #[error("Failed to write repository configuration to disk")]
+ SaveConfigIo(#[from] std::io::Error),
+ #[error("The remote HEAD points to a reference named {head_ref_name:?} which is invalid.")]
+ InvalidHeadRef {
+ source: gix_validate::refname::Error,
+ head_ref_name: BString,
+ },
+ #[error("Failed to update HEAD with values from remote")]
+ HeadUpdate(#[from] crate::reference::edit::Error),
+}
+
+/// Modification
+impl PrepareFetch {
+ /// Fetch a pack and update local branches according to refspecs, providing `progress` and checking `should_interrupt` to stop
+ /// the operation.
+ /// On success, the persisted repository is returned, and this method must not be called again to avoid a **panic**.
+ /// On error, the method may be called again to retry as often as needed.
+ ///
+ /// If the remote repository was empty, i.e. newly initialized, the returned repository will also be empty, as if it
+ /// had been newly initialized.
+ ///
+ /// Note that all data we created will be removed once this instance drops if the operation wasn't successful.
+ #[cfg(feature = "blocking-network-client")]
+ pub fn fetch_only<P>(
+ &mut self,
+ progress: P,
+ should_interrupt: &std::sync::atomic::AtomicBool,
+ ) -> Result<(Repository, crate::remote::fetch::Outcome), Error>
+ where
+ P: crate::Progress,
+ P::SubProgress: 'static,
+ {
+ use crate::{bstr::ByteVec, remote, remote::fetch::RefLogMessage};
+
+ let repo = self
+ .repo
+ .as_mut()
+ .expect("user error: multiple calls are allowed only until it succeeds");
+
+ let remote_name = match self.remote_name.as_ref() {
+ Some(name) => name.to_owned(),
+ None => repo
+ .config
+ .resolved
+ .string("clone", None, crate::config::tree::Clone::DEFAULT_REMOTE_NAME.name)
+ .map(|n| crate::config::tree::Clone::DEFAULT_REMOTE_NAME.try_into_symbolic_name(n))
+ .transpose()?
+ .unwrap_or_else(|| "origin".into()),
+ };
+
+ let mut remote = repo
+ .remote_at(self.url.clone())?
+ .with_refspecs(
+ Some(format!("+refs/heads/*:refs/remotes/{remote_name}/*").as_str()),
+ remote::Direction::Fetch,
+ )
+ .expect("valid static spec");
+ let mut clone_fetch_tags = None;
+ if let Some(f) = self.configure_remote.as_mut() {
+ remote = f(remote).map_err(|err| Error::RemoteConfiguration(err))?;
+ } else {
+ clone_fetch_tags = remote::fetch::Tags::All.into();
+ }
+
+ let config = util::write_remote_to_local_config_file(&mut remote, remote_name.clone())?;
+
+ // Now we are free to apply remote configuration we don't want to be written to disk.
+ if let Some(fetch_tags) = clone_fetch_tags {
+ remote = remote.with_fetch_tags(fetch_tags);
+ }
+
+ // Add HEAD after the remote was written to config; we need it to know what to check out later, and to ensure
+ // the ref that HEAD points to is present no matter what.
+ let head_refspec = gix_refspec::parse(
+ format!("HEAD:refs/remotes/{remote_name}/HEAD").as_str().into(),
+ gix_refspec::parse::Operation::Fetch,
+ )
+ .expect("valid")
+ .to_owned();
+ let pending_pack: remote::fetch::Prepare<'_, '_, _, _> =
+ remote.connect(remote::Direction::Fetch, progress)?.prepare_fetch({
+ let mut opts = self.fetch_options.clone();
+ if !opts.extra_refspecs.contains(&head_refspec) {
+ opts.extra_refspecs.push(head_refspec)
+ }
+ opts
+ })?;
+ if pending_pack.ref_map().object_hash != repo.object_hash() {
+ unimplemented!("configure repository to expect a different object hash as advertised by the server")
+ }
+ let reflog_message = {
+ let mut b = self.url.to_bstring();
+ b.insert_str(0, "clone: from ");
+ b
+ };
+ let outcome = pending_pack
+ .with_write_packed_refs_only(true)
+ .with_reflog_message(RefLogMessage::Override {
+ message: reflog_message.clone(),
+ })
+ .receive(should_interrupt)?;
+
+ util::append_config_to_repo_config(repo, config);
+ util::update_head(
+ repo,
+ &outcome.ref_map.remote_refs,
+ reflog_message.as_ref(),
+ remote_name.as_ref(),
+ )?;
+
+ Ok((self.repo.take().expect("still present"), outcome))
+ }
+
+ /// Similar to [`fetch_only()`][Self::fetch_only()], but passes ownership to a utility type to configure a checkout operation.
+ #[cfg(feature = "blocking-network-client")]
+ pub fn fetch_then_checkout<P>(
+ &mut self,
+ progress: P,
+ should_interrupt: &std::sync::atomic::AtomicBool,
+ ) -> Result<(crate::clone::PrepareCheckout, crate::remote::fetch::Outcome), Error>
+ where
+ P: crate::Progress,
+ P::SubProgress: 'static,
+ {
+ let (repo, fetch_outcome) = self.fetch_only(progress, should_interrupt)?;
+ Ok((crate::clone::PrepareCheckout { repo: repo.into() }, fetch_outcome))
+ }
+}
+
+/// Builder
+impl PrepareFetch {
+ /// Set additional options to adjust parts of the fetch operation that are not affected by the git configuration.
+ #[cfg(any(feature = "async-network-client", feature = "blocking-network-client"))]
+ pub fn with_fetch_options(mut self, opts: crate::remote::ref_map::Options) -> Self {
+ self.fetch_options = opts;
+ self
+ }
+ /// Use `f` to apply arbitrary changes to the remote that is about to be used to fetch a pack.
+ ///
+ /// The passed in `remote` will be un-named and pre-configured to be a default remote as we know it from git-clone.
+ /// It is not yet present in the configuration of the repository, but it will eventually be written to the configuration
+ /// prior to performing the fetch operation, so _all changes done in `f()` will be persisted_.
+ ///
+ /// It can also be used to configure additional options, like those for fetching tags. Note that
+ /// [with_fetch_tags()][crate::Remote::with_fetch_tags()] should be called here to configure the clone as desired.
+ /// Otherwise the clone is configured to be complete, fetching all tags instead of only those reachable from the fetched branches.
+ pub fn configure_remote(
+ mut self,
+ f: impl FnMut(crate::Remote<'_>) -> Result<crate::Remote<'_>, Box<dyn std::error::Error + Send + Sync>> + 'static,
+ ) -> Self {
+ self.configure_remote = Some(Box::new(f));
+ self
+ }
+
+ /// Set the remote's name to the given value after it was configured using the function provided via
+ /// [`configure_remote()`][Self::configure_remote()].
+ ///
+ /// If not set here, it defaults to `origin` or the value of `clone.defaultRemoteName`.
+ pub fn with_remote_name(mut self, name: impl Into<BString>) -> Result<Self, crate::remote::name::Error> {
+ self.remote_name = Some(crate::remote::name::validated(name)?);
+ Ok(self)
+ }
+}
+
+/// Consumption
+impl PrepareFetch {
+ /// Persist the contained repository as is even if an error may have occurred when fetching from the remote.
+ pub fn persist(mut self) -> Repository {
+ self.repo.take().expect("present and consumed once")
+ }
+}
+
+impl Drop for PrepareFetch {
+ fn drop(&mut self) {
+ if let Some(repo) = self.repo.take() {
+ std::fs::remove_dir_all(repo.work_dir().unwrap_or_else(|| repo.path())).ok();
+ }
+ }
+}
+
+impl From<PrepareFetch> for Repository {
+ fn from(prep: PrepareFetch) -> Self {
+ prep.persist()
+ }
+}
+
+#[cfg(feature = "blocking-network-client")]
+mod util;
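
A small sketch of the builder methods above, assuming a `PrepareFetch` is already at hand (see mod.rs further down) and that `gix::remote::name::Error` and the `Tags::All` variant used in `fetch_only()` are reachable at these paths; the remote name is illustrative only.

    fn customize(prepare: gix::clone::PrepareFetch) -> Result<gix::clone::PrepareFetch, gix::remote::name::Error> {
        Ok(prepare
            // Override the default remote name (`origin` or `clone.defaultRemoteName`).
            .with_remote_name("upstream")?
            // Adjust the remote before it is persisted to the repository configuration.
            .configure_remote(|remote| {
                // Make the complete-clone default explicit: fetch all tags.
                Ok(remote.with_fetch_tags(gix::remote::fetch::Tags::All))
            }))
    }
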
diff --git a/vendor/gix/src/clone/fetch/util.rs b/vendor/gix/src/clone/fetch/util.rs
new file mode 100644
index 000000000..ac8943f6e
--- /dev/null
+++ b/vendor/gix/src/clone/fetch/util.rs
@@ -0,0 +1,229 @@
+use std::{borrow::Cow, convert::TryInto, io::Write};
+
+use gix_odb::Find;
+use gix_ref::{
+ transaction::{LogChange, RefLog},
+ FullNameRef,
+};
+
+use super::Error;
+use crate::{
+ bstr::{BStr, BString, ByteSlice},
+ Repository,
+};
+
+enum WriteMode {
+ Overwrite,
+ Append,
+}
+
+#[allow(clippy::result_large_err)]
+pub fn write_remote_to_local_config_file(
+ remote: &mut crate::Remote<'_>,
+ remote_name: BString,
+) -> Result<gix_config::File<'static>, Error> {
+ let mut config = gix_config::File::new(local_config_meta(remote.repo));
+ remote.save_as_to(remote_name, &mut config)?;
+
+ write_to_local_config(&config, WriteMode::Append)?;
+ Ok(config)
+}
+
+fn local_config_meta(repo: &Repository) -> gix_config::file::Metadata {
+ let meta = repo.config.resolved.meta().clone();
+ assert_eq!(
+ meta.source,
+ gix_config::Source::Local,
+ "local path is the default for new sections"
+ );
+ meta
+}
+
+fn write_to_local_config(config: &gix_config::File<'static>, mode: WriteMode) -> std::io::Result<()> {
+ assert_eq!(
+ config.meta().source,
+ gix_config::Source::Local,
+ "made for appending to local configuration file"
+ );
+ let mut local_config = std::fs::OpenOptions::new()
+ .create(false)
+ .write(matches!(mode, WriteMode::Overwrite))
+ .append(matches!(mode, WriteMode::Append))
+ .open(config.meta().path.as_deref().expect("local config with path set"))?;
+ local_config.write_all(config.detect_newline_style())?;
+ config.write_to_filter(&mut local_config, |s| s.meta().source == gix_config::Source::Local)
+}
+
+pub fn append_config_to_repo_config(repo: &mut Repository, config: gix_config::File<'static>) {
+ let repo_config = gix_features::threading::OwnShared::make_mut(&mut repo.config.resolved);
+ repo_config.append(config);
+}
+
+/// HEAD cannot be written by means of refspec by design, so we have to do it manually here. Also create the pointed-to ref
+/// if we have to, as it might not have been naturally included in the ref-specs.
+pub fn update_head(
+ repo: &mut Repository,
+ remote_refs: &[gix_protocol::handshake::Ref],
+ reflog_message: &BStr,
+ remote_name: &BStr,
+) -> Result<(), Error> {
+ use gix_ref::{
+ transaction::{PreviousValue, RefEdit},
+ Target,
+ };
+ let (head_peeled_id, head_ref) = match remote_refs.iter().find_map(|r| {
+ Some(match r {
+ gix_protocol::handshake::Ref::Symbolic {
+ full_ref_name,
+ target,
+ object,
+ } if full_ref_name == "HEAD" => (Some(object.as_ref()), Some(target)),
+ gix_protocol::handshake::Ref::Direct { full_ref_name, object } if full_ref_name == "HEAD" => {
+ (Some(object.as_ref()), None)
+ }
+ gix_protocol::handshake::Ref::Unborn { full_ref_name, target } if full_ref_name == "HEAD" => {
+ (None, Some(target))
+ }
+ _ => return None,
+ })
+ }) {
+ Some(t) => t,
+ None => return Ok(()),
+ };
+
+ let head: gix_ref::FullName = "HEAD".try_into().expect("valid");
+ let reflog_message = || LogChange {
+ mode: RefLog::AndReference,
+ force_create_reflog: false,
+ message: reflog_message.to_owned(),
+ };
+ match head_ref {
+ Some(referent) => {
+ let referent: gix_ref::FullName = referent.try_into().map_err(|err| Error::InvalidHeadRef {
+ head_ref_name: referent.to_owned(),
+ source: err,
+ })?;
+ repo.refs
+ .transaction()
+ .packed_refs(gix_ref::file::transaction::PackedRefs::DeletionsAndNonSymbolicUpdates(
+ Box::new(|oid, buf| {
+ repo.objects
+ .try_find(oid, buf)
+ .map(|obj| obj.map(|obj| obj.kind))
+ .map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync + 'static>)
+ }),
+ ))
+ .prepare(
+ {
+ let mut edits = vec![RefEdit {
+ change: gix_ref::transaction::Change::Update {
+ log: reflog_message(),
+ expected: PreviousValue::Any,
+ new: Target::Symbolic(referent.clone()),
+ },
+ name: head.clone(),
+ deref: false,
+ }];
+ if let Some(head_peeled_id) = head_peeled_id {
+ edits.push(RefEdit {
+ change: gix_ref::transaction::Change::Update {
+ log: reflog_message(),
+ expected: PreviousValue::Any,
+ new: Target::Peeled(head_peeled_id.to_owned()),
+ },
+ name: referent.clone(),
+ deref: false,
+ });
+ };
+ edits
+ },
+ gix_lock::acquire::Fail::Immediately,
+ gix_lock::acquire::Fail::Immediately,
+ )
+ .map_err(crate::reference::edit::Error::from)?
+ .commit(
+ repo.committer()
+ .transpose()
+ .map_err(|err| Error::HeadUpdate(crate::reference::edit::Error::ParseCommitterTime(err)))?,
+ )
+ .map_err(crate::reference::edit::Error::from)?;
+
+ if let Some(head_peeled_id) = head_peeled_id {
+ let mut log = reflog_message();
+ log.mode = RefLog::Only;
+ repo.edit_reference(RefEdit {
+ change: gix_ref::transaction::Change::Update {
+ log,
+ expected: PreviousValue::Any,
+ new: Target::Peeled(head_peeled_id.to_owned()),
+ },
+ name: head,
+ deref: false,
+ })?;
+ }
+
+ setup_branch_config(repo, referent.as_ref(), head_peeled_id, remote_name)?;
+ }
+ None => {
+ repo.edit_reference(RefEdit {
+ change: gix_ref::transaction::Change::Update {
+ log: reflog_message(),
+ expected: PreviousValue::Any,
+ new: Target::Peeled(
+ head_peeled_id
+ .expect("detached heads always point to something")
+ .to_owned(),
+ ),
+ },
+ name: head,
+ deref: false,
+ })?;
+ }
+ };
+ Ok(())
+}
+
+/// Set up the remote configuration for `branch` so that it points to itself, but on the remote, if and only if the
+/// currently saved refspecs are able to match it.
+/// For that we reload the remote of `remote_name` and use its ref_specs for matching.
+fn setup_branch_config(
+ repo: &mut Repository,
+ branch: &FullNameRef,
+ branch_id: Option<&gix_hash::oid>,
+ remote_name: &BStr,
+) -> Result<(), Error> {
+ let short_name = match branch.category_and_short_name() {
+ Some((cat, shortened)) if cat == gix_ref::Category::LocalBranch => match shortened.to_str() {
+ Ok(s) => s,
+ Err(_) => return Ok(()),
+ },
+ _ => return Ok(()),
+ };
+ let remote = repo
+ .find_remote(remote_name)
+ .expect("remote was just created and must be visible in config");
+ let group = gix_refspec::MatchGroup::from_fetch_specs(remote.fetch_specs.iter().map(|s| s.to_ref()));
+ let null = gix_hash::ObjectId::null(repo.object_hash());
+ let res = group.match_remotes(
+ Some(gix_refspec::match_group::Item {
+ full_ref_name: branch.as_bstr(),
+ target: branch_id.unwrap_or(&null),
+ object: None,
+ })
+ .into_iter(),
+ );
+ if !res.mappings.is_empty() {
+ let mut config = repo.config_snapshot_mut();
+ let mut section = config
+ .new_section("branch", Some(Cow::Owned(short_name.into())))
+ .expect("section header name is always valid per naming rules, our input branch name is valid");
+ section.push("remote".try_into().expect("valid at compile time"), Some(remote_name));
+ section.push(
+ "merge".try_into().expect("valid at compile time"),
+ Some(branch.as_bstr()),
+ );
+ write_to_local_config(&config, WriteMode::Overwrite)?;
+ config.commit().expect("configuration we set is valid");
+ }
+ Ok(())
+}
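
A standalone sketch of the matching step in `setup_branch_config()` above: branch configuration is written only if one of the remote's fetch refspecs maps the branch. The refspec, branch name and hash kind are illustrative; only APIs already used in this file are assumed.

    fn branch_is_mapped_by_refspec() -> bool {
        let spec = gix_refspec::parse(
            "+refs/heads/*:refs/remotes/origin/*".into(),
            gix_refspec::parse::Operation::Fetch,
        )
        .expect("statically known to be a valid refspec");
        let group = gix_refspec::MatchGroup::from_fetch_specs(Some(spec));
        let null = gix_hash::ObjectId::null(gix_hash::Kind::Sha1);
        let res = group.match_remotes(
            Some(gix_refspec::match_group::Item {
                full_ref_name: "refs/heads/main".into(),
                target: &null,
                object: None,
            })
            .into_iter(),
        );
        // `setup_branch_config()` would now write `branch.main.remote` and `branch.main.merge`.
        !res.mappings.is_empty()
    }
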
diff --git a/vendor/gix/src/clone/mod.rs b/vendor/gix/src/clone/mod.rs
new file mode 100644
index 000000000..249a66a42
--- /dev/null
+++ b/vendor/gix/src/clone/mod.rs
@@ -0,0 +1,118 @@
+#![allow(clippy::result_large_err)]
+use std::convert::TryInto;
+
+use crate::{bstr::BString, config::tree::gitoxide};
+
+type ConfigureRemoteFn =
+ Box<dyn FnMut(crate::Remote<'_>) -> Result<crate::Remote<'_>, Box<dyn std::error::Error + Send + Sync>>>;
+
+/// A utility to collect configuration on how to fetch from a remote and initiate a fetch operation. It will delete the newly
+/// created repository when dropped without having successfully finished a fetch.
+#[must_use]
+pub struct PrepareFetch {
+ /// A freshly initialized repository which is owned by us, or `None` if it was handed to the user
+ repo: Option<crate::Repository>,
+ /// The name of the remote, which defaults to `origin` if not overridden.
+ remote_name: Option<BString>,
+ /// A function to configure a remote prior to fetching a pack.
+ configure_remote: Option<ConfigureRemoteFn>,
+ /// Options for preparing a fetch operation.
+ #[cfg(any(feature = "async-network-client", feature = "blocking-network-client"))]
+ fetch_options: crate::remote::ref_map::Options,
+ /// The url to clone from
+ #[cfg_attr(not(feature = "blocking-network-client"), allow(dead_code))]
+ url: gix_url::Url,
+}
+
+/// The error returned by [`PrepareFetch::new()`].
+#[derive(Debug, thiserror::Error)]
+#[allow(missing_docs)]
+pub enum Error {
+ #[error(transparent)]
+ Init(#[from] crate::init::Error),
+ #[error(transparent)]
+ UrlParse(#[from] gix_url::parse::Error),
+ #[error("Failed to turn a the relative file url \"{}\" into an absolute one", url.to_bstring())]
+ CanonicalizeUrl {
+ url: gix_url::Url,
+ source: gix_path::realpath::Error,
+ },
+}
+
+/// Instantiation
+impl PrepareFetch {
+ /// Create a new repository at `path` with `create_opts` which is ready to clone from `url`, possibly after making additional adjustments to
+ /// configuration and settings.
+ ///
+ /// Note that this is merely a handle to perform the actual connection to the remote, and if any of it fails the freshly initialized repository
+ /// will be removed automatically as soon as this instance drops.
+ ///
+ /// # Deviation
+ ///
+ /// Similar to `git`, a missing user name and email configuration is not terminal and we will fill it in with dummy values. However,
+ /// instead of deriving values from the system, ours are hardcoded to indicate what happened.
+ #[allow(clippy::result_large_err)]
+ pub fn new<Url, E>(
+ url: Url,
+ path: impl AsRef<std::path::Path>,
+ kind: crate::create::Kind,
+ mut create_opts: crate::create::Options,
+ open_opts: crate::open::Options,
+ ) -> Result<Self, Error>
+ where
+ Url: TryInto<gix_url::Url, Error = E>,
+ gix_url::parse::Error: From<E>,
+ {
+ let mut url = url.try_into().map_err(gix_url::parse::Error::from)?;
+ url.canonicalize().map_err(|err| Error::CanonicalizeUrl {
+ url: url.clone(),
+ source: err,
+ })?;
+ create_opts.destination_must_be_empty = true;
+ let mut repo = crate::ThreadSafeRepository::init_opts(path, kind, create_opts, open_opts)?.to_thread_local();
+ if repo.committer().is_none() {
+ let mut config = gix_config::File::new(gix_config::file::Metadata::api());
+ config
+ .set_raw_value(
+ "gitoxide",
+ Some("committer".into()),
+ gitoxide::Committer::NAME_FALLBACK.name,
+ "no name configured during clone",
+ )
+ .expect("works - statically known");
+ config
+ .set_raw_value(
+ "gitoxide",
+ Some("committer".into()),
+ gitoxide::Committer::EMAIL_FALLBACK.name,
+ "noEmailAvailable@example.com",
+ )
+ .expect("works - statically known");
+ let mut repo_config = repo.config_snapshot_mut();
+ repo_config.append(config);
+ repo_config.commit().expect("configuration is still valid");
+ }
+ Ok(PrepareFetch {
+ url,
+ #[cfg(any(feature = "async-network-client", feature = "blocking-network-client"))]
+ fetch_options: Default::default(),
+ repo: Some(repo),
+ remote_name: None,
+ configure_remote: None,
+ })
+ }
+}
+
+/// A utility to collect configuration on how to perform a checkout into a working tree. When dropped without a successful
+/// checkout, the fetched repository will be deleted.
+#[must_use]
+pub struct PrepareCheckout {
+ /// A freshly initialized repository which is owned by us, or `None` if it was handed to the user
+ pub(self) repo: Option<crate::Repository>,
+}
+
+///
+pub mod fetch;
+
+///
+pub mod checkout;
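
Putting the pieces together, a minimal end-to-end sketch of a blocking clone built from the types in this module. It assumes the public `gix` crate surface (`gix::clone::PrepareFetch`, `gix::create`, `gix::open`, `gix::progress::Discard`) and the `blocking-network-client` feature; the URL and destination path are placeholders.

    use std::sync::atomic::AtomicBool;

    fn clone_and_checkout() -> Result<gix::Repository, Box<dyn std::error::Error>> {
        let should_interrupt = AtomicBool::new(false);
        // Prepare a fresh repository; it is deleted again if any of the following steps fail.
        let mut prepare = gix::clone::PrepareFetch::new(
            "https://example.com/repo.git",   // placeholder URL
            std::path::Path::new("./repo"),   // placeholder destination
            gix::create::Kind::WithWorktree,
            gix::create::Options::default(),
            gix::open::Options::isolated(),
        )?;
        // Fetch the pack and update references, then hand over to the checkout helper.
        let (mut checkout, _fetch_outcome) =
            prepare.fetch_then_checkout(gix::progress::Discard, &should_interrupt)?;
        // Check out the main worktree; this is a no-op if the remote was empty.
        let (repo, _checkout_outcome) =
            checkout.main_worktree(gix::progress::Discard, &should_interrupt)?;
        Ok(repo)
    }
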