summaryrefslogtreecommitdiffstats
path: root/vendor/gix-worktree/src
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-04 12:41:41 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-04 12:41:41 +0000
commit10ee2acdd26a7f1298c6f6d6b7af9b469fe29b87 (patch)
treebdffd5d80c26cf4a7a518281a204be1ace85b4c1 /vendor/gix-worktree/src
parentReleasing progress-linux version 1.70.0+dfsg1-9~progress7.99u1. (diff)
downloadrustc-10ee2acdd26a7f1298c6f6d6b7af9b469fe29b87.tar.xz
rustc-10ee2acdd26a7f1298c6f6d6b7af9b469fe29b87.zip
Merging upstream version 1.70.0+dfsg2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/gix-worktree/src')
-rw-r--r--vendor/gix-worktree/src/fs/cache/mod.rs148
-rw-r--r--vendor/gix-worktree/src/fs/cache/platform.rs171
-rw-r--r--vendor/gix-worktree/src/fs/cache/state.rs301
-rw-r--r--vendor/gix-worktree/src/fs/capabilities.rs122
-rw-r--r--vendor/gix-worktree/src/fs/mod.rs81
-rw-r--r--vendor/gix-worktree/src/fs/stack.rs123
-rw-r--r--vendor/gix-worktree/src/index/checkout.rs95
-rw-r--r--vendor/gix-worktree/src/index/entry.rs189
-rw-r--r--vendor/gix-worktree/src/index/mod.rs311
-rw-r--r--vendor/gix-worktree/src/lib.rs15
-rw-r--r--vendor/gix-worktree/src/os.rs50
11 files changed, 1606 insertions, 0 deletions
diff --git a/vendor/gix-worktree/src/fs/cache/mod.rs b/vendor/gix-worktree/src/fs/cache/mod.rs
new file mode 100644
index 000000000..1f7710e59
--- /dev/null
+++ b/vendor/gix-worktree/src/fs/cache/mod.rs
@@ -0,0 +1,148 @@
+#![allow(missing_docs)]
+use std::path::{Path, PathBuf};
+
+use bstr::{BStr, ByteSlice};
+use gix_hash::oid;
+
+use super::Cache;
+use crate::{fs, fs::PathOidMapping};
+
+#[derive(Clone)]
+pub enum State {
+ /// Useful for checkout where directories need creation, but we need to access attributes as well.
+ CreateDirectoryAndAttributesStack {
+ /// If there is a symlink or a file in our path, try to unlink it before creating the directory.
+ unlink_on_collision: bool,
+
+ /// just for testing
+ #[cfg(debug_assertions)]
+ test_mkdir_calls: usize,
+ /// State to handle attribute information
+ attributes: state::Attributes,
+ },
+ /// Used when adding files, requiring access to both attributes and ignore information, for example during add operations.
+ AttributesAndIgnoreStack {
+ /// State to handle attribute information
+ attributes: state::Attributes,
+ /// State to handle exclusion information
+ ignore: state::Ignore,
+ },
+ /// Used when providing worktree status information.
+ IgnoreStack(state::Ignore),
+}
+
+#[cfg(debug_assertions)]
+impl Cache {
+ pub fn set_case(&mut self, case: gix_glob::pattern::Case) {
+ self.case = case;
+ }
+ pub fn num_mkdir_calls(&self) -> usize {
+ match self.state {
+ State::CreateDirectoryAndAttributesStack { test_mkdir_calls, .. } => test_mkdir_calls,
+ _ => 0,
+ }
+ }
+
+ pub fn reset_mkdir_calls(&mut self) {
+ if let State::CreateDirectoryAndAttributesStack { test_mkdir_calls, .. } = &mut self.state {
+ *test_mkdir_calls = 0;
+ }
+ }
+
+ pub fn unlink_on_collision(&mut self, value: bool) {
+ if let State::CreateDirectoryAndAttributesStack {
+ unlink_on_collision, ..
+ } = &mut self.state
+ {
+ *unlink_on_collision = value;
+ }
+ }
+}
+
+#[must_use]
+pub struct Platform<'a> {
+ parent: &'a Cache,
+ is_dir: Option<bool>,
+}
+
+impl Cache {
+ /// Create a new instance with `worktree_root` being the base for all future paths we handle, assuming it to be valid which includes
+ /// symbolic links to be included in it as well.
+ /// The `case` configures attribute and exclusion query case sensitivity.
+ pub fn new(
+ worktree_root: impl Into<PathBuf>,
+ state: State,
+ case: gix_glob::pattern::Case,
+ buf: Vec<u8>,
+ attribute_files_in_index: Vec<PathOidMapping>,
+ ) -> Self {
+ let root = worktree_root.into();
+ Cache {
+ stack: fs::Stack::new(root),
+ state,
+ case,
+ buf,
+ attribute_files_in_index,
+ }
+ }
+
+ /// Append the `relative` path to the root directory the cache contains and efficiently create leading directories
+ /// unless `is_dir` is known (`Some(…)`) then `relative` points to a directory itself in which case the entire resulting
+ /// path is created as directory. If it's not known it is assumed to be a file.
+ ///
+ /// Provide access to cached information for that `relative` entry via the platform returned.
+ pub fn at_path<Find, E>(
+ &mut self,
+ relative: impl AsRef<Path>,
+ is_dir: Option<bool>,
+ find: Find,
+ ) -> std::io::Result<Platform<'_>>
+ where
+ Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E>,
+ E: std::error::Error + Send + Sync + 'static,
+ {
+ let mut delegate = platform::StackDelegate {
+ state: &mut self.state,
+ buf: &mut self.buf,
+ is_dir: is_dir.unwrap_or(false),
+ attribute_files_in_index: &self.attribute_files_in_index,
+ find,
+ };
+ self.stack.make_relative_path_current(relative, &mut delegate)?;
+ Ok(Platform { parent: self, is_dir })
+ }
+
+ /// **Panics** on illformed UTF8 in `relative`
+ // TODO: more docs
+ pub fn at_entry<'r, Find, E>(
+ &mut self,
+ relative: impl Into<&'r BStr>,
+ is_dir: Option<bool>,
+ find: Find,
+ ) -> std::io::Result<Platform<'_>>
+ where
+ Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E>,
+ E: std::error::Error + Send + Sync + 'static,
+ {
+ let relative = relative.into();
+ let relative_path = gix_path::from_bstr(relative);
+
+ self.at_path(
+ relative_path,
+ is_dir.or_else(|| relative.ends_with_str("/").then_some(true)),
+ // is_dir,
+ find,
+ )
+ }
+
+ /// Return the base path against which all entries or paths should be relative to when querying.
+ ///
+ /// Note that this path _may_ not be canonicalized.
+ pub fn base(&self) -> &Path {
+ self.stack.root()
+ }
+}
+
+mod platform;
+///
+pub mod state;
diff --git a/vendor/gix-worktree/src/fs/cache/platform.rs b/vendor/gix-worktree/src/fs/cache/platform.rs
new file mode 100644
index 000000000..90bbdbe3c
--- /dev/null
+++ b/vendor/gix-worktree/src/fs/cache/platform.rs
@@ -0,0 +1,171 @@
+use std::path::Path;
+
+use bstr::ByteSlice;
+use gix_hash::oid;
+
+use crate::{
+ fs,
+ fs::{
+ cache::{Platform, State},
+ PathOidMapping,
+ },
+};
+
+impl<'a> Platform<'a> {
+ /// The full path to `relative` will be returned for use on the file system.
+ pub fn path(&self) -> &'a Path {
+ self.parent.stack.current()
+ }
+
+ /// See if the currently set entry is excluded as per exclude and git-ignore files.
+ ///
+ /// # Panics
+ ///
+ /// If the cache was configured without exclude patterns.
+ pub fn is_excluded(&self) -> bool {
+ self.matching_exclude_pattern()
+ .map_or(false, |m| !m.pattern.is_negative())
+ }
+
+ /// Check all exclude patterns to see if the currently set path matches any of them.
+ ///
+ /// Note that this pattern might be negated, and means this path is included.
+ ///
+ /// # Panics
+ ///
+ /// If the cache was configured without exclude patterns.
+ pub fn matching_exclude_pattern(&self) -> Option<gix_attributes::Match<'_, ()>> {
+ let ignore = self.parent.state.ignore_or_panic();
+ let relative_path =
+ gix_path::to_unix_separators_on_windows(gix_path::into_bstr(self.parent.stack.current_relative.as_path()));
+ ignore.matching_exclude_pattern(relative_path.as_bstr(), self.is_dir, self.parent.case)
+ }
+}
+
+impl<'a> std::fmt::Debug for Platform<'a> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ std::fmt::Debug::fmt(&self.path(), f)
+ }
+}
+
+pub struct StackDelegate<'a, Find> {
+ pub state: &'a mut State,
+ pub buf: &'a mut Vec<u8>,
+ pub is_dir: bool,
+ pub attribute_files_in_index: &'a Vec<PathOidMapping>,
+ pub find: Find,
+}
+
+impl<'a, Find, E> fs::stack::Delegate for StackDelegate<'a, Find>
+where
+ Find: for<'b> FnMut(&oid, &'b mut Vec<u8>) -> Result<gix_object::BlobRef<'b>, E>,
+ E: std::error::Error + Send + Sync + 'static,
+{
+ fn push_directory(&mut self, stack: &fs::Stack) -> std::io::Result<()> {
+ match &mut self.state {
+ State::CreateDirectoryAndAttributesStack { attributes: _, .. } => {
+ // TODO: attributes
+ }
+ State::AttributesAndIgnoreStack { ignore, attributes: _ } => {
+ // TODO: attributes
+ ignore.push_directory(
+ &stack.root,
+ &stack.current,
+ self.buf,
+ self.attribute_files_in_index,
+ &mut self.find,
+ )?
+ }
+ State::IgnoreStack(ignore) => ignore.push_directory(
+ &stack.root,
+ &stack.current,
+ self.buf,
+ self.attribute_files_in_index,
+ &mut self.find,
+ )?,
+ }
+ Ok(())
+ }
+
+ fn push(&mut self, is_last_component: bool, stack: &fs::Stack) -> std::io::Result<()> {
+ match &mut self.state {
+ State::CreateDirectoryAndAttributesStack {
+ #[cfg(debug_assertions)]
+ test_mkdir_calls,
+ unlink_on_collision,
+ attributes: _,
+ } => {
+ #[cfg(debug_assertions)]
+ {
+ create_leading_directory(
+ is_last_component,
+ stack,
+ self.is_dir,
+ test_mkdir_calls,
+ *unlink_on_collision,
+ )?
+ }
+ #[cfg(not(debug_assertions))]
+ {
+ create_leading_directory(is_last_component, stack, self.is_dir, *unlink_on_collision)?
+ }
+ }
+ State::AttributesAndIgnoreStack { .. } | State::IgnoreStack(_) => {}
+ }
+ Ok(())
+ }
+
+ fn pop_directory(&mut self) {
+ match &mut self.state {
+ State::CreateDirectoryAndAttributesStack { attributes: _, .. } => {
+ // TODO: attributes
+ }
+ State::AttributesAndIgnoreStack { attributes: _, ignore } => {
+ // TODO: attributes
+ ignore.pop_directory();
+ }
+ State::IgnoreStack(ignore) => {
+ ignore.pop_directory();
+ }
+ }
+ }
+}
+
+fn create_leading_directory(
+ is_last_component: bool,
+ stack: &fs::Stack,
+ is_dir: bool,
+ #[cfg(debug_assertions)] mkdir_calls: &mut usize,
+ unlink_on_collision: bool,
+) -> std::io::Result<()> {
+ if is_last_component && !is_dir {
+ return Ok(());
+ }
+ #[cfg(debug_assertions)]
+ {
+ *mkdir_calls += 1;
+ }
+ match std::fs::create_dir(stack.current()) {
+ Ok(()) => Ok(()),
+ Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {
+ let meta = stack.current().symlink_metadata()?;
+ if meta.is_dir() {
+ Ok(())
+ } else if unlink_on_collision {
+ if meta.file_type().is_symlink() {
+ crate::os::remove_symlink(stack.current())?;
+ } else {
+ std::fs::remove_file(stack.current())?;
+ }
+ #[cfg(debug_assertions)]
+ {
+ *mkdir_calls += 1;
+ }
+ std::fs::create_dir(stack.current())
+ } else {
+ Err(err)
+ }
+ }
+ Err(err) => Err(err),
+ }
+}
diff --git a/vendor/gix-worktree/src/fs/cache/state.rs b/vendor/gix-worktree/src/fs/cache/state.rs
new file mode 100644
index 000000000..1692bfa5e
--- /dev/null
+++ b/vendor/gix-worktree/src/fs/cache/state.rs
@@ -0,0 +1,301 @@
+use std::path::Path;
+
+use bstr::{BStr, BString, ByteSlice};
+use gix_glob::pattern::Case;
+use gix_hash::oid;
+
+use crate::fs::{cache::State, PathOidMapping};
+
+type AttributeMatchGroup = gix_attributes::MatchGroup<gix_attributes::Attributes>;
+type IgnoreMatchGroup = gix_attributes::MatchGroup<gix_attributes::Ignore>;
+
+/// State related to attributes associated with files in the repository.
+#[derive(Default, Clone)]
+#[allow(unused)]
+pub struct Attributes {
+ /// Attribute patterns that match the currently set directory (in the stack).
+ pub stack: AttributeMatchGroup,
+ /// Attribute patterns which aren't tied to the repository root, hence are global. They are consulted last.
+ pub globals: AttributeMatchGroup,
+}
+
+/// State related to the exclusion of files.
+#[derive(Default, Clone)]
+#[allow(unused)]
+pub struct Ignore {
+ /// Ignore patterns passed as overrides to everything else, typically passed on the command-line and the first patterns to
+ /// be consulted.
+ overrides: IgnoreMatchGroup,
+ /// Ignore patterns that match the currently set directory (in the stack), which is pushed and popped as needed.
+ stack: IgnoreMatchGroup,
+ /// Ignore patterns which aren't tied to the repository root, hence are global. They are consulted last.
+ globals: IgnoreMatchGroup,
+ /// A matching stack of pattern indices which is empty if we have just been initialized to indicate that the
+ /// currently set directory had a pattern matched. Note that this one could be negated.
+ /// (index into match groups, index into list of pattern lists, index into pattern list)
+ matched_directory_patterns_stack: Vec<Option<(usize, usize, usize)>>,
+ /// The name of the file to look for in directories.
+ exclude_file_name_for_directories: BString,
+ /// The case to use when matching directories as they are pushed onto the stack. We run them against the exclude engine
+ /// to know if an entire path can be ignored as a parent directory is ignored.
+ case: Case,
+}
+
+impl Ignore {
+ /// The `exclude_file_name_for_directories` is an optional override for the filename to use when checking per-directory
+ /// ignore files within the repository, defaults to `.gitignore`.
+ // TODO: more docs
+ pub fn new(
+ overrides: IgnoreMatchGroup,
+ globals: IgnoreMatchGroup,
+ exclude_file_name_for_directories: Option<&BStr>,
+ case: Case,
+ ) -> Self {
+ Ignore {
+ case,
+ overrides,
+ globals,
+ stack: Default::default(),
+ matched_directory_patterns_stack: Vec::with_capacity(6),
+ exclude_file_name_for_directories: exclude_file_name_for_directories
+ .map(ToOwned::to_owned)
+ .unwrap_or_else(|| ".gitignore".into()),
+ }
+ }
+}
+
+impl Ignore {
+ pub(crate) fn pop_directory(&mut self) {
+ self.matched_directory_patterns_stack.pop().expect("something to pop");
+ self.stack.patterns.pop().expect("something to pop");
+ }
+ /// The match groups from lowest priority to highest.
+ pub(crate) fn match_groups(&self) -> [&IgnoreMatchGroup; 3] {
+ [&self.globals, &self.stack, &self.overrides]
+ }
+
+ pub(crate) fn matching_exclude_pattern(
+ &self,
+ relative_path: &BStr,
+ is_dir: Option<bool>,
+ case: Case,
+ ) -> Option<gix_attributes::Match<'_, ()>> {
+ let groups = self.match_groups();
+ let mut dir_match = None;
+ if let Some((source, mapping)) = self
+ .matched_directory_patterns_stack
+ .iter()
+ .rev()
+ .filter_map(|v| *v)
+ .map(|(gidx, plidx, pidx)| {
+ let list = &groups[gidx].patterns[plidx];
+ (list.source.as_deref(), &list.patterns[pidx])
+ })
+ .next()
+ {
+ let match_ = gix_attributes::Match {
+ pattern: &mapping.pattern,
+ value: &mapping.value,
+ sequence_number: mapping.sequence_number,
+ source,
+ };
+ if mapping.pattern.is_negative() {
+ dir_match = Some(match_);
+ } else {
+ // Note that returning here is wrong if this pattern _was_ preceded by a negative pattern that
+ // didn't match the directory, but would match now.
+ // Git does it similarly so we do too even though it's incorrect.
+ // To fix this, one would probably keep track of whether there was a preceding negative pattern, and
+ // if so we check the path in full and only use the dir match if there was no match, similar to the negative
+ // case above whose fix fortunately won't change the overall result.
+ return match_.into();
+ }
+ }
+ groups
+ .iter()
+ .rev()
+ .find_map(|group| group.pattern_matching_relative_path(relative_path.as_bytes(), is_dir, case))
+ .or(dir_match)
+ }
+
+ /// Like `matching_exclude_pattern()` but without checking if the current directory is excluded.
+ /// It returns a triple-index into our data structure from which a match can be reconstructed.
+ pub(crate) fn matching_exclude_pattern_no_dir(
+ &self,
+ relative_path: &BStr,
+ is_dir: Option<bool>,
+ case: Case,
+ ) -> Option<(usize, usize, usize)> {
+ let groups = self.match_groups();
+ groups.iter().enumerate().rev().find_map(|(gidx, group)| {
+ let basename_pos = relative_path.rfind(b"/").map(|p| p + 1);
+ group
+ .patterns
+ .iter()
+ .enumerate()
+ .rev()
+ .find_map(|(plidx, pl)| {
+ pl.pattern_idx_matching_relative_path(relative_path, basename_pos, is_dir, case)
+ .map(|idx| (plidx, idx))
+ })
+ .map(|(plidx, pidx)| (gidx, plidx, pidx))
+ })
+ }
+
+ pub(crate) fn push_directory<Find, E>(
+ &mut self,
+ root: &Path,
+ dir: &Path,
+ buf: &mut Vec<u8>,
+ attribute_files_in_index: &[PathOidMapping],
+ mut find: Find,
+ ) -> std::io::Result<()>
+ where
+ Find: for<'b> FnMut(&oid, &'b mut Vec<u8>) -> Result<gix_object::BlobRef<'b>, E>,
+ E: std::error::Error + Send + Sync + 'static,
+ {
+ let rela_dir = dir.strip_prefix(root).expect("dir in root");
+ self.matched_directory_patterns_stack
+ .push(self.matching_exclude_pattern_no_dir(gix_path::into_bstr(rela_dir).as_ref(), Some(true), self.case));
+
+ let ignore_path_relative = rela_dir.join(".gitignore");
+ let ignore_path_relative = gix_path::to_unix_separators_on_windows(gix_path::into_bstr(ignore_path_relative));
+ let ignore_file_in_index =
+ attribute_files_in_index.binary_search_by(|t| t.0.as_bstr().cmp(ignore_path_relative.as_ref()));
+ let follow_symlinks = ignore_file_in_index.is_err();
+ if !self
+ .stack
+ .add_patterns_file(dir.join(".gitignore"), follow_symlinks, Some(root), buf)?
+ {
+ match ignore_file_in_index {
+ Ok(idx) => {
+ let ignore_blob = find(&attribute_files_in_index[idx].1, buf)
+ .map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err))?;
+ let ignore_path = gix_path::from_bstring(ignore_path_relative.into_owned());
+ self.stack
+ .add_patterns_buffer(ignore_blob.data, ignore_path, Some(root));
+ }
+ Err(_) => {
+ // Need one stack level per component so push and pop matches.
+ self.stack.patterns.push(Default::default())
+ }
+ }
+ }
+ Ok(())
+ }
+}
+
+impl Attributes {
+ /// Create a new instance from an attribute match group that represents `globals`.
+ ///
+ /// A stack of attributes will be applied on top of it later.
+ pub fn new(globals: AttributeMatchGroup) -> Self {
+ Attributes {
+ globals,
+ stack: Default::default(),
+ }
+ }
+}
+
+impl From<AttributeMatchGroup> for Attributes {
+ fn from(group: AttributeMatchGroup) -> Self {
+ Attributes::new(group)
+ }
+}
+
+impl State {
+ /// Configure a state to be suitable for checking out files.
+ pub fn for_checkout(unlink_on_collision: bool, attributes: Attributes) -> Self {
+ State::CreateDirectoryAndAttributesStack {
+ unlink_on_collision,
+ #[cfg(debug_assertions)]
+ test_mkdir_calls: 0,
+ attributes,
+ }
+ }
+
+ /// Configure a state for adding files.
+ pub fn for_add(attributes: Attributes, ignore: Ignore) -> Self {
+ State::AttributesAndIgnoreStack { attributes, ignore }
+ }
+
+ /// Configure a state for status retrieval.
+ pub fn for_status(ignore: Ignore) -> Self {
+ State::IgnoreStack(ignore)
+ }
+}
+
+impl State {
+ /// Returns a vec of tuples of relative index paths along with the best usable OID for either ignore, attribute files or both.
+ ///
+ /// - ignores entries which aren't blobs
+ /// - ignores ignore entries which are not skip-worktree
+ /// - within merges, picks 'our' stage both for ignore and attribute files.
+ pub fn build_attribute_list(
+ &self,
+ index: &gix_index::State,
+ paths: &gix_index::PathStorageRef,
+ case: Case,
+ ) -> Vec<PathOidMapping> {
+ let a1_backing;
+ let a2_backing;
+ let names = match self {
+ State::IgnoreStack(v) => {
+ a1_backing = [(v.exclude_file_name_for_directories.as_bytes().as_bstr(), true)];
+ a1_backing.as_ref()
+ }
+ State::AttributesAndIgnoreStack { ignore, .. } => {
+ a2_backing = [
+ (ignore.exclude_file_name_for_directories.as_bytes().as_bstr(), true),
+ (".gitattributes".into(), false),
+ ];
+ a2_backing.as_ref()
+ }
+ State::CreateDirectoryAndAttributesStack { .. } => {
+ a1_backing = [(".gitattributes".into(), true)];
+ a1_backing.as_ref()
+ }
+ };
+
+ index
+ .entries()
+ .iter()
+ .filter_map(move |entry| {
+ let path = entry.path_in(paths);
+
+ // Stage 0 means there is no merge going on, stage 2 means it's 'our' side of the merge, but then
+ // there won't be a stage 0.
+ if entry.mode == gix_index::entry::Mode::FILE && (entry.stage() == 0 || entry.stage() == 2) {
+ let basename = path
+ .rfind_byte(b'/')
+ .map(|pos| path[pos + 1..].as_bstr())
+ .unwrap_or(path);
+ let is_ignore = names.iter().find_map(|t| {
+ match case {
+ Case::Sensitive => basename == t.0,
+ Case::Fold => basename.eq_ignore_ascii_case(t.0),
+ }
+ .then_some(t.1)
+ })?;
+ // See https://github.com/git/git/blob/master/dir.c#L912:L912
+ if is_ignore && !entry.flags.contains(gix_index::entry::Flags::SKIP_WORKTREE) {
+ return None;
+ }
+ Some((path.to_owned(), entry.id))
+ } else {
+ None
+ }
+ })
+ .collect()
+ }
+
+ pub(crate) fn ignore_or_panic(&self) -> &Ignore {
+ match self {
+ State::IgnoreStack(v) => v,
+ State::AttributesAndIgnoreStack { ignore, .. } => ignore,
+ State::CreateDirectoryAndAttributesStack { .. } => {
+ unreachable!("BUG: must not try to check excludes without it being setup")
+ }
+ }
+ }
+}
diff --git a/vendor/gix-worktree/src/fs/capabilities.rs b/vendor/gix-worktree/src/fs/capabilities.rs
new file mode 100644
index 000000000..64daab9ce
--- /dev/null
+++ b/vendor/gix-worktree/src/fs/capabilities.rs
@@ -0,0 +1,122 @@
+use std::path::Path;
+
+use crate::fs::Capabilities;
+
+#[cfg(windows)]
+impl Default for Capabilities {
+ fn default() -> Self {
+ Capabilities {
+ precompose_unicode: false,
+ ignore_case: true,
+ executable_bit: false,
+ symlink: false,
+ }
+ }
+}
+
+#[cfg(target_os = "macos")]
+impl Default for Capabilities {
+ fn default() -> Self {
+ Capabilities {
+ precompose_unicode: true,
+ ignore_case: true,
+ executable_bit: true,
+ symlink: true,
+ }
+ }
+}
+
+#[cfg(all(unix, not(target_os = "macos")))]
+impl Default for Capabilities {
+ fn default() -> Self {
+ Capabilities {
+ precompose_unicode: false,
+ ignore_case: false,
+ executable_bit: true,
+ symlink: true,
+ }
+ }
+}
+
+impl Capabilities {
+ /// try to determine all values in this context by probing them in the given `git_dir`, which
+ /// should be on the file system the git repository is located on.
+ /// `git_dir` is a typical git repository, expected to be populated with the typical files like `config`.
+ ///
+ /// All errors are ignored and interpreted on top of the default for the platform the binary is compiled for.
+ pub fn probe(git_dir: impl AsRef<Path>) -> Self {
+ let root = git_dir.as_ref();
+ let ctx = Capabilities::default();
+ Capabilities {
+ symlink: Self::probe_symlink(root).unwrap_or(ctx.symlink),
+ ignore_case: Self::probe_ignore_case(root).unwrap_or(ctx.ignore_case),
+ precompose_unicode: Self::probe_precompose_unicode(root).unwrap_or(ctx.precompose_unicode),
+ executable_bit: Self::probe_file_mode(root).unwrap_or(ctx.executable_bit),
+ }
+ }
+
+ #[cfg(unix)]
+ fn probe_file_mode(root: &Path) -> std::io::Result<bool> {
+ use std::os::unix::fs::{MetadataExt, OpenOptionsExt};
+
+ // test it exactly as we typically create executable files, not using chmod.
+ let test_path = root.join("_test_executable_bit");
+ let res = std::fs::OpenOptions::new()
+ .create_new(true)
+ .write(true)
+ .mode(0o777)
+ .open(&test_path)
+ .and_then(|f| f.metadata().map(|m| m.mode() & 0o100 == 0o100));
+ std::fs::remove_file(test_path)?;
+ res
+ }
+
+ #[cfg(not(unix))]
+ fn probe_file_mode(_root: &Path) -> std::io::Result<bool> {
+ Ok(false)
+ }
+
+ fn probe_ignore_case(git_dir: &Path) -> std::io::Result<bool> {
+ std::fs::metadata(git_dir.join("cOnFiG")).map(|_| true).or_else(|err| {
+ if err.kind() == std::io::ErrorKind::NotFound {
+ Ok(false)
+ } else {
+ Err(err)
+ }
+ })
+ }
+
+ fn probe_precompose_unicode(root: &Path) -> std::io::Result<bool> {
+ let precomposed = "ä";
+ let decomposed = "a\u{308}";
+
+ let precomposed = root.join(precomposed);
+ std::fs::OpenOptions::new()
+ .create_new(true)
+ .write(true)
+ .open(&precomposed)?;
+ let res = root.join(decomposed).symlink_metadata().map(|_| true);
+ std::fs::remove_file(precomposed)?;
+ res
+ }
+
+ fn probe_symlink(root: &Path) -> std::io::Result<bool> {
+ let src_path = root.join("__link_src_file");
+ std::fs::OpenOptions::new()
+ .create_new(true)
+ .write(true)
+ .open(&src_path)?;
+ let link_path = root.join("__file_link");
+ if crate::os::create_symlink(&src_path, &link_path).is_err() {
+ std::fs::remove_file(&src_path)?;
+ return Ok(false);
+ }
+
+ let res = std::fs::symlink_metadata(&link_path).map(|m| m.file_type().is_symlink());
+
+ let cleanup = crate::os::remove_symlink(&link_path).or_else(|_| std::fs::remove_file(&link_path));
+ std::fs::remove_file(&src_path).and(cleanup)?;
+
+ res
+ }
+}
diff --git a/vendor/gix-worktree/src/fs/mod.rs b/vendor/gix-worktree/src/fs/mod.rs
new file mode 100644
index 000000000..a58c461fe
--- /dev/null
+++ b/vendor/gix-worktree/src/fs/mod.rs
@@ -0,0 +1,81 @@
+use std::path::PathBuf;
+
+use bstr::BString;
+
+/// Common knowledge about the worktree that is needed across most interactions with the work tree
+#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
+#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy)]
+pub struct Capabilities {
+ /// If true, the filesystem will store paths as decomposed unicode, i.e. `ä` becomes `"a\u{308}"`, which means that
+ /// we have to turn these forms back from decomposed to precomposed unicode before storing it in the index or generally
+ /// using it. This also applies to input received from the command-line, so callers may have to be aware of this and
+ /// perform conversions accordingly.
+ /// If false, no conversions will be performed.
+ pub precompose_unicode: bool,
+ /// If true, the filesystem ignores the case of input, which makes `A` the same file as `a`.
+ /// This is also called case-folding.
+ pub ignore_case: bool,
+ /// If true, we assume the executable bit is honored as part of the file's mode. If false, we assume the file system
+ /// ignores the executable bit, hence it will be reported as 'off' even though we just tried to set it to be on.
+ pub executable_bit: bool,
+ /// If true, the file system supports symbolic links and we should try to create them. Otherwise symbolic links will be checked
+ /// out as files which contain the link as text.
+ pub symlink: bool,
+}
+
+/// A stack of path components with the delegation of side-effects as the currently set path changes, component by component.
+#[derive(Clone)]
+pub struct Stack {
+ /// The prefix/root for all paths we handle.
+ root: PathBuf,
+ /// The most recent cached path that we know is valid.
+ current: PathBuf,
+ /// The relative portion of `valid` that was added previously.
+ current_relative: PathBuf,
+ /// The amount of path components of 'current' beyond the root's components.
+ valid_components: usize,
+ /// If set, we assume the `current` element is a directory to affect calls to `(push|pop)_directory()`.
+ current_is_directory: bool,
+}
+
+/// A cache for efficiently executing operations on directories and files which are encountered in sorted order.
+/// That way, these operations can be re-used for subsequent invocations in the same directory.
+///
+/// This cache can be configured to create directories efficiently, read git-ignore files and git-attribute files,
+/// in any combination.
+///
+/// A cache for directory creation to reduce the amount of stat calls when creating
+/// directories safely, that is without following symlinks that might be on the way.
+///
+/// As a special case, it offers a 'prefix' which (by itself) is assumed to exist and may contain symlinks.
+/// Everything past that prefix boundary must not contain a symlink. We do this by allowing any input path.
+///
+ /// Another added benefit is its ability to store the full path of the entry to which leading directories
+/// are to be created to avoid allocating memory.
+///
+/// For this to work, it remembers the last 'good' path to a directory and assumes that all components of it
+/// are still valid, too.
+/// As directories are created, the cache will be adjusted to reflect the latest seen directory.
+///
+/// The caching is only useful if consecutive calls to create a directory are using a sorted list of entries.
+#[derive(Clone)]
+pub struct Cache {
+ stack: Stack,
+ /// tells us what to do as we change paths.
+ state: cache::State,
+ /// A buffer used when reading attribute or ignore files or their respective objects from the object database.
+ buf: Vec<u8>,
+ /// If case folding should happen when looking up attributes or exclusions.
+ case: gix_glob::pattern::Case,
+ /// A lookup table for object ids to read from in some situations when looking up attributes or exclusions.
+ attribute_files_in_index: Vec<PathOidMapping>,
+}
+
+pub(crate) type PathOidMapping = (BString, gix_hash::ObjectId);
+
+///
+pub mod cache;
+///
+pub mod stack;
+
+mod capabilities;
diff --git a/vendor/gix-worktree/src/fs/stack.rs b/vendor/gix-worktree/src/fs/stack.rs
new file mode 100644
index 000000000..734a4988b
--- /dev/null
+++ b/vendor/gix-worktree/src/fs/stack.rs
@@ -0,0 +1,123 @@
+use std::path::{Path, PathBuf};
+
+use crate::fs::Stack;
+
+impl Stack {
+ /// Returns the top-level path of the stack.
+ pub fn root(&self) -> &Path {
+ &self.root
+ }
+
+ /// Returns the absolute path of the currently set path.
+ pub fn current(&self) -> &Path {
+ &self.current
+ }
+
+ /// Returns the currently set path relative to the [`root()`][Stack::root()].
+ pub fn current_relative(&self) -> &Path {
+ &self.current_relative
+ }
+}
+
+/// A delegate for use in a [`Stack`].
+pub trait Delegate {
+ /// Called whenever we push a directory on top of the stack, after the fact.
+ ///
+ /// It is also called if the currently acted on path is a directory in itself.
+ /// Use `stack.current()` to see the directory.
+ fn push_directory(&mut self, stack: &Stack) -> std::io::Result<()>;
+
+ /// Called after any component was pushed, with the path available at `stack.current()`.
+ ///
+ /// `is_last_component` is true if the path is completely built.
+ fn push(&mut self, is_last_component: bool, stack: &Stack) -> std::io::Result<()>;
+
+ /// Called right after a directory-component was popped off the stack.
+ ///
+ /// Use it to pop information off internal data structures.
+ fn pop_directory(&mut self);
+}
+
+impl Stack {
+ /// Create a new instance with `root` being the base for all future paths we handle, assuming it to be valid which includes
+ /// symbolic links to be included in it as well.
+ pub fn new(root: impl Into<PathBuf>) -> Self {
+ let root = root.into();
+ Stack {
+ current: root.clone(),
+ current_relative: PathBuf::with_capacity(128),
+ valid_components: 0,
+ root,
+ current_is_directory: true,
+ }
+ }
+
+ /// Set the current stack to point to the `relative` path and call `push_comp()` each time a new path component is pushed
+ /// along with the stack's state for inspection to perform an operation that produces some data.
+ ///
+ /// The full path to `relative` will be returned along with the data returned by push_comp.
+ /// Note that this only works correctly for the delegate's `push_directory()` and `pop_directory()` methods if
+ /// `relative` paths are terminal, so point to their designated file or directory.
+ pub fn make_relative_path_current(
+ &mut self,
+ relative: impl AsRef<Path>,
+ delegate: &mut impl Delegate,
+ ) -> std::io::Result<()> {
+ let relative = relative.as_ref();
+ debug_assert!(
+ relative.is_relative(),
+ "only index paths are handled correctly here, must be relative"
+ );
+ debug_assert!(!relative.to_string_lossy().is_empty(), "empty paths are not allowed");
+
+ if self.valid_components == 0 {
+ delegate.push_directory(self)?;
+ }
+
+ let mut components = relative.components().peekable();
+ let mut existing_components = self.current_relative.components();
+ let mut matching_components = 0;
+ while let (Some(existing_comp), Some(new_comp)) = (existing_components.next(), components.peek()) {
+ if existing_comp == *new_comp {
+ components.next();
+ matching_components += 1;
+ } else {
+ break;
+ }
+ }
+
+ for _ in 0..self.valid_components - matching_components {
+ self.current.pop();
+ self.current_relative.pop();
+ if self.current_is_directory {
+ delegate.pop_directory();
+ }
+ self.current_is_directory = true;
+ }
+ self.valid_components = matching_components;
+
+ if !self.current_is_directory && components.peek().is_some() {
+ delegate.push_directory(self)?;
+ }
+
+ while let Some(comp) = components.next() {
+ let is_last_component = components.peek().is_none();
+ self.current_is_directory = !is_last_component;
+ self.current.push(comp);
+ self.current_relative.push(comp);
+ self.valid_components += 1;
+ let res = delegate.push(is_last_component, self);
+ if self.current_is_directory {
+ delegate.push_directory(self)?;
+ }
+
+ if let Err(err) = res {
+ self.current.pop();
+ self.current_relative.pop();
+ self.valid_components -= 1;
+ return Err(err);
+ }
+ }
+ Ok(())
+ }
+}
diff --git a/vendor/gix-worktree/src/index/checkout.rs b/vendor/gix-worktree/src/index/checkout.rs
new file mode 100644
index 000000000..6bc465375
--- /dev/null
+++ b/vendor/gix-worktree/src/index/checkout.rs
@@ -0,0 +1,95 @@
+#![allow(missing_docs)]
+use bstr::BString;
+use gix_attributes::Attributes;
+
/// Information about a path that couldn't be checked out because something else already
/// occupied its location on disk.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Collision {
    /// the path that collided with something already present on disk.
    pub path: BString,
    /// The io error we encountered when checking out `path`.
    pub error_kind: std::io::ErrorKind,
}
+
+pub struct ErrorRecord {
+ /// the path that encountered the error.
+ pub path: BString,
+ /// The error
+ pub error: Box<dyn std::error::Error + Send + Sync + 'static>,
+}
+
/// Aggregated information about the outcome of a checkout operation.
#[derive(Default)]
pub struct Outcome {
    /// The amount of files updated, or created.
    pub files_updated: usize,
    /// The amount of bytes written to disk,
    pub bytes_written: u64,
    /// Paths that collided with entries already present on disk, one per affected path.
    pub collisions: Vec<Collision>,
    /// Errors that were tolerated because `keep_going` was enabled, one per affected path.
    pub errors: Vec<ErrorRecord>,
}
+
/// Options to further configure a checkout operation.
#[derive(Clone)]
pub struct Options {
    /// capabilities of the file system
    pub fs: crate::fs::Capabilities,
    /// If set, don't use more than this amount of threads.
    /// Otherwise, usually use as many threads as there are logical cores.
    /// A value of 0 is interpreted as no-limit
    pub thread_limit: Option<usize>,
    /// If true, we assume no file to exist in the target directory, and want exclusive access to it.
    /// This should be enabled when cloning to avoid checks for freshness of files. This also enables
    /// detection of collisions based on whether or not exclusive file creation succeeds or fails.
    pub destination_is_initially_empty: bool,
    /// If true, default false, worktree entries on disk will be overwritten with content from the index
    /// even if they appear to be changed. When creating directories that clash with existing worktree entries,
    /// these will try to delete the existing entry.
    /// This is similar in behaviour as `git checkout --force`.
    pub overwrite_existing: bool,
    /// If true, default false, try to checkout as much as possible and don't abort on first error which isn't
    /// due to a conflict.
    /// The checkout operation will never fail, but count the encountered errors instead along with their paths.
    pub keep_going: bool,
    /// If true, a files creation time is taken into consideration when checking if a file changed.
    /// Can be set to false in case other tools alter the creation time in ways that interfere with our operation.
    ///
    /// Default true.
    pub trust_ctime: bool,
    /// If true, all stat fields will be used when checking for up-to-date'ness of the entry. Otherwise
    /// nano-second parts of mtime and ctime,uid, gid, inode and device number _will not_ be used, leaving only
    /// the whole-second part of ctime and mtime and the file size to be checked.
    ///
    /// Default true.
    pub check_stat: bool,
    /// A group of attribute patterns that are applied globally, i.e. aren't rooted within the repository itself.
    pub attribute_globals: gix_attributes::MatchGroup<Attributes>,
}
+
+impl Default for Options {
+ fn default() -> Self {
+ Options {
+ fs: Default::default(),
+ thread_limit: None,
+ destination_is_initially_empty: false,
+ keep_going: false,
+ trust_ctime: true,
+ check_stat: true,
+ overwrite_existing: false,
+ attribute_globals: Default::default(),
+ }
+ }
+}
/// The error returned by a checkout operation.
#[derive(Debug, thiserror::Error)]
pub enum Error<E: std::error::Error + Send + Sync + 'static> {
    /// A worktree path could not be converted into a UTF-8 encoded platform path.
    #[error("Could not convert path to UTF8: {}", .path)]
    IllformedUtf8 { path: BString },
    /// Reading file times after writing a file yielded a time before the unix epoch.
    #[error("The clock was off when reading file related metadata after updating a file on disk")]
    Time(#[from] std::time::SystemTimeError),
    /// Any IO error not handled more specifically.
    #[error("IO error while writing blob or reading file metadata or changing filetype")]
    Io(#[from] std::io::Error),
    /// The caller-provided object lookup failed to produce the blob to check out.
    #[error("object {} for checkout at {} could not be retrieved from object database", .oid.to_hex(), .path.display())]
    Find {
        #[source]
        err: E,
        oid: gix_hash::ObjectId,
        path: std::path::PathBuf,
    },
}
diff --git a/vendor/gix-worktree/src/index/entry.rs b/vendor/gix-worktree/src/index/entry.rs
new file mode 100644
index 000000000..32628c4e0
--- /dev/null
+++ b/vendor/gix-worktree/src/index/entry.rs
@@ -0,0 +1,189 @@
+use std::{convert::TryInto, fs::OpenOptions, io::Write, path::Path, time::Duration};
+
+use bstr::BStr;
+use gix_hash::oid;
+use gix_index::Entry;
+use io_close::Close;
+
+use crate::{fs, index, os};
+
/// State needed to check out a single index entry, bundled to keep `checkout`'s signature small.
pub struct Context<'a, Find> {
    /// A function to look up blobs by id, placing their data into the provided buffer.
    pub find: &'a mut Find,
    /// A cache turning relative entry paths into absolute worktree paths (and attributes).
    pub path_cache: &'a mut fs::Cache,
    /// A reusable buffer for blob data, to avoid allocating per entry.
    pub buf: &'a mut Vec<u8>,
}
+
/// Write the content of a single index `entry` to disk at `entry_path`, resolved to an absolute
/// location via the `path_cache`, and return the amount of bytes written.
///
/// The entry's stat information is updated from the written file so it can later be recognized
/// as up-to-date. Directory and submodule entries (`DIR`/`COMMIT`) are not yet implemented.
#[cfg_attr(not(unix), allow(unused_variables))]
pub fn checkout<Find, E>(
    entry: &mut Entry,
    entry_path: &BStr,
    Context { find, path_cache, buf }: Context<'_, Find>,
    index::checkout::Options {
        fs: fs::Capabilities {
            symlink,
            executable_bit,
            ..
        },
        destination_is_initially_empty,
        overwrite_existing,
        ..
    }: index::checkout::Options,
) -> Result<usize, index::checkout::Error<E>>
where
    Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E>,
    E: std::error::Error + Send + Sync + 'static,
{
    // Entry paths are stored as bytes and must be convertible into platform paths.
    let dest_relative = gix_path::try_from_bstr(entry_path).map_err(|_| index::checkout::Error::IllformedUtf8 {
        path: entry_path.to_owned(),
    })?;
    let is_dir = Some(entry.mode == gix_index::entry::Mode::COMMIT || entry.mode == gix_index::entry::Mode::DIR);
    let dest = path_cache.at_path(dest_relative, is_dir, &mut *find)?.path();

    let object_size = match entry.mode {
        gix_index::entry::Mode::FILE | gix_index::entry::Mode::FILE_EXECUTABLE => {
            let obj = find(&entry.id, buf).map_err(|err| index::checkout::Error::Find {
                err,
                oid: entry.id,
                path: dest.to_path_buf(),
            })?;

            #[cfg_attr(not(unix), allow(unused_mut))]
            let mut options = open_options(dest, destination_is_initially_empty, overwrite_existing);
            let needs_executable_bit = executable_bit && entry.mode == gix_index::entry::Mode::FILE_EXECUTABLE;
            #[cfg(unix)]
            if needs_executable_bit && destination_is_initially_empty {
                use std::os::unix::fs::OpenOptionsExt;
                // Note that these only work if the file was newly created, but won't if it's already
                // existing, possibly without the executable bit set. Thus we do this only if the file is new.
                options.mode(0o777);
            }

            let mut file = try_write_or_unlink(dest, overwrite_existing, |p| options.open(p))?;
            file.write_all(obj.data)?;

            // For possibly existing, overwritten files, we must change the file mode explicitly.
            #[cfg(unix)]
            if needs_executable_bit && !destination_is_initially_empty {
                use std::os::unix::fs::PermissionsExt;
                let mut perm = std::fs::symlink_metadata(dest)?.permissions();
                perm.set_mode(0o777);
                std::fs::set_permissions(dest, perm)?;
            }
            // NOTE: we don't call `file.sync_all()` here knowing that some filesystems don't handle this well.
            // revisit this once there is a bug to fix.
            update_fstat(entry, file.metadata()?)?;
            file.close()?;
            obj.data.len()
        }
        gix_index::entry::Mode::SYMLINK => {
            let obj = find(&entry.id, buf).map_err(|err| index::checkout::Error::Find {
                err,
                oid: entry.id,
                path: dest.to_path_buf(),
            })?;
            // The blob content is the link target, which must also be a valid platform path.
            let symlink_destination = gix_path::try_from_byte_slice(obj.data)
                .map_err(|_| index::checkout::Error::IllformedUtf8 { path: obj.data.into() })?;

            if symlink {
                try_write_or_unlink(dest, overwrite_existing, |p| os::create_symlink(symlink_destination, p))?;
            } else {
                // Without symlink support, write the link target as the file's content instead.
                // NOTE(review): the closure receives `p` but opens `dest` — both are the same
                // path at this call site, but `.open(p)` would be more consistent; confirm.
                let mut file = try_write_or_unlink(dest, overwrite_existing, |p| {
                    open_options(p, destination_is_initially_empty, overwrite_existing).open(dest)
                })?;
                file.write_all(obj.data)?;
                file.close()?;
            }

            update_fstat(entry, std::fs::symlink_metadata(dest)?)?;
            obj.data.len()
        }
        gix_index::entry::Mode::DIR => todo!(),
        gix_index::entry::Mode::COMMIT => todo!(),
        _ => unreachable!(),
    };
    Ok(object_size)
}
+
+/// Note that this works only because we assume to not race ourselves when symlinks are involved, and we do this by
+/// delaying symlink creation to the end and will always do that sequentially.
+/// It's still possible to fall for a race if other actors create symlinks in our path, but that's nothing to defend against.
+fn try_write_or_unlink<T>(
+ path: &Path,
+ overwrite_existing: bool,
+ op: impl Fn(&Path) -> std::io::Result<T>,
+) -> std::io::Result<T> {
+ if overwrite_existing {
+ match op(path) {
+ Ok(res) => Ok(res),
+ Err(err) if os::indicates_collision(&err) => {
+ try_unlink_path_recursively(path, &std::fs::symlink_metadata(path)?)?;
+ op(path)
+ }
+ Err(err) => Err(err),
+ }
+ } else {
+ op(path)
+ }
+}
+
+fn try_unlink_path_recursively(path: &Path, path_meta: &std::fs::Metadata) -> std::io::Result<()> {
+ if path_meta.is_dir() {
+ std::fs::remove_dir_all(path)
+ } else if path_meta.file_type().is_symlink() {
+ os::remove_symlink(path)
+ } else {
+ std::fs::remove_file(path)
+ }
+}
+
/// No-op in release builds - the check is compiled out entirely.
#[cfg(not(debug_assertions))]
fn debug_assert_dest_is_no_symlink(_path: &Path) {}

/// This is a debug assertion as we expect the machinery calling this to prevent this possibility in the first place
#[cfg(debug_assertions)]
fn debug_assert_dest_is_no_symlink(path: &Path) {
    // NOTE(review): `metadata()` follows links, so a symlink to an existing target resolves to
    // the target's file type - consider `symlink_metadata()` if the link itself should be
    // detected. Kept as-is to preserve behavior.
    let meta = match path.metadata() {
        Ok(meta) => meta,
        Err(_) => return,
    };
    debug_assert!(
        !meta.file_type().is_symlink(),
        "BUG: should not ever allow to overwrite/write-into the target of a symbolic link: {}",
        path.display()
    );
}
+
+fn open_options(path: &Path, destination_is_initially_empty: bool, overwrite_existing: bool) -> OpenOptions {
+ if overwrite_existing || !destination_is_initially_empty {
+ debug_assert_dest_is_no_symlink(path);
+ }
+ let mut options = gix_features::fs::open_options_no_follow();
+ options
+ .create_new(destination_is_initially_empty && !overwrite_existing)
+ .create(!destination_is_initially_empty || overwrite_existing)
+ .write(true);
+ options
+}
+
+fn update_fstat<E>(entry: &mut Entry, meta: std::fs::Metadata) -> Result<(), index::checkout::Error<E>>
+where
+ E: std::error::Error + Send + Sync + 'static,
+{
+ let ctime = meta
+ .created()
+ .map_or(Ok(Duration::default()), |x| x.duration_since(std::time::UNIX_EPOCH))?;
+ let mtime = meta
+ .modified()
+ .map_or(Ok(Duration::default()), |x| x.duration_since(std::time::UNIX_EPOCH))?;
+
+ let stat = &mut entry.stat;
+ stat.mtime.secs = mtime
+ .as_secs()
+ .try_into()
+ .expect("by 2038 we found a solution for this");
+ stat.mtime.nsecs = mtime.subsec_nanos();
+ stat.ctime.secs = ctime
+ .as_secs()
+ .try_into()
+ .expect("by 2038 we found a solution for this");
+ stat.ctime.nsecs = ctime.subsec_nanos();
+ Ok(())
+}
diff --git a/vendor/gix-worktree/src/index/mod.rs b/vendor/gix-worktree/src/index/mod.rs
new file mode 100644
index 000000000..684d1cae9
--- /dev/null
+++ b/vendor/gix-worktree/src/index/mod.rs
@@ -0,0 +1,311 @@
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+
+use gix_features::{interrupt, parallel::in_parallel, progress, progress::Progress};
+use gix_hash::oid;
+
+use crate::fs;
+
+pub mod checkout;
+pub(crate) mod entry;
+
+/// Note that interruption still produce an `Ok(…)` value, so the caller should look at `should_interrupt` to communicate the outcome.
+/// `dir` is the directory into which to checkout the `index`.
+/// `git_dir` is the `.git` directory for reading additional per-repository configuration files.
+#[allow(clippy::too_many_arguments)]
+pub fn checkout<Find, E>(
+ index: &mut gix_index::State,
+ dir: impl Into<std::path::PathBuf>,
+ find: Find,
+ files: &mut impl Progress,
+ bytes: &mut impl Progress,
+ should_interrupt: &AtomicBool,
+ options: checkout::Options,
+) -> Result<checkout::Outcome, checkout::Error<E>>
+where
+ Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E> + Send + Clone,
+ E: std::error::Error + Send + Sync + 'static,
+{
+ let paths = index.take_path_backing();
+ let res = checkout_inner(index, &paths, dir, find, files, bytes, should_interrupt, options);
+ index.return_path_backing(paths);
+ res
+}
/// The single- or multi-threaded implementation backing `checkout()`, operating on the
/// path backing that was moved out of the index by the caller.
#[allow(clippy::too_many_arguments)]
fn checkout_inner<Find, E>(
    index: &mut gix_index::State,
    paths: &gix_index::PathStorage,
    dir: impl Into<std::path::PathBuf>,
    find: Find,
    files: &mut impl Progress,
    bytes: &mut impl Progress,
    should_interrupt: &AtomicBool,
    options: checkout::Options,
) -> Result<checkout::Outcome, checkout::Error<E>>
where
    Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E> + Send + Clone,
    E: std::error::Error + Send + Sync + 'static,
{
    // Shared counter of processed files so parallel workers report a combined count.
    let num_files = AtomicUsize::default();
    let dir = dir.into();
    // On case-insensitive filesystems pattern matching must fold case as well.
    let case = if options.fs.ignore_case {
        gix_glob::pattern::Case::Fold
    } else {
        gix_glob::pattern::Case::Sensitive
    };
    let (chunk_size, thread_limit, num_threads) = gix_features::parallel::optimize_chunk_size_and_thread_limit(
        100,
        index.entries().len().into(),
        options.thread_limit,
        None,
    );

    // Prepare the shared cache state used to resolve worktree paths and attributes per entry.
    let state = fs::cache::State::for_checkout(options.overwrite_existing, options.attribute_globals.clone().into());
    let attribute_files = state.build_attribute_list(index, paths, case);
    let mut ctx = chunk::Context {
        buf: Vec::new(),
        path_cache: fs::Cache::new(dir, state, case, Vec::with_capacity(512), attribute_files),
        find,
        options,
        num_files: &num_files,
    };

    let chunk::Outcome {
        mut collisions,
        mut errors,
        mut bytes_written,
        delayed,
    } = if num_threads == 1 {
        // Single-threaded: process all entries inline, avoiding thread setup costs.
        let entries_with_paths = interrupt::Iter::new(index.entries_mut_with_paths_in(paths), should_interrupt);
        chunk::process(entries_with_paths, files, bytes, &mut ctx)?
    } else {
        // Multi-threaded: hand chunks of entries to workers, each with a cloned context,
        // and merge their outcomes via the `Reduce` step.
        let entries_with_paths = interrupt::Iter::new(index.entries_mut_with_paths_in(paths), should_interrupt);
        in_parallel(
            gix_features::iter::Chunks {
                inner: entries_with_paths,
                size: chunk_size,
            },
            thread_limit,
            {
                let ctx = ctx.clone();
                move |_| (progress::Discard, progress::Discard, ctx.clone())
            },
            |chunk, (files, bytes, ctx)| chunk::process(chunk.into_iter(), files, bytes, ctx),
            chunk::Reduce {
                files,
                bytes,
                num_files: &num_files,
                aggregate: Default::default(),
                marker: Default::default(),
            },
        )?
    };

    // Symlinks were delayed by `process()` and are created last and sequentially - see the
    // rationale in `chunk::process()`.
    for (entry, entry_path) in delayed {
        bytes_written += chunk::checkout_entry_handle_result(
            entry,
            entry_path,
            &mut errors,
            &mut collisions,
            files,
            bytes,
            &mut ctx,
        )? as u64;
    }

    Ok(checkout::Outcome {
        files_updated: num_files.load(Ordering::Relaxed),
        collisions,
        errors,
        bytes_written,
    })
}
+
/// Machinery for processing batches ("chunks") of index entries, shared between the
/// single-threaded and parallel code paths of `checkout_inner()`.
mod chunk {
    use std::sync::atomic::{AtomicUsize, Ordering};

    use bstr::BStr;
    use gix_features::progress::Progress;
    use gix_hash::oid;

    use crate::{
        fs, index,
        index::{checkout, entry},
        os,
    };

    mod reduce {
        use std::{
            marker::PhantomData,
            sync::atomic::{AtomicUsize, Ordering},
        };

        use gix_features::progress::Progress;

        use crate::index::checkout;

        /// Aggregates the per-chunk `Outcome`s produced by parallel workers into a single one,
        /// updating progress as results are fed in.
        pub struct Reduce<'a, 'entry, P1, P2, E> {
            /// Progress for the amount of files processed.
            pub files: &'a mut P1,
            /// Progress for the amount of bytes written.
            pub bytes: &'a mut P2,
            /// The shared file counter that workers increment themselves.
            pub num_files: &'a AtomicUsize,
            /// The merged outcome of all chunks fed so far.
            pub aggregate: super::Outcome<'entry>,
            /// Carries the error type `E`, which otherwise only occurs in associated types.
            pub marker: PhantomData<E>,
        }

        impl<'a, 'entry, P1, P2, E> gix_features::parallel::Reduce for Reduce<'a, 'entry, P1, P2, E>
        where
            P1: Progress,
            P2: Progress,
            E: std::error::Error + Send + Sync + 'static,
        {
            type Input = Result<super::Outcome<'entry>, checkout::Error<E>>;
            type FeedProduce = ();
            type Output = super::Outcome<'entry>;
            type Error = checkout::Error<E>;

            fn feed(&mut self, item: Self::Input) -> Result<Self::FeedProduce, Self::Error> {
                let item = item?;
                let super::Outcome {
                    bytes_written,
                    delayed,
                    errors,
                    collisions,
                } = item;
                // Merge this chunk's results into the running aggregate.
                self.aggregate.bytes_written += bytes_written;
                self.aggregate.delayed.extend(delayed);
                self.aggregate.errors.extend(errors);
                self.aggregate.collisions.extend(collisions);

                self.bytes.set(self.aggregate.bytes_written as usize);
                self.files.set(self.num_files.load(Ordering::Relaxed));

                Ok(())
            }

            fn finalize(self) -> Result<Self::Output, Self::Error> {
                Ok(self.aggregate)
            }
        }
    }
    pub use reduce::Reduce;

    /// The result of processing one chunk of entries.
    #[derive(Default)]
    pub struct Outcome<'a> {
        /// Collisions with filesystem entries already present on disk.
        pub collisions: Vec<checkout::Collision>,
        /// Errors tolerated because `keep_going` was enabled.
        pub errors: Vec<checkout::ErrorRecord>,
        /// Symlink entries whose creation is postponed to a sequential pass at the end.
        pub delayed: Vec<(&'a mut gix_index::Entry, &'a BStr)>,
        /// The amount of bytes written by this chunk.
        pub bytes_written: u64,
    }

    /// Per-worker state for checking out entries; cloned once per thread.
    #[derive(Clone)]
    pub struct Context<'a, Find: Clone> {
        pub find: Find,
        pub path_cache: fs::Cache,
        pub buf: Vec<u8>,
        pub options: checkout::Options,
        /// We keep these shared so that there is the chance for printing numbers that aren't looking like
        /// multiple of chunk sizes. Purely cosmetic. Otherwise it's the same as `files`.
        pub num_files: &'a AtomicUsize,
    }

    /// Check out all `entries_with_paths`, collecting collisions, tolerated errors and
    /// delayed symlinks into the returned `Outcome`.
    pub fn process<'entry, Find, E>(
        entries_with_paths: impl Iterator<Item = (&'entry mut gix_index::Entry, &'entry BStr)>,
        files: &mut impl Progress,
        bytes: &mut impl Progress,
        ctx: &mut Context<'_, Find>,
    ) -> Result<Outcome<'entry>, checkout::Error<E>>
    where
        Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E> + Clone,
        E: std::error::Error + Send + Sync + 'static,
    {
        let mut delayed = Vec::new();
        let mut collisions = Vec::new();
        let mut errors = Vec::new();
        let mut bytes_written = 0;

        for (entry, entry_path) in entries_with_paths {
            // TODO: write test for that
            // Entries marked skip-worktree are intentionally absent from the worktree.
            if entry.flags.contains(gix_index::entry::Flags::SKIP_WORKTREE) {
                files.inc();
                continue;
            }

            // Symlinks always have to be delayed on windows as they have to point to something that exists on creation.
            // And even if not, there is a distinction between file and directory symlinks, hence we have to check what the target is
            // before creating it.
            // And to keep things sane, we just do the same on non-windows as well which is similar to what git does and adds some safety
            // around writing through symlinks (even though we handle this).
            // This also means that we prefer content in files over symlinks in case of collisions, which probably is for the better, too.
            if entry.mode == gix_index::entry::Mode::SYMLINK {
                delayed.push((entry, entry_path));
                continue;
            }

            bytes_written +=
                checkout_entry_handle_result(entry, entry_path, &mut errors, &mut collisions, files, bytes, ctx)?
                    as u64;
        }

        Ok(Outcome {
            bytes_written,
            errors,
            collisions,
            delayed,
        })
    }

    /// Check out a single entry, translating collision errors and - if `keep_going` is set -
    /// other errors into records (returning `Ok(0)` for those), while updating progress.
    pub fn checkout_entry_handle_result<Find, E>(
        entry: &mut gix_index::Entry,
        entry_path: &BStr,
        errors: &mut Vec<checkout::ErrorRecord>,
        collisions: &mut Vec<checkout::Collision>,
        files: &mut impl Progress,
        bytes: &mut impl Progress,
        Context {
            find,
            path_cache,
            buf,
            options,
            num_files,
        }: &mut Context<'_, Find>,
    ) -> Result<usize, checkout::Error<E>>
    where
        Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E> + Clone,
        E: std::error::Error + Send + Sync + 'static,
    {
        let res = entry::checkout(
            entry,
            entry_path,
            entry::Context { find, path_cache, buf },
            options.clone(),
        );
        files.inc();
        num_files.fetch_add(1, Ordering::SeqCst);
        match res {
            Ok(object_size) => {
                bytes.inc_by(object_size);
                Ok(object_size)
            }
            Err(index::checkout::Error::Io(err)) if os::indicates_collision(&err) => {
                // We are here because a file existed or was blocked by a directory which shouldn't be possible unless
                // we are on a file insensitive file system.
                files.fail(format!("{}: collided ({:?})", entry_path, err.kind()));
                collisions.push(checkout::Collision {
                    path: entry_path.into(),
                    error_kind: err.kind(),
                });
                Ok(0)
            }
            Err(err) => {
                if options.keep_going {
                    errors.push(checkout::ErrorRecord {
                        path: entry_path.into(),
                        error: Box::new(err),
                    });
                    Ok(0)
                } else {
                    Err(err)
                }
            }
        }
    }
}
diff --git a/vendor/gix-worktree/src/lib.rs b/vendor/gix-worktree/src/lib.rs
new file mode 100644
index 000000000..9a67e0289
--- /dev/null
+++ b/vendor/gix-worktree/src/lib.rs
@@ -0,0 +1,15 @@
//! ## Feature Flags
#![cfg_attr(
    feature = "document-features",
    cfg_attr(doc, doc = ::document_features::document_features!())
)]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![deny(missing_docs, rust_2018_idioms, unsafe_code)]

/// file system related utilities
pub mod fs;

/// utilities for checking out an index into a worktree directory
pub mod index;

pub(crate) mod os;
diff --git a/vendor/gix-worktree/src/os.rs b/vendor/gix-worktree/src/os.rs
new file mode 100644
index 000000000..a297e73cd
--- /dev/null
+++ b/vendor/gix-worktree/src/os.rs
@@ -0,0 +1,50 @@
+use std::{io, io::ErrorKind::AlreadyExists, path::Path};
+
/// Create a symlink at `link` pointing to `original`.
#[cfg(not(windows))]
pub fn create_symlink(original: &Path, link: &Path) -> io::Result<()> {
    std::os::unix::fs::symlink(original, link)
}
+
/// Remove the symlink at `path` - on unix a symlink is removed like any other file.
#[cfg(not(windows))]
pub fn remove_symlink(path: &Path) -> io::Result<()> {
    std::fs::remove_file(path)
}
+
// TODO: use the `symlink` crate once it can delete directory symlinks
/// Remove the symlink at `path`, trying both file- and directory-removal as Windows
/// distinguishes file-symlinks from directory-symlinks.
#[cfg(windows)]
pub fn remove_symlink(path: &Path) -> io::Result<()> {
    if let Ok(meta) = std::fs::metadata(path) {
        if meta.is_file() {
            std::fs::remove_file(path) // this removes the link itself
        } else {
            std::fs::remove_dir(path) // however, this sees the destination directory, which isn't the right thing actually
        }
    } else {
        // The link target can't be read (e.g. dangling link) - blindly try both removal modes.
        std::fs::remove_file(path).or_else(|_| std::fs::remove_dir(path))
    }
}
+
/// Create a symlink at `link` pointing to `original`, choosing a file- or directory-symlink
/// based on what `original` resolves to relative to the link's parent directory.
#[cfg(windows)]
pub fn create_symlink(original: &Path, link: &Path) -> io::Result<()> {
    use std::os::windows::fs::{symlink_dir, symlink_file};
    // TODO: figure out if links to links count as files or whatever they point at
    // NOTE(review): when `original` doesn't exist yet (dangling link), the `?` below fails the
    // whole operation instead of falling back to a file-symlink - confirm this is intended.
    if std::fs::metadata(link.parent().expect("dir for link").join(original))?.is_dir() {
        symlink_dir(original, link)
    } else {
        symlink_file(original, link)
    }
}
+
/// Return true if `err` looks like the result of our path colliding with an existing
/// file, directory or symlink on disk.
#[cfg(not(windows))]
pub fn indicates_collision(err: &std::io::Error) -> bool {
    // TODO: use ::IsDirectory as well when stabilized instead of raw_os_error(), and ::FileSystemLoop respectively
    if err.kind() == AlreadyExists {
        return true;
    }
    matches!(
        err.raw_os_error(),
        Some(21) // EISDIR
            | Some(62) // no-follow on symlink on mac-os
            | Some(40) // no-follow on symlink on ubuntu
    )
}
+
/// Return true if `err` looks like the result of our path colliding with an existing
/// filesystem entry; on Windows blocked access may also surface as `PermissionDenied`.
#[cfg(windows)]
pub fn indicates_collision(err: &std::io::Error) -> bool {
    err.kind() == AlreadyExists || err.kind() == std::io::ErrorKind::PermissionDenied
}