Diffstat (limited to 'vendor/gix-worktree/src')
-rw-r--r--  vendor/gix-worktree/src/checkout/chunk.rs  | 182
-rw-r--r--  vendor/gix-worktree/src/checkout/entry.rs  | 166
-rw-r--r--  vendor/gix-worktree/src/checkout/function.rs  | 119
-rw-r--r--  vendor/gix-worktree/src/checkout/mod.rs  |  77
-rw-r--r--  vendor/gix-worktree/src/lib.rs  |  18
-rw-r--r--  vendor/gix-worktree/src/read.rs  |  64
-rw-r--r--  vendor/gix-worktree/src/stack/delegate.rs (renamed from vendor/gix-worktree/src/cache/delegate.rs)  |  74
-rw-r--r--  vendor/gix-worktree/src/stack/mod.rs (renamed from vendor/gix-worktree/src/cache/mod.rs)  |  39
-rw-r--r--  vendor/gix-worktree/src/stack/platform.rs (renamed from vendor/gix-worktree/src/cache/platform.rs)  |   5
-rw-r--r--  vendor/gix-worktree/src/stack/state/attributes.rs (renamed from vendor/gix-worktree/src/cache/state/attributes.rs)  |  70
-rw-r--r--  vendor/gix-worktree/src/stack/state/ignore.rs (renamed from vendor/gix-worktree/src/cache/state/ignore.rs)  |  29
-rw-r--r--  vendor/gix-worktree/src/stack/state/mod.rs (renamed from vendor/gix-worktree/src/cache/state/mod.rs)  |  47
-rw-r--r--  vendor/gix-worktree/src/status/content.rs  |  80
-rw-r--r--  vendor/gix-worktree/src/status/function.rs  | 331
-rw-r--r--  vendor/gix-worktree/src/status/mod.rs  |  11
-rw-r--r--  vendor/gix-worktree/src/status/recorder.rs  |  27
-rw-r--r--  vendor/gix-worktree/src/status/types.rs  |  69
-rw-r--r--  vendor/gix-worktree/src/untracked.rs  |   1
18 files changed, 185 insertions, 1224 deletions
diff --git a/vendor/gix-worktree/src/checkout/chunk.rs b/vendor/gix-worktree/src/checkout/chunk.rs
deleted file mode 100644
index 9de9e424e..000000000
--- a/vendor/gix-worktree/src/checkout/chunk.rs
+++ /dev/null
@@ -1,182 +0,0 @@
-use std::sync::atomic::{AtomicUsize, Ordering};
-
-use bstr::BStr;
-use gix_features::progress::Progress;
-use gix_hash::oid;
-
-use crate::{checkout, checkout::entry, Cache};
-
-mod reduce {
- use std::{
- marker::PhantomData,
- sync::atomic::{AtomicUsize, Ordering},
- };
-
- use gix_features::progress::Progress;
-
- use crate::checkout;
-
- pub struct Reduce<'a, 'entry, P1, P2, E> {
- pub files: &'a mut P1,
- pub bytes: &'a mut P2,
- pub num_files: &'a AtomicUsize,
- pub aggregate: super::Outcome<'entry>,
- pub marker: PhantomData<E>,
- }
-
- impl<'a, 'entry, P1, P2, E> gix_features::parallel::Reduce for Reduce<'a, 'entry, P1, P2, E>
- where
- P1: Progress,
- P2: Progress,
- E: std::error::Error + Send + Sync + 'static,
- {
- type Input = Result<super::Outcome<'entry>, checkout::Error<E>>;
- type FeedProduce = ();
- type Output = super::Outcome<'entry>;
- type Error = checkout::Error<E>;
-
- fn feed(&mut self, item: Self::Input) -> Result<Self::FeedProduce, Self::Error> {
- let item = item?;
- let super::Outcome {
- bytes_written,
- delayed,
- errors,
- collisions,
- } = item;
- self.aggregate.bytes_written += bytes_written;
- self.aggregate.delayed.extend(delayed);
- self.aggregate.errors.extend(errors);
- self.aggregate.collisions.extend(collisions);
-
- self.bytes.set(self.aggregate.bytes_written as usize);
- self.files.set(self.num_files.load(Ordering::Relaxed));
-
- Ok(())
- }
-
- fn finalize(self) -> Result<Self::Output, Self::Error> {
- Ok(self.aggregate)
- }
- }
-}
-pub use reduce::Reduce;
-
-#[derive(Default)]
-pub struct Outcome<'a> {
- pub collisions: Vec<checkout::Collision>,
- pub errors: Vec<checkout::ErrorRecord>,
- pub delayed: Vec<(&'a mut gix_index::Entry, &'a BStr)>,
- pub bytes_written: u64,
-}
-
-#[derive(Clone)]
-pub struct Context<'a, Find: Clone> {
- pub find: Find,
- pub path_cache: Cache,
- pub buf: Vec<u8>,
- pub options: checkout::Options,
- /// We keep these shared so that there is a chance of printing numbers that don't look like
- /// multiples of chunk sizes. Purely cosmetic. Otherwise it's the same as `files`.
- pub num_files: &'a AtomicUsize,
-}
-
-pub fn process<'entry, Find, E>(
- entries_with_paths: impl Iterator<Item = (&'entry mut gix_index::Entry, &'entry BStr)>,
- files: &mut impl Progress,
- bytes: &mut impl Progress,
- ctx: &mut Context<'_, Find>,
-) -> Result<Outcome<'entry>, checkout::Error<E>>
-where
- Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E> + Clone,
- E: std::error::Error + Send + Sync + 'static,
-{
- let mut delayed = Vec::new();
- let mut collisions = Vec::new();
- let mut errors = Vec::new();
- let mut bytes_written = 0;
-
- for (entry, entry_path) in entries_with_paths {
- // TODO: write test for that
- if entry.flags.contains(gix_index::entry::Flags::SKIP_WORKTREE) {
- files.inc();
- continue;
- }
-
- // Symlinks always have to be delayed on windows as they have to point to something that exists on creation.
- // And even if not, there is a distinction between file and directory symlinks, hence we have to check what the target is
- // before creating it.
- // And to keep things sane, we just do the same on non-windows as well which is similar to what git does and adds some safety
- // around writing through symlinks (even though we handle this).
- // This also means that we prefer content in files over symlinks in case of collisions, which probably is for the better, too.
- if entry.mode == gix_index::entry::Mode::SYMLINK {
- delayed.push((entry, entry_path));
- continue;
- }
-
- bytes_written +=
- checkout_entry_handle_result(entry, entry_path, &mut errors, &mut collisions, files, bytes, ctx)? as u64;
- }
-
- Ok(Outcome {
- bytes_written,
- errors,
- collisions,
- delayed,
- })
-}
-
-pub fn checkout_entry_handle_result<Find, E>(
- entry: &mut gix_index::Entry,
- entry_path: &BStr,
- errors: &mut Vec<checkout::ErrorRecord>,
- collisions: &mut Vec<checkout::Collision>,
- files: &mut impl Progress,
- bytes: &mut impl Progress,
- Context {
- find,
- path_cache,
- buf,
- options,
- num_files,
- }: &mut Context<'_, Find>,
-) -> Result<usize, checkout::Error<E>>
-where
- Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E> + Clone,
- E: std::error::Error + Send + Sync + 'static,
-{
- let res = entry::checkout(
- entry,
- entry_path,
- entry::Context { find, path_cache, buf },
- options.clone(),
- );
- files.inc();
- num_files.fetch_add(1, Ordering::SeqCst);
- match res {
- Ok(object_size) => {
- bytes.inc_by(object_size);
- Ok(object_size)
- }
- Err(checkout::Error::Io(err)) if gix_fs::symlink::is_collision_error(&err) => {
- // We are here because a file existed or was blocked by a directory which shouldn't be possible unless
- // we are on a case-insensitive file system.
- files.fail(format!("{}: collided ({:?})", entry_path, err.kind()));
- collisions.push(checkout::Collision {
- path: entry_path.into(),
- error_kind: err.kind(),
- });
- Ok(0)
- }
- Err(err) => {
- if options.keep_going {
- errors.push(checkout::ErrorRecord {
- path: entry_path.into(),
- error: Box::new(err),
- });
- Ok(0)
- } else {
- Err(err)
- }
- }
- }
-}
diff --git a/vendor/gix-worktree/src/checkout/entry.rs b/vendor/gix-worktree/src/checkout/entry.rs
deleted file mode 100644
index 524cf90f2..000000000
--- a/vendor/gix-worktree/src/checkout/entry.rs
+++ /dev/null
@@ -1,166 +0,0 @@
-use std::{fs::OpenOptions, io::Write, path::Path};
-
-use bstr::BStr;
-use gix_hash::oid;
-use gix_index::{entry::Stat, Entry};
-use io_close::Close;
-
-use crate::Cache;
-
-pub struct Context<'a, Find> {
- pub find: &'a mut Find,
- pub path_cache: &'a mut Cache,
- pub buf: &'a mut Vec<u8>,
-}
-
-#[cfg_attr(not(unix), allow(unused_variables))]
-pub fn checkout<Find, E>(
- entry: &mut Entry,
- entry_path: &BStr,
- Context { find, path_cache, buf }: Context<'_, Find>,
- crate::checkout::Options {
- fs: gix_fs::Capabilities {
- symlink,
- executable_bit,
- ..
- },
- destination_is_initially_empty,
- overwrite_existing,
- ..
- }: crate::checkout::Options,
-) -> Result<usize, crate::checkout::Error<E>>
-where
- Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E>,
- E: std::error::Error + Send + Sync + 'static,
-{
- let dest_relative = gix_path::try_from_bstr(entry_path).map_err(|_| crate::checkout::Error::IllformedUtf8 {
- path: entry_path.to_owned(),
- })?;
- let is_dir = Some(entry.mode == gix_index::entry::Mode::COMMIT || entry.mode == gix_index::entry::Mode::DIR);
- let dest = path_cache.at_path(dest_relative, is_dir, &mut *find)?.path();
-
- let object_size = match entry.mode {
- gix_index::entry::Mode::FILE | gix_index::entry::Mode::FILE_EXECUTABLE => {
- let obj = find(&entry.id, buf).map_err(|err| crate::checkout::Error::Find {
- err,
- oid: entry.id,
- path: dest.to_path_buf(),
- })?;
-
- #[cfg_attr(not(unix), allow(unused_mut))]
- let mut options = open_options(dest, destination_is_initially_empty, overwrite_existing);
- let needs_executable_bit = executable_bit && entry.mode == gix_index::entry::Mode::FILE_EXECUTABLE;
- #[cfg(unix)]
- if needs_executable_bit && destination_is_initially_empty {
- use std::os::unix::fs::OpenOptionsExt;
- // Note that these only work if the file was newly created, but won't if it's already
- // existing, possibly without the executable bit set. Thus we do this only if the file is new.
- options.mode(0o777);
- }
-
- let mut file = try_write_or_unlink(dest, overwrite_existing, |p| options.open(p))?;
- file.write_all(obj.data)?;
-
- // For possibly existing, overwritten files, we must change the file mode explicitly.
- #[cfg(unix)]
- if needs_executable_bit && !destination_is_initially_empty {
- use std::os::unix::fs::PermissionsExt;
- let mut perm = std::fs::symlink_metadata(dest)?.permissions();
- perm.set_mode(0o777);
- std::fs::set_permissions(dest, perm)?;
- }
- // NOTE: we don't call `file.sync_all()` here knowing that some filesystems don't handle this well.
- // revisit this once there is a bug to fix.
- entry.stat = Stat::from_fs(&file.metadata()?)?;
- file.close()?;
- obj.data.len()
- }
- gix_index::entry::Mode::SYMLINK => {
- let obj = find(&entry.id, buf).map_err(|err| crate::checkout::Error::Find {
- err,
- oid: entry.id,
- path: dest.to_path_buf(),
- })?;
- let symlink_destination = gix_path::try_from_byte_slice(obj.data)
- .map_err(|_| crate::checkout::Error::IllformedUtf8 { path: obj.data.into() })?;
-
- if symlink {
- try_write_or_unlink(dest, overwrite_existing, |p| {
- gix_fs::symlink::create(symlink_destination, p)
- })?;
- } else {
- let mut file = try_write_or_unlink(dest, overwrite_existing, |p| {
- open_options(p, destination_is_initially_empty, overwrite_existing).open(dest)
- })?;
- file.write_all(obj.data)?;
- file.close()?;
- }
-
- entry.stat = Stat::from_fs(&std::fs::symlink_metadata(dest)?)?;
- obj.data.len()
- }
- gix_index::entry::Mode::DIR => todo!(),
- gix_index::entry::Mode::COMMIT => todo!(),
- _ => unreachable!(),
- };
- Ok(object_size)
-}
-
-/// Note that this works only because we assume to not race ourselves when symlinks are involved, and we do this by
-/// delaying symlink creation to the end and will always do that sequentially.
-/// It's still possible to fall for a race if other actors create symlinks in our path, but that's nothing to defend against.
-fn try_write_or_unlink<T>(
- path: &Path,
- overwrite_existing: bool,
- op: impl Fn(&Path) -> std::io::Result<T>,
-) -> std::io::Result<T> {
- if overwrite_existing {
- match op(path) {
- Ok(res) => Ok(res),
- Err(err) if gix_fs::symlink::is_collision_error(&err) => {
- try_unlink_path_recursively(path, &std::fs::symlink_metadata(path)?)?;
- op(path)
- }
- Err(err) => Err(err),
- }
- } else {
- op(path)
- }
-}
-
-fn try_unlink_path_recursively(path: &Path, path_meta: &std::fs::Metadata) -> std::io::Result<()> {
- if path_meta.is_dir() {
- std::fs::remove_dir_all(path)
- } else if path_meta.file_type().is_symlink() {
- gix_fs::symlink::remove(path)
- } else {
- std::fs::remove_file(path)
- }
-}
-
-#[cfg(not(debug_assertions))]
-fn debug_assert_dest_is_no_symlink(_path: &Path) {}
-
-/// This is a debug assertion as we expect the machinery calling this to prevent this possibility in the first place
-#[cfg(debug_assertions)]
-fn debug_assert_dest_is_no_symlink(path: &Path) {
- if let Ok(meta) = path.metadata() {
- debug_assert!(
- !meta.file_type().is_symlink(),
- "BUG: should not ever allow to overwrite/write-into the target of a symbolic link: {}",
- path.display()
- );
- }
-}
-
-fn open_options(path: &Path, destination_is_initially_empty: bool, overwrite_existing: bool) -> OpenOptions {
- if overwrite_existing || !destination_is_initially_empty {
- debug_assert_dest_is_no_symlink(path);
- }
- let mut options = gix_features::fs::open_options_no_follow();
- options
- .create_new(destination_is_initially_empty && !overwrite_existing)
- .create(!destination_is_initially_empty || overwrite_existing)
- .write(true);
- options
-}
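
The flag logic in the removed `open_options()` helper above is compact; the following is a minimal standalone sketch of the same decision using only `std` (omitting the gix-specific `open_options_no_follow()` base), showing how exclusive creation is what turns pre-existing files into collision errors.

use std::fs::OpenOptions;

// Mirrors the removed helper: `create_new` (exclusive creation) is only used when the
// destination is known to be empty and nothing may be overwritten, so an already-existing
// file surfaces as io::ErrorKind::AlreadyExists and is recorded as a collision by the caller.
fn open_options_sketch(destination_is_initially_empty: bool, overwrite_existing: bool) -> OpenOptions {
    let mut options = OpenOptions::new();
    options
        .create_new(destination_is_initially_empty && !overwrite_existing)
        .create(!destination_is_initially_empty || overwrite_existing)
        .write(true);
    options
}
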
diff --git a/vendor/gix-worktree/src/checkout/function.rs b/vendor/gix-worktree/src/checkout/function.rs
deleted file mode 100644
index 8e69fd4d6..000000000
--- a/vendor/gix-worktree/src/checkout/function.rs
+++ /dev/null
@@ -1,119 +0,0 @@
-use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
-
-use gix_features::{interrupt, parallel::in_parallel, progress, progress::Progress};
-use gix_hash::oid;
-
-use crate::{cache, checkout::chunk, Cache};
-
- /// Note that interruption still produces an `Ok(…)` value, so the caller should look at `should_interrupt` to communicate the outcome.
-/// `dir` is the directory into which to checkout the `index`.
-/// `git_dir` is the `.git` directory for reading additional per-repository configuration files.
-#[allow(clippy::too_many_arguments)]
-pub fn checkout<Find, E>(
- index: &mut gix_index::State,
- dir: impl Into<std::path::PathBuf>,
- find: Find,
- files: &mut impl Progress,
- bytes: &mut impl Progress,
- should_interrupt: &AtomicBool,
- options: crate::checkout::Options,
-) -> Result<crate::checkout::Outcome, crate::checkout::Error<E>>
-where
- Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E> + Send + Clone,
- E: std::error::Error + Send + Sync + 'static,
-{
- let paths = index.take_path_backing();
- let res = checkout_inner(index, &paths, dir, find, files, bytes, should_interrupt, options);
- index.return_path_backing(paths);
- res
-}
-
-#[allow(clippy::too_many_arguments)]
-fn checkout_inner<Find, E>(
- index: &mut gix_index::State,
- paths: &gix_index::PathStorage,
- dir: impl Into<std::path::PathBuf>,
- find: Find,
- files: &mut impl Progress,
- bytes: &mut impl Progress,
- should_interrupt: &AtomicBool,
- options: crate::checkout::Options,
-) -> Result<crate::checkout::Outcome, crate::checkout::Error<E>>
-where
- Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E> + Send + Clone,
- E: std::error::Error + Send + Sync + 'static,
-{
- let num_files = AtomicUsize::default();
- let dir = dir.into();
- let case = if options.fs.ignore_case {
- gix_glob::pattern::Case::Fold
- } else {
- gix_glob::pattern::Case::Sensitive
- };
- let (chunk_size, thread_limit, num_threads) = gix_features::parallel::optimize_chunk_size_and_thread_limit(
- 100,
- index.entries().len().into(),
- options.thread_limit,
- None,
- );
-
- let state = cache::State::for_checkout(options.overwrite_existing, options.attributes.clone());
- let attribute_files = state.id_mappings_from_index(index, paths, Default::default(), case);
- let mut ctx = chunk::Context {
- buf: Vec::new(),
- path_cache: Cache::new(dir, state, case, Vec::with_capacity(512), attribute_files),
- find,
- options,
- num_files: &num_files,
- };
-
- let chunk::Outcome {
- mut collisions,
- mut errors,
- mut bytes_written,
- delayed,
- } = if num_threads == 1 {
- let entries_with_paths = interrupt::Iter::new(index.entries_mut_with_paths_in(paths), should_interrupt);
- chunk::process(entries_with_paths, files, bytes, &mut ctx)?
- } else {
- let entries_with_paths = interrupt::Iter::new(index.entries_mut_with_paths_in(paths), should_interrupt);
- in_parallel(
- gix_features::iter::Chunks {
- inner: entries_with_paths,
- size: chunk_size,
- },
- thread_limit,
- {
- let ctx = ctx.clone();
- move |_| (progress::Discard, progress::Discard, ctx.clone())
- },
- |chunk, (files, bytes, ctx)| chunk::process(chunk.into_iter(), files, bytes, ctx),
- chunk::Reduce {
- files,
- bytes,
- num_files: &num_files,
- aggregate: Default::default(),
- marker: Default::default(),
- },
- )?
- };
-
- for (entry, entry_path) in delayed {
- bytes_written += chunk::checkout_entry_handle_result(
- entry,
- entry_path,
- &mut errors,
- &mut collisions,
- files,
- bytes,
- &mut ctx,
- )? as u64;
- }
-
- Ok(crate::checkout::Outcome {
- files_updated: num_files.load(Ordering::Relaxed),
- collisions,
- errors,
- bytes_written,
- })
-}
diff --git a/vendor/gix-worktree/src/checkout/mod.rs b/vendor/gix-worktree/src/checkout/mod.rs
deleted file mode 100644
index 11f39b1b2..000000000
--- a/vendor/gix-worktree/src/checkout/mod.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-#![allow(missing_docs)]
-
-use bstr::BString;
-use gix_index::entry::stat;
-
-#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
-pub struct Collision {
- /// the path that collided with something already present on disk.
- pub path: BString,
- /// The io error we encountered when checking out `path`.
- pub error_kind: std::io::ErrorKind,
-}
-
-pub struct ErrorRecord {
- /// the path that encountered the error.
- pub path: BString,
- /// The error
- pub error: Box<dyn std::error::Error + Send + Sync + 'static>,
-}
-
-#[derive(Default)]
-pub struct Outcome {
- /// The number of files updated or created.
- pub files_updated: usize,
- /// The number of bytes written to disk.
- pub bytes_written: u64,
- pub collisions: Vec<Collision>,
- pub errors: Vec<ErrorRecord>,
-}
-
-#[derive(Clone, Default)]
-pub struct Options {
- /// capabilities of the file system
- pub fs: gix_fs::Capabilities,
- /// If set, don't use more than this amount of threads.
- /// Otherwise, usually use as many threads as there are logical cores.
- /// A value of 0 is interpreted as no-limit
- pub thread_limit: Option<usize>,
- /// If true, we assume no file exists in the target directory, and want exclusive access to it.
- /// This should be enabled when cloning to avoid checks for freshness of files. This also enables
- /// detection of collisions based on whether or not exclusive file creation succeeds or fails.
- pub destination_is_initially_empty: bool,
- /// If true, default false, worktree entries on disk will be overwritten with content from the index
- /// even if they appear to be changed. When creating directories that clash with existing worktree entries,
- /// these will try to delete the existing entry.
- /// This is similar in behaviour as `git checkout --force`.
- pub overwrite_existing: bool,
- /// If true, default false, try to checkout as much as possible and don't abort on first error which isn't
- /// due to a conflict.
- /// The checkout operation will never fail, but count the encountered errors instead along with their paths.
- pub keep_going: bool,
- /// Control how stat comparisons are made when checking if a file is fresh.
- pub stat_options: stat::Options,
- /// A stack of attributes to use with the filesystem cache to use as driver for filters.
- pub attributes: crate::cache::state::Attributes,
-}
-
-#[derive(Debug, thiserror::Error)]
-pub enum Error<E: std::error::Error + Send + Sync + 'static> {
- #[error("Could not convert path to UTF8: {}", .path)]
- IllformedUtf8 { path: BString },
- #[error("The clock was off when reading file related metadata after updating a file on disk")]
- Time(#[from] std::time::SystemTimeError),
- #[error("IO error while writing blob or reading file metadata or changing filetype")]
- Io(#[from] std::io::Error),
- #[error("object {} for checkout at {} could not be retrieved from object database", .oid.to_hex(), .path.display())]
- Find {
- #[source]
- err: E,
- oid: gix_hash::ObjectId,
- path: std::path::PathBuf,
- },
-}
-
-mod chunk;
-mod entry;
-pub(crate) mod function;
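
The documented option fields above combine naturally for a clone-style checkout; the following is a hedged sketch against a crate version that still exports `gix_worktree::checkout` (the struct derives `Default`, so unspecified fields keep their defaults).

// Clone-style settings: the target directory is empty, so exclusive file creation both
// skips freshness checks and doubles as collision detection; `keep_going` collects
// per-path errors instead of aborting on the first failure.
fn clone_checkout_options(fs: gix_fs::Capabilities) -> gix_worktree::checkout::Options {
    gix_worktree::checkout::Options {
        fs,
        destination_is_initially_empty: true,
        keep_going: true,
        ..Default::default()
    }
}
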
diff --git a/vendor/gix-worktree/src/lib.rs b/vendor/gix-worktree/src/lib.rs
index 2626fe508..32d1d7c0e 100644
--- a/vendor/gix-worktree/src/lib.rs
+++ b/vendor/gix-worktree/src/lib.rs
@@ -1,4 +1,4 @@
-//! A crate with all index-centric functionality that is interacting with a worktree.
+//! A crate with utility types for use by other crates that implement specifics.
//!
//! Unless specified differently, all operations need an index file (e.g. `.git/index`) as driver.
//!
@@ -11,9 +11,6 @@
#![deny(missing_docs, rust_2018_idioms, unsafe_code)]
use bstr::BString;
-///
-pub mod read;
-
/// A cache for efficiently executing operations on directories and files which are encountered in sorted order.
/// That way, these operations can be re-used for subsequent invocations in the same directory.
///
@@ -35,25 +32,20 @@ pub mod read;
///
/// The caching is only useful if consecutive calls to create a directory are using a sorted list of entries.
#[derive(Clone)]
-pub struct Cache {
+pub struct Stack {
stack: gix_fs::Stack,
/// tells us what to do as we change paths.
- state: cache::State,
+ state: stack::State,
/// A buffer used when reading attribute or ignore files or their respective objects from the object database.
buf: Vec<u8>,
/// If case folding should happen when looking up attributes or exclusions.
case: gix_glob::pattern::Case,
/// A lookup table for object ids to read from in some situations when looking up attributes or exclusions.
id_mappings: Vec<PathIdMapping>,
- statistics: cache::Statistics,
+ statistics: stack::Statistics,
}
pub(crate) type PathIdMapping = (BString, gix_hash::ObjectId);
///
-pub mod cache;
-pub mod checkout;
-pub use checkout::function::checkout;
-
-pub mod status;
-pub use status::function::status;
+pub mod stack;
diff --git a/vendor/gix-worktree/src/read.rs b/vendor/gix-worktree/src/read.rs
deleted file mode 100644
index a54fc2c76..000000000
--- a/vendor/gix-worktree/src/read.rs
+++ /dev/null
@@ -1,64 +0,0 @@
-//! This module allows creating git blobs from worktree files.
-//!
-//! For the most part a blob just contains the raw on-disk data. However symlinks need to be considered properly
-//! and attributes/config options need to be considered.
-
-use std::{
- fs::{read_link, File},
- io::{self, Read},
- path::Path,
-};
-
-use gix_object::Blob;
-use gix_path as path;
-
-// TODO: tests
-
-// TODO: what to do about precompose unicode and ignore_case for symlinks
-
-/// Create a blob from a file or symlink.
-pub fn blob(path: &Path, capabilities: &gix_fs::Capabilities) -> io::Result<Blob> {
- let mut data = Vec::new();
- data_to_buf(path, &mut data, capabilities)?;
- Ok(Blob { data })
-}
-
-/// Create a blob from a file or symlink.
-pub fn blob_with_meta(path: &Path, is_symlink: bool, capabilities: &gix_fs::Capabilities) -> io::Result<Blob> {
- let mut data = Vec::new();
- data_to_buf_with_meta(path, &mut data, is_symlink, capabilities)?;
- Ok(Blob { data })
-}
-
-/// Create blob data from a file or symlink.
-pub fn data_to_buf<'a>(path: &Path, buf: &'a mut Vec<u8>, capabilities: &gix_fs::Capabilities) -> io::Result<&'a [u8]> {
- data_to_buf_with_meta(path, buf, path.symlink_metadata()?.is_symlink(), capabilities)
-}
-
-/// Create a blob from a file or symlink.
-pub fn data_to_buf_with_meta<'a>(
- path: &Path,
- buf: &'a mut Vec<u8>,
- is_symlink: bool,
- capabilities: &gix_fs::Capabilities,
-) -> io::Result<&'a [u8]> {
- buf.clear();
- // symlinks are only stored as actual symlinks if the FS supports it; otherwise they are just
- // normal files with their content equal to the linked path (so can be read normally)
- //
- if is_symlink && capabilities.symlink {
- // conversion to bstr can never fail because symlinks are only used
- // on unix (by git) so no reason to use the try version here
- let symlink_path = path::into_bstr(read_link(path)?);
- buf.extend_from_slice(&symlink_path);
- // TODO: there is no reason this should be a clone
- // std isn't great about allowing users to avoid allocations but we could
- // simply write our own wrapper around libc::readlink which reuses the
- // buffer. This would require unsafe code though (obviously)
- } else {
- buf.clear();
- File::open(path)?.read_to_end(buf)?;
- // TODO apply filters
- }
- Ok(buf.as_slice())
-}
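
For context, a hedged usage sketch of the removed helpers, against a crate version that still exports `gix_worktree::read`: it snapshots a worktree file (or symlink) as blob data, with `capabilities.symlink` deciding whether the link target or the file contents are read.

use std::path::Path;

// `blob_with_meta()` avoids a second `symlink_metadata()` call when the caller already
// knows whether `path` is a symlink; here we look it up ourselves for simplicity.
fn snapshot_blob(path: &Path, caps: &gix_fs::Capabilities) -> std::io::Result<gix_object::Blob> {
    let is_symlink = path.symlink_metadata()?.is_symlink();
    gix_worktree::read::blob_with_meta(path, is_symlink, caps)
}
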
diff --git a/vendor/gix-worktree/src/cache/delegate.rs b/vendor/gix-worktree/src/stack/delegate.rs
index 64b5a9bab..28d8ecf34 100644
--- a/vendor/gix-worktree/src/cache/delegate.rs
+++ b/vendor/gix-worktree/src/stack/delegate.rs
@@ -1,4 +1,6 @@
-use crate::{cache::State, PathIdMapping};
+use bstr::{BStr, ByteSlice};
+
+use crate::{stack::State, PathIdMapping};
/// Various aggregate numbers related to the stack delegate itself.
#[derive(Default, Clone, Copy, Debug)]
@@ -16,36 +18,44 @@ pub struct Statistics {
pub pop_directory: usize,
}
-pub(crate) struct StackDelegate<'a, Find> {
+pub(crate) type FindFn<'a> = dyn for<'b> FnMut(
+ &gix_hash::oid,
+ &'b mut Vec<u8>,
+ ) -> Result<gix_object::BlobRef<'b>, Box<dyn std::error::Error + Send + Sync>>
+ + 'a;
+
+pub(crate) struct StackDelegate<'a, 'find> {
pub state: &'a mut State,
pub buf: &'a mut Vec<u8>,
+ #[cfg_attr(not(feature = "attributes"), allow(dead_code))]
pub is_dir: bool,
pub id_mappings: &'a Vec<PathIdMapping>,
- pub find: Find,
+ pub find: &'find mut FindFn<'find>,
pub case: gix_glob::pattern::Case,
pub statistics: &'a mut super::Statistics,
}
-impl<'a, Find, E> gix_fs::stack::Delegate for StackDelegate<'a, Find>
-where
- Find: for<'b> FnMut(&gix_hash::oid, &'b mut Vec<u8>) -> Result<gix_object::BlobRef<'b>, E>,
- E: std::error::Error + Send + Sync + 'static,
-{
+impl<'a, 'find> gix_fs::stack::Delegate for StackDelegate<'a, 'find> {
fn push_directory(&mut self, stack: &gix_fs::Stack) -> std::io::Result<()> {
self.statistics.delegate.push_directory += 1;
let dir_bstr = gix_path::into_bstr(stack.current());
- let mut rela_dir = gix_glob::search::pattern::strip_base_handle_recompute_basename_pos(
- gix_path::into_bstr(stack.root()).as_ref(),
- dir_bstr.as_ref(),
- None,
- self.case,
- )
- .expect("dir in root")
- .0;
- if rela_dir.starts_with(b"/") {
- rela_dir = &rela_dir[1..];
- }
+ let rela_dir_cow = gix_path::to_unix_separators_on_windows(
+ gix_glob::search::pattern::strip_base_handle_recompute_basename_pos(
+ gix_path::into_bstr(stack.root()).as_ref(),
+ dir_bstr.as_ref(),
+ None,
+ self.case,
+ )
+ .expect("dir in root")
+ .0,
+ );
+ let rela_dir: &BStr = if rela_dir_cow.starts_with(b"/") {
+ rela_dir_cow[1..].as_bstr()
+ } else {
+ rela_dir_cow.as_ref()
+ };
match &mut self.state {
+ #[cfg(feature = "attributes")]
State::CreateDirectoryAndAttributesStack { attributes, .. } => {
attributes.push_directory(
stack.root(),
@@ -53,10 +63,11 @@ where
rela_dir,
self.buf,
self.id_mappings,
- &mut self.find,
+ self.find,
&mut self.statistics.attributes,
)?;
}
+ #[cfg(feature = "attributes")]
State::AttributesAndIgnoreStack { ignore, attributes } => {
attributes.push_directory(
stack.root(),
@@ -78,6 +89,16 @@ where
&mut self.statistics.ignore,
)?
}
+ #[cfg(feature = "attributes")]
+ State::AttributesStack(attributes) => attributes.push_directory(
+ stack.root(),
+ stack.current(),
+ rela_dir,
+ self.buf,
+ self.id_mappings,
+ &mut self.find,
+ &mut self.statistics.attributes,
+ )?,
State::IgnoreStack(ignore) => ignore.push_directory(
stack.root(),
stack.current(),
@@ -92,9 +113,11 @@ where
Ok(())
}
+ #[cfg_attr(not(feature = "attributes"), allow(unused_variables))]
fn push(&mut self, is_last_component: bool, stack: &gix_fs::Stack) -> std::io::Result<()> {
self.statistics.delegate.push_element += 1;
match &mut self.state {
+ #[cfg(feature = "attributes")]
State::CreateDirectoryAndAttributesStack {
unlink_on_collision,
attributes: _,
@@ -105,7 +128,9 @@ where
&mut self.statistics.delegate.num_mkdir_calls,
*unlink_on_collision,
)?,
- State::AttributesAndIgnoreStack { .. } | State::IgnoreStack(_) => {}
+ #[cfg(feature = "attributes")]
+ State::AttributesAndIgnoreStack { .. } | State::AttributesStack(_) => {}
+ State::IgnoreStack(_) => {}
}
Ok(())
}
@@ -113,13 +138,19 @@ where
fn pop_directory(&mut self) {
self.statistics.delegate.pop_directory += 1;
match &mut self.state {
+ #[cfg(feature = "attributes")]
State::CreateDirectoryAndAttributesStack { attributes, .. } => {
attributes.pop_directory();
}
+ #[cfg(feature = "attributes")]
State::AttributesAndIgnoreStack { attributes, ignore } => {
attributes.pop_directory();
ignore.pop_directory();
}
+ #[cfg(feature = "attributes")]
+ State::AttributesStack(attributes) => {
+ attributes.pop_directory();
+ }
State::IgnoreStack(ignore) => {
ignore.pop_directory();
}
@@ -127,6 +158,7 @@ where
}
}
+#[cfg(feature = "attributes")]
fn create_leading_directory(
is_last_component: bool,
stack: &gix_fs::Stack,
diff --git a/vendor/gix-worktree/src/cache/mod.rs b/vendor/gix-worktree/src/stack/mod.rs
index b3ccc6bac..c10320199 100644
--- a/vendor/gix-worktree/src/cache/mod.rs
+++ b/vendor/gix-worktree/src/stack/mod.rs
@@ -4,10 +4,10 @@ use std::path::{Path, PathBuf};
use bstr::{BStr, ByteSlice};
use gix_hash::oid;
-use super::Cache;
+use super::Stack;
use crate::PathIdMapping;
-/// Various aggregate numbers collected from when the corresponding [`Cache`] was instantiated.
+/// Various aggregate numbers collected from when the corresponding [`Stack`] was instantiated.
#[derive(Default, Clone, Copy, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Statistics {
@@ -16,6 +16,7 @@ pub struct Statistics {
/// Information about the stack delegate.
pub delegate: delegate::Statistics,
/// Information about attributes
+ #[cfg(feature = "attributes")]
pub attributes: state::attributes::Statistics,
/// Information about the ignore stack
pub ignore: state::ignore::Statistics,
@@ -24,6 +25,7 @@ pub struct Statistics {
#[derive(Clone)]
pub enum State {
/// Useful for checkout where directories need creation, but we need to access attributes as well.
+ #[cfg(feature = "attributes")]
CreateDirectoryAndAttributesStack {
/// If there is a symlink or a file in our path, try to unlink it before creating the directory.
unlink_on_collision: bool,
@@ -31,24 +33,28 @@ pub enum State {
attributes: state::Attributes,
},
/// Used when adding files, requiring access to both attributes and ignore information, for example during add operations.
+ #[cfg(feature = "attributes")]
AttributesAndIgnoreStack {
/// State to handle attribute information
attributes: state::Attributes,
/// State to handle exclusion information
ignore: state::Ignore,
},
+ /// Used when only attributes are required, typically with fully virtual worktrees.
+ #[cfg(feature = "attributes")]
+ AttributesStack(state::Attributes),
/// Used when providing worktree status information.
IgnoreStack(state::Ignore),
}
#[must_use]
pub struct Platform<'a> {
- parent: &'a Cache,
+ parent: &'a Stack,
is_dir: Option<bool>,
}
/// Initialization
-impl Cache {
+impl Stack {
/// Create a new instance with `worktree_root` being the base for all future paths we match.
/// `state` defines the capabilities of the cache.
/// The `case` configures attribute and exclusion case sensitivity at *query time*, which should match the case that
@@ -62,7 +68,7 @@ impl Cache {
id_mappings: Vec<PathIdMapping>,
) -> Self {
let root = worktree_root.into();
- Cache {
+ Stack {
stack: gix_fs::Stack::new(root),
state,
case,
@@ -74,19 +80,19 @@ impl Cache {
}
/// Entry points for attribute query
-impl Cache {
+impl Stack {
/// Append the `relative` path to the root directory of the cache and efficiently create leading directories, while assuring that no
/// symlinks are in that path.
/// If `is_dir` is known to be `Some(true)`, then `relative` points to a directory itself, in which case the entire resulting
/// path is created as a directory. If it's not known, it is assumed to be a file.
- /// `find` may be used to look up objects from an [id mapping][crate::cache::State::id_mappings_from_index()], with mappings
+ /// `find` may be used to look up objects from an [id mapping][crate::stack::State::id_mappings_from_index()], with mappings
///
/// Provide access to cached information for that `relative` path via the returned platform.
pub fn at_path<Find, E>(
&mut self,
relative: impl AsRef<Path>,
is_dir: Option<bool>,
- find: Find,
+ mut find: Find,
) -> std::io::Result<Platform<'_>>
where
Find: for<'a> FnMut(&oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E>,
@@ -98,17 +104,18 @@ impl Cache {
buf: &mut self.buf,
is_dir: is_dir.unwrap_or(false),
id_mappings: &self.id_mappings,
- find,
+ find: &mut |oid, buf| Ok(find(oid, buf).map_err(Box::new)?),
case: self.case,
statistics: &mut self.statistics,
};
- self.stack.make_relative_path_current(relative, &mut delegate)?;
+ self.stack
+ .make_relative_path_current(relative.as_ref(), &mut delegate)?;
Ok(Platform { parent: self, is_dir })
}
/// Obtain a platform for lookups from a repo-`relative` path, typically obtained from an index entry. `is_dir` should reflect
/// whether it's a directory or not, or left at `None` if unknown.
- /// `find` may be used to look up objects from an [id mapping][crate::cache::State::id_mappings_from_index()].
+ /// `find` may be used to look up objects from an [id mapping][crate::stack::State::id_mappings_from_index()].
/// All effects are similar to [`at_path()`][Self::at_path()].
///
/// If `relative` ends with `/` and `is_dir` is `None`, it is automatically assumed to be a directory.
@@ -138,7 +145,7 @@ impl Cache {
}
/// Mutation
-impl Cache {
+impl Stack {
/// Reset the statistics after returning them.
pub fn take_statistics(&mut self) -> Statistics {
std::mem::take(&mut self.statistics)
@@ -148,10 +155,16 @@ impl Cache {
pub fn state_mut(&mut self) -> &mut State {
&mut self.state
}
+
+ /// Change the `case` of the next match to the given one.
+ pub fn set_case(&mut self, case: gix_glob::pattern::Case) -> &mut Self {
+ self.case = case;
+ self
+ }
}
/// Access
-impl Cache {
+impl Stack {
/// Return the statistics we gathered thus far.
pub fn statistics(&self) -> &Statistics {
&self.statistics
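
A hedged usage sketch of the renamed `Stack` API as declared above, assuming the `attributes` cargo feature implied by the cfg-gates is enabled. The object-database callback is a stub of this example, since nothing is read from id mappings here; a real caller would pass a lookup into its object database.

use std::path::Path;
use gix_worktree::{stack, Stack};

// Stub `find`: this sketch provides no id mappings, so the callback is never expected to succeed.
fn no_odb<'a>(
    _id: &gix_hash::oid,
    _buf: &'a mut Vec<u8>,
) -> Result<gix_object::BlobRef<'a>, std::io::Error> {
    Err(std::io::Error::new(std::io::ErrorKind::NotFound, "no object database in this sketch"))
}

// Resolve a repo-relative path and query the attributes matching it.
fn attributes_at(worktree_root: &Path, rela_path: &str) -> std::io::Result<gix_attributes::search::Outcome> {
    let mut stack = Stack::new(
        worktree_root,
        stack::State::AttributesStack(Default::default()), // default `Attributes`: globals only
        gix_glob::pattern::Case::Sensitive,
        Vec::new(), // scratch buffer
        Vec::new(), // no path-to-object-id mappings
    );
    let mut outcome = stack.attribute_matches();
    let platform = stack.at_path(rela_path, Some(false), no_odb)?;
    platform.matching_attributes(&mut outcome);
    Ok(outcome)
}
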
diff --git a/vendor/gix-worktree/src/cache/platform.rs b/vendor/gix-worktree/src/stack/platform.rs
index 27d0bfbc8..3c6295f89 100644
--- a/vendor/gix-worktree/src/cache/platform.rs
+++ b/vendor/gix-worktree/src/stack/platform.rs
@@ -2,7 +2,7 @@ use std::path::Path;
use bstr::ByteSlice;
-use crate::cache::Platform;
+use crate::stack::Platform;
/// Access
impl<'a> Platform<'a> {
@@ -40,11 +40,12 @@ impl<'a> Platform<'a> {
/// # Panics
///
/// If the cache was configured without attributes.
+ #[cfg(feature = "attributes")]
pub fn matching_attributes(&self, out: &mut gix_attributes::search::Outcome) -> bool {
let attrs = self.parent.state.attributes_or_panic();
let relative_path =
gix_path::to_unix_separators_on_windows(gix_path::into_bstr(self.parent.stack.current_relative()));
- attrs.matching_attributes(relative_path.as_bstr(), self.parent.case, out)
+ attrs.matching_attributes(relative_path.as_bstr(), self.parent.case, self.is_dir, out)
}
}
diff --git a/vendor/gix-worktree/src/cache/state/attributes.rs b/vendor/gix-worktree/src/stack/state/attributes.rs
index c42e36f74..d49de1288 100644
--- a/vendor/gix-worktree/src/cache/state/attributes.rs
+++ b/vendor/gix-worktree/src/stack/state/attributes.rs
@@ -3,9 +3,10 @@ use std::path::{Path, PathBuf};
use bstr::{BStr, ByteSlice};
use gix_glob::pattern::Case;
+use crate::stack::delegate::FindFn;
use crate::{
- cache::state::{AttributeMatchGroup, Attributes},
- Cache, PathIdMapping,
+ stack::state::{AttributeMatchGroup, Attributes},
+ PathIdMapping, Stack,
};
/// Various aggregate numbers related [`Attributes`].
@@ -21,16 +22,17 @@ pub struct Statistics {
}
/// Decide where to read `.gitattributes` files from.
+///
+/// To retrieve attribute files from id mappings, see
+/// [State::id_mappings_from_index()][crate::stack::State::id_mappings_from_index()].
+///
+/// These mappings are typically produced from an index.
+/// If a tree should be the source, build an attribute list from a tree instead, or convert a tree to an index.
+///
#[derive(Default, Debug, Clone, Copy)]
pub enum Source {
- /// Retrieve attribute files from id mappings, see
- /// [State::id_mappings_from_index()][crate::cache::State::id_mappings_from_index()].
- ///
- /// These mappings are typically produced from an index.
- /// If a tree should be the source, build an attribute list from a tree instead, or convert a tree to an index.
- ///
- /// Use this when no worktree checkout is available, like in bare repositories or when accessing blobs from other parts
- /// of the history which aren't checked out.
+ /// Use this when no worktree checkout is available, like in bare repositories, during clones, or when accessing blobs from
+ /// other parts of the history which aren't checked out.
#[default]
IdMapping,
/// Read from an id mappings and if not present, read from the worktree.
@@ -44,6 +46,17 @@ pub enum Source {
WorktreeThenIdMapping,
}
+impl Source {
+ /// Returns non-worktree variants of `self` if `is_bare` is true.
+ pub fn adjust_for_bare(self, is_bare: bool) -> Self {
+ if is_bare {
+ Source::IdMapping
+ } else {
+ self
+ }
+ }
+}
+
/// Initialization
impl Attributes {
/// Create a new instance from an attribute match group that represents `globals`. It can more easily be created with
@@ -75,20 +88,16 @@ impl Attributes {
}
#[allow(clippy::too_many_arguments)]
- pub(crate) fn push_directory<Find, E>(
+ pub(crate) fn push_directory(
&mut self,
root: &Path,
dir: &Path,
rela_dir: &BStr,
buf: &mut Vec<u8>,
id_mappings: &[PathIdMapping],
- mut find: Find,
+ find: &mut FindFn<'_>,
stats: &mut Statistics,
- ) -> std::io::Result<()>
- where
- Find: for<'b> FnMut(&gix_hash::oid, &'b mut Vec<u8>) -> Result<gix_object::BlobRef<'b>, E>,
- E: std::error::Error + Send + Sync + 'static,
- {
+ ) -> std::io::Result<()> {
let attr_path_relative =
gix_path::to_unix_separators_on_windows(gix_path::join_bstr_unix_pathsep(rela_dir, ".gitattributes"));
let attr_file_in_index = id_mappings.binary_search_by(|t| t.0.as_bstr().cmp(attr_path_relative.as_ref()));
@@ -157,7 +166,7 @@ impl Attributes {
// Need one stack level per component so push and pop matches, but only if this isn't the root level which is never popped.
if !added && self.info_attributes.is_none() {
self.stack
- .add_patterns_buffer(&[], Path::new("<empty dummy>"), None, &mut self.collection, true)
+ .add_patterns_buffer(&[], "<empty dummy>".into(), None, &mut self.collection, true)
}
// When reading the root, always the first call, we can try to also read the `.git/info/attributes` file which is
@@ -182,6 +191,7 @@ impl Attributes {
&self,
relative_path: &BStr,
case: Case,
+ is_dir: Option<bool>,
out: &mut gix_attributes::search::Outcome,
) -> bool {
// assure `out` is ready to deal with possibly changed collections (append-only)
@@ -190,7 +200,7 @@ impl Attributes {
let groups = [&self.globals, &self.stack];
let mut has_match = false;
groups.iter().rev().any(|group| {
- has_match |= group.pattern_matching_relative_path(relative_path, case, out);
+ has_match |= group.pattern_matching_relative_path(relative_path, case, is_dir, out);
out.is_done()
});
has_match
@@ -198,8 +208,12 @@ impl Attributes {
}
/// Attribute matching specific methods
-impl Cache {
+impl Stack {
/// Creates a new container to store match outcomes for all attribute matches.
+ ///
+ /// ### Panics
+ ///
+ /// If attributes aren't configured.
pub fn attribute_matches(&self) -> gix_attributes::search::Outcome {
let mut out = gix_attributes::search::Outcome::default();
out.initialize(&self.state.attributes_or_panic().collection);
@@ -207,6 +221,10 @@ impl Cache {
}
/// Creates a new container to store match outcomes for the given attributes.
+ ///
+ /// ### Panics
+ ///
+ /// If attributes aren't configured.
pub fn selected_attribute_matches<'a>(
&self,
given: impl IntoIterator<Item = impl Into<&'a str>>,
@@ -214,8 +232,18 @@ impl Cache {
let mut out = gix_attributes::search::Outcome::default();
out.initialize_with_selection(
&self.state.attributes_or_panic().collection,
- given.into_iter().map(|n| n.into()),
+ given.into_iter().map(Into::into),
);
out
}
+
+ /// Return the metadata collection that enables initializing attribute match outcomes as done in
+ /// [`attribute_matches()`][Stack::attribute_matches()] or [`selected_attribute_matches()`][Stack::selected_attribute_matches()]
+ ///
+ /// ### Panics
+ ///
+ /// If attributes aren't configured.
+ pub fn attributes_collection(&self) -> &gix_attributes::search::MetadataCollection {
+ &self.state.attributes_or_panic().collection
+ }
}
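
A short sketch of the new `adjust_for_bare()` helper introduced above; `repo_is_bare` stands in for whatever the caller knows about its repository.

use gix_worktree::stack::state::attributes::Source;

// Prefer the worktree when one exists; for bare repositories fall back to id mappings,
// since there is no checkout to read .gitattributes files from.
fn choose_attribute_source(repo_is_bare: bool) -> Source {
    Source::WorktreeThenIdMapping.adjust_for_bare(repo_is_bare)
}
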
diff --git a/vendor/gix-worktree/src/cache/state/ignore.rs b/vendor/gix-worktree/src/stack/state/ignore.rs
index dde98da55..e2a2d5a3d 100644
--- a/vendor/gix-worktree/src/cache/state/ignore.rs
+++ b/vendor/gix-worktree/src/stack/state/ignore.rs
@@ -3,8 +3,9 @@ use std::path::Path;
use bstr::{BStr, ByteSlice};
use gix_glob::pattern::Case;
+use crate::stack::delegate::FindFn;
use crate::{
- cache::state::{Ignore, IgnoreMatchGroup},
+ stack::state::{Ignore, IgnoreMatchGroup},
PathIdMapping,
};
@@ -12,7 +13,7 @@ use crate::{
#[derive(Default, Debug, Clone, Copy)]
pub enum Source {
/// Retrieve ignore files from id mappings, see
- /// [State::id_mappings_from_index()][crate::cache::State::id_mappings_from_index()].
+ /// [State::id_mappings_from_index()][crate::stack::State::id_mappings_from_index()].
///
/// These mappings are typically produced from an index.
/// If a tree should be the source, build an attribute list from a tree instead, or convert a tree to an index.
@@ -25,6 +26,17 @@ pub enum Source {
WorktreeThenIdMappingIfNotSkipped,
}
+impl Source {
+ /// Returns non-worktree variants of `self` if `is_bare` is true.
+ pub fn adjust_for_bare(self, is_bare: bool) -> Self {
+ if is_bare {
+ Source::IdMapping
+ } else {
+ self
+ }
+ }
+}
+
/// Various aggregate numbers related [`Ignore`].
#[derive(Default, Clone, Copy, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
@@ -145,26 +157,21 @@ impl Ignore {
}
#[allow(clippy::too_many_arguments)]
- pub(crate) fn push_directory<Find, E>(
+ pub(crate) fn push_directory(
&mut self,
root: &Path,
dir: &Path,
rela_dir: &BStr,
buf: &mut Vec<u8>,
id_mappings: &[PathIdMapping],
- mut find: Find,
+ find: &mut FindFn<'_>,
case: Case,
stats: &mut Statistics,
- ) -> std::io::Result<()>
- where
- Find: for<'b> FnMut(&gix_hash::oid, &'b mut Vec<u8>) -> Result<gix_object::BlobRef<'b>, E>,
- E: std::error::Error + Send + Sync + 'static,
- {
+ ) -> std::io::Result<()> {
self.matched_directory_patterns_stack
.push(self.matching_exclude_pattern_no_dir(rela_dir, Some(true), case));
- let ignore_path_relative =
- gix_path::to_unix_separators_on_windows(gix_path::join_bstr_unix_pathsep(rela_dir, ".gitignore"));
+ let ignore_path_relative = gix_path::join_bstr_unix_pathsep(rela_dir, ".gitignore");
let ignore_file_in_index = id_mappings.binary_search_by(|t| t.0.as_bstr().cmp(ignore_path_relative.as_ref()));
match self.source {
Source::IdMapping => {
diff --git a/vendor/gix-worktree/src/cache/state/mod.rs b/vendor/gix-worktree/src/stack/state/mod.rs
index bdf504568..0b371425a 100644
--- a/vendor/gix-worktree/src/cache/state/mod.rs
+++ b/vendor/gix-worktree/src/stack/state/mod.rs
@@ -1,15 +1,15 @@
-use std::path::PathBuf;
-
use bstr::{BString, ByteSlice};
use gix_glob::pattern::Case;
-use crate::{cache::State, PathIdMapping};
+use crate::{stack::State, PathIdMapping};
+#[cfg(feature = "attributes")]
type AttributeMatchGroup = gix_attributes::Search;
type IgnoreMatchGroup = gix_ignore::Search;
/// State related to attributes associated with files in the repository.
#[derive(Default, Clone)]
+#[cfg(feature = "attributes")]
pub struct Attributes {
/// Attribute patterns which aren't tied to the repository root, hence are global, they contribute first.
globals: AttributeMatchGroup,
@@ -20,7 +20,7 @@ pub struct Attributes {
stack: AttributeMatchGroup,
/// The first time we push the root, we have to load additional information from this file if it exists along with the root attributes
/// file if possible, and keep them there throughout.
- info_attributes: Option<PathBuf>,
+ info_attributes: Option<std::path::PathBuf>,
/// A lookup table to accelerate searches.
collection: gix_attributes::search::MetadataCollection,
/// Where to read `.gitattributes` data from.
@@ -50,6 +50,7 @@ pub struct Ignore {
}
///
+#[cfg(feature = "attributes")]
pub mod attributes;
///
pub mod ignore;
@@ -57,6 +58,7 @@ pub mod ignore;
/// Initialization
impl State {
/// Configure a state to be suitable for checking out files, which only needs access to attribute files read from the index.
+ #[cfg(feature = "attributes")]
pub fn for_checkout(unlink_on_collision: bool, attributes: Attributes) -> Self {
State::CreateDirectoryAndAttributesStack {
unlink_on_collision,
@@ -65,6 +67,7 @@ impl State {
}
/// Configure a state for adding files, with support for ignore files and attribute files.
+ #[cfg(feature = "attributes")]
pub fn for_add(attributes: Attributes, ignore: Ignore) -> Self {
State::AttributesAndIgnoreStack { attributes, ignore }
}
@@ -93,25 +96,33 @@ impl State {
&self,
index: &gix_index::State,
paths: &gix_index::PathStorageRef,
- ignore_source: ignore::Source,
case: Case,
) -> Vec<PathIdMapping> {
let a1_backing;
+ #[cfg(feature = "attributes")]
let a2_backing;
let names = match self {
- State::IgnoreStack(v) => {
- a1_backing = [(v.exclude_file_name_for_directories.as_bytes().as_bstr(), true)];
+ State::IgnoreStack(ignore) => {
+ a1_backing = [(
+ ignore.exclude_file_name_for_directories.as_bytes().as_bstr(),
+ Some(ignore.source),
+ )];
a1_backing.as_ref()
}
+ #[cfg(feature = "attributes")]
State::AttributesAndIgnoreStack { ignore, .. } => {
a2_backing = [
- (ignore.exclude_file_name_for_directories.as_bytes().as_bstr(), true),
- (".gitattributes".into(), false),
+ (
+ ignore.exclude_file_name_for_directories.as_bytes().as_bstr(),
+ Some(ignore.source),
+ ),
+ (".gitattributes".into(), None),
];
a2_backing.as_ref()
}
- State::CreateDirectoryAndAttributesStack { .. } => {
- a1_backing = [(".gitattributes".into(), true)];
+ #[cfg(feature = "attributes")]
+ State::CreateDirectoryAndAttributesStack { .. } | State::AttributesStack(_) => {
+ a1_backing = [(".gitattributes".into(), None)];
a1_backing.as_ref()
}
};
@@ -126,15 +137,15 @@ impl State {
// there won't be a stage 0.
if entry.mode == gix_index::entry::Mode::FILE && (entry.stage() == 0 || entry.stage() == 2) {
let basename = path.rfind_byte(b'/').map_or(path, |pos| path[pos + 1..].as_bstr());
- let is_ignore = names.iter().find_map(|t| {
+ let ignore_source = names.iter().find_map(|t| {
match case {
Case::Sensitive => basename == t.0,
Case::Fold => basename.eq_ignore_ascii_case(t.0),
}
.then_some(t.1)
})?;
- if is_ignore {
- match ignore_source {
+ if let Some(source) = ignore_source {
+ match source {
ignore::Source::IdMapping => {}
ignore::Source::WorktreeThenIdMappingIfNotSkipped => {
// See https://github.com/git/git/blob/master/dir.c#L912:L912
@@ -155,16 +166,20 @@ impl State {
pub(crate) fn ignore_or_panic(&self) -> &Ignore {
match self {
State::IgnoreStack(v) => v,
+ #[cfg(feature = "attributes")]
State::AttributesAndIgnoreStack { ignore, .. } => ignore,
- State::CreateDirectoryAndAttributesStack { .. } => {
+ #[cfg(feature = "attributes")]
+ State::AttributesStack(_) | State::CreateDirectoryAndAttributesStack { .. } => {
unreachable!("BUG: must not try to check excludes without it being setup")
}
}
}
+ #[cfg(feature = "attributes")]
pub(crate) fn attributes_or_panic(&self) -> &Attributes {
match self {
- State::AttributesAndIgnoreStack { attributes, .. }
+ State::AttributesStack(attributes)
+ | State::AttributesAndIgnoreStack { attributes, .. }
| State::CreateDirectoryAndAttributesStack { attributes, .. } => attributes,
State::IgnoreStack(_) => {
unreachable!("BUG: must not try to check excludes without it being setup")
diff --git a/vendor/gix-worktree/src/status/content.rs b/vendor/gix-worktree/src/status/content.rs
deleted file mode 100644
index aa775821a..000000000
--- a/vendor/gix-worktree/src/status/content.rs
+++ /dev/null
@@ -1,80 +0,0 @@
-use gix_hash::ObjectId;
-use gix_index as index;
-use index::Entry;
-
-/// Compares the content of two blobs in some way.
-pub trait CompareBlobs {
- /// Output data produced by [`compare_blobs()`][CompareBlobs::compare_blobs()].
- type Output;
-
- /// Providing the underlying index `entry`, allow comparing a file in the worktree of size `worktree_blob_size`
- /// and allow reading its bytes using `worktree_blob`.
- /// If this function returns `None` the `entry` and the `worktree_blob` are assumed to be identical.
- /// Use `entry_blob` to obtain the data for the blob referred to by `entry`, allowing comparisons of the data itself.
- fn compare_blobs<'a, E>(
- &mut self,
- entry: &'a gix_index::Entry,
- worktree_blob_size: usize,
- worktree_blob: impl ReadDataOnce<'a, E>,
- entry_blob: impl ReadDataOnce<'a, E>,
- ) -> Result<Option<Self::Output>, E>;
-}
-
-/// Lazy borrowed access to blob data.
-pub trait ReadDataOnce<'a, E> {
- /// Returns the contents of this blob.
- ///
- /// This potentially performs IO and other expensive operations
- /// and should only be called when necessary.
- fn read_data(self) -> Result<&'a [u8], E>;
-}
-
-/// Compares two blobs by comparing their size and oid, and only looks at the file if
-/// the size matches; therefore it's very fast.
-#[derive(Clone)]
-pub struct FastEq;
-
-impl CompareBlobs for FastEq {
- type Output = ();
-
- fn compare_blobs<'a, E>(
- &mut self,
- entry: &'a Entry,
- worktree_blob_size: usize,
- worktree_blob: impl ReadDataOnce<'a, E>,
- _entry_blob: impl ReadDataOnce<'a, E>,
- ) -> Result<Option<Self::Output>, E> {
- // make sure to account for racily smudged entries here so that they don't always keep
- // showing up as modified even after their contents have changed again, to a potentially
- // unmodified state. That means that we want to ignore stat.size == 0 for non_empty_blobs.
- if entry.stat.size as usize != worktree_blob_size && (entry.id.is_empty_blob() || entry.stat.size != 0) {
- return Ok(Some(()));
- }
- let blob = worktree_blob.read_data()?;
- let file_hash = gix_object::compute_hash(entry.id.kind(), gix_object::Kind::Blob, blob);
- Ok((entry.id != file_hash).then_some(()))
- }
-}
-
-/// Compares files to blobs by *always* comparing their hashes.
-///
-/// Same as [`FastEq`] but does not contain a fast path for files with mismatched sizes and
-/// therefore always returns an OID that can be reused later.
-#[derive(Clone)]
-pub struct HashEq;
-
-impl CompareBlobs for HashEq {
- type Output = ObjectId;
-
- fn compare_blobs<'a, E>(
- &mut self,
- entry: &'a Entry,
- _worktree_blob_size: usize,
- worktree_blob: impl ReadDataOnce<'a, E>,
- _entry_blob: impl ReadDataOnce<'a, E>,
- ) -> Result<Option<Self::Output>, E> {
- let blob = worktree_blob.read_data()?;
- let file_hash = gix_object::compute_hash(entry.id.kind(), gix_object::Kind::Blob, blob);
- Ok((entry.id != file_hash).then_some(file_hash))
- }
-}
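
Both comparators above reduce to re-hashing the worktree bytes and comparing the result against the index entry's id; the following is a standalone sketch of that check, assuming the `gix_object::compute_hash` signature used in the removed code.

// Returns true if `worktree_bytes`, hashed as a blob with the entry's hash kind,
// no longer matches the object id recorded in the index entry.
fn blob_changed(entry: &gix_index::Entry, worktree_bytes: &[u8]) -> bool {
    let actual = gix_object::compute_hash(entry.id.kind(), gix_object::Kind::Blob, worktree_bytes);
    actual != entry.id
}
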
diff --git a/vendor/gix-worktree/src/status/function.rs b/vendor/gix-worktree/src/status/function.rs
deleted file mode 100644
index 5e01628b4..000000000
--- a/vendor/gix-worktree/src/status/function.rs
+++ /dev/null
@@ -1,331 +0,0 @@
-use std::{io, marker::PhantomData, path::Path};
-
-use bstr::BStr;
-use filetime::FileTime;
-use gix_features::parallel::{in_parallel_if, Reduce};
-
-use crate::{
- read,
- status::{
- content,
- content::CompareBlobs,
- types::{Error, Options},
- Change, VisitEntry,
- },
-};
-
-/// Calculates the changes that need to be applied to an `index` to match the state of the `worktree` and makes them
-/// observable in `collector`, along with information produced by `compare` which gets to see blobs that may have changes.
-/// `options` are used to configure the operation.
-///
-/// Note that `index` is updated with the latest seen stat information from the worktree, and its timestamp is adjusted to
-/// the current time for which it will be considered fresh.
-///
-/// Note that this isn't technically quite what this function does as this also provides some additional information,
-/// like whether a file has conflicts, and files that were added with `git add` are shown as a special
-/// change despite not technically requiring a change to the index, since `git add` already added the file to the index.
-pub fn status<'index, T, Find, E>(
- index: &'index mut gix_index::State,
- worktree: &Path,
- collector: &mut impl VisitEntry<'index, ContentChange = T>,
- compare: impl CompareBlobs<Output = T> + Send + Clone,
- find: Find,
- options: Options,
-) -> Result<(), Error>
-where
- T: Send,
- E: std::error::Error + Send + Sync + 'static,
- Find: for<'a> FnMut(&gix_hash::oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E> + Send + Clone,
-{
- // the order is absolutely critical here: we use the old timestamp to detect racy index entries
- // (modified at or after the last index update); during the index update we then set those
- // entries' size to 0 (see below) to ensure they keep showing up as racy and reset the timestamp.
- let timestamp = index.timestamp();
- index.set_timestamp(FileTime::now());
- let (chunk_size, thread_limit, _) = gix_features::parallel::optimize_chunk_size_and_thread_limit(
- 100,
- index.entries().len().into(),
- options.thread_limit,
- None,
- );
- let (entries, path_backing) = index.entries_mut_and_pathbacking();
- in_parallel_if(
- || true, // TODO: heuristic: when is parallelization not worth it?
- entries.chunks_mut(chunk_size),
- thread_limit,
- {
- let options = &options;
- move |_| {
- (
- State {
- buf: Vec::new(),
- odb_buf: Vec::new(),
- timestamp,
- path_backing,
- worktree,
- options,
- },
- compare.clone(),
- find.clone(),
- )
- }
- },
- |entries, (state, diff, find)| {
- entries
- .iter_mut()
- .filter_map(|entry| state.process(entry, diff, find))
- .collect()
- },
- ReduceChange {
- collector,
- phantom: PhantomData,
- },
- )
-}
-
-struct State<'a, 'b> {
- buf: Vec<u8>,
- odb_buf: Vec<u8>,
- timestamp: FileTime,
- // path_cache: fs::Cache TODO path cache
- path_backing: &'b [u8],
- worktree: &'a Path,
- options: &'a Options,
-}
-
-type StatusResult<'index, T> = Result<(&'index gix_index::Entry, &'index BStr, Option<Change<T>>, bool), Error>;
-
-impl<'index> State<'_, 'index> {
- fn process<T, Find, E>(
- &mut self,
- entry: &'index mut gix_index::Entry,
- diff: &mut impl CompareBlobs<Output = T>,
- find: &mut Find,
- ) -> Option<StatusResult<'index, T>>
- where
- E: std::error::Error + Send + Sync + 'static,
- Find: for<'a> FnMut(&gix_hash::oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E> + Send + Clone,
- {
- let conflict = match entry.stage() {
- 0 => false,
- 1 => true,
- _ => return None,
- };
- if entry.flags.intersects(
- gix_index::entry::Flags::UPTODATE
- | gix_index::entry::Flags::SKIP_WORKTREE
- | gix_index::entry::Flags::ASSUME_VALID
- | gix_index::entry::Flags::FSMONITOR_VALID,
- ) {
- return None;
- }
- let path = entry.path_in(self.path_backing);
- let status = self.compute_status(&mut *entry, path, diff, find);
- Some(status.map(move |status| (&*entry, path, status, conflict)))
- }
-
- /// # On how racy-git is handled here
- ///
- /// Basically the racy detection is a safety mechanism that ensures we can always just compare the stat
- /// information between index and worktree and if they match we don't need to look at the content.
- /// This usually just works but if a file updates quickly we could run into the following situation:
- ///
- /// * save file version `A` from disk into worktree (git add)
- /// * file is changed so fast that the mtime doesn't change - *we only look at seconds by default*
- /// * file contents change but file-size stays the same, so `"foo" -> "bar"` has the same size but different content
- ///
- /// Now both `mtime` and `size`, and all other stat information, is the same but the file has actually changed.
- /// This case is called *racily clean*. *The file should show up as changed but due to a data race it doesn't.*
- /// This is the racy git problem.
- ///
-    /// To solve this we use the following trick: whenever we modify the index, which includes `git status`, we record the
-    /// current timestamp before the modification starts. This timestamp acts as a checkpoint of sorts.
-    /// We "promise" ourselves that after the modification finishes, all entries modified before this timestamp have the
-    /// racy-git problem resolved.
-    ///
-    /// So when we modify the index we must resolve the racy-git problem somehow. To do that we only need to look at
-    /// unchanged entries: changed entries are not interesting, since they already show up as changed anyway, so there
-    /// is no race condition to worry about. This also explains why removing the `return` here has no apparent effect.
-    /// This entire branch is just the optimization of "don't even look at index entries whose stat hasn't changed".
-    /// Without this optimization the result wouldn't change, our status implementation would just be much slower.
-    ///
-    /// We calculate whether an entry is `racy_clean`, i.e. whether the last `timestamp` is at or before the `mtime` of the entry,
-    /// which is what `new_stat.is_racy(..)` does in that branch, and only if we are sure that there is no race condition
-    /// do we `return` early. If we don't `return` early we fall through to a full content comparison below,
-    /// which always yields the correct result, so there is no race condition there.
- ///
-    /// If a file showed up as racily clean and didn't change, we don't need to do anything. After this status check
-    /// completes, the file won't show up as racily clean anymore, since its mtime is now before the new timestamp.
-    /// However, if the file did actually change, we really ran into one of those rare race conditions. In that case we,
-    /// just like git, set the size of the file in the index to 0, which always makes the file show up as changed.
-    /// This in turn requires treating all index entries of size 0 as changed. That is not quite right, of course, because 0-sized files
-    /// can be entirely valid and unchanged, so it only applies if the entry's oid doesn't match the oid of an empty blob,
-    /// which is a constant.
- ///
- /// Adapted from [here](https://github.com/Byron/gitoxide/pull/805#discussion_r1164676777).
- fn compute_status<T, Find, E>(
- &mut self,
- entry: &mut gix_index::Entry,
- git_path: &BStr,
- diff: &mut impl CompareBlobs<Output = T>,
- find: &mut Find,
- ) -> Result<Option<Change<T>>, Error>
- where
- E: std::error::Error + Send + Sync + 'static,
- Find: for<'a> FnMut(&gix_hash::oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E> + Send + Clone,
- {
- // TODO fs cache
- let worktree_path = gix_path::try_from_bstr(git_path).map_err(|_| Error::IllformedUtf8)?;
- let worktree_path = self.worktree.join(worktree_path);
- let metadata = match worktree_path.symlink_metadata() {
- // TODO: check if any parent directory is a symlink
- // we need to use fs::Cache for that
- Ok(metadata) if metadata.is_dir() => {
-                // Index entries normally exist only for files and symlinks;
-                // if a file turned into a directory, it was removed.
-                // The only exceptions are submodules, which are
-                // part of the index despite being directories.
- //
- // TODO: submodules:
- // if entry.mode.contains(Mode::COMMIT) &&
- // resolve_gitlink_ref(ce->name, "HEAD", &sub))
- return Ok(Some(Change::Removed));
- }
- Ok(metadata) => metadata,
- Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(Some(Change::Removed)),
- Err(err) => {
- return Err(err.into());
- }
- };
- if entry.flags.contains(gix_index::entry::Flags::INTENT_TO_ADD) {
- return Ok(Some(Change::IntentToAdd));
- }
- let new_stat = gix_index::entry::Stat::from_fs(&metadata)?;
- let executable_bit_changed =
- match entry
- .mode
- .change_to_match_fs(&metadata, self.options.fs.symlink, self.options.fs.executable_bit)
- {
- Some(gix_index::entry::mode::Change::Type { .. }) => return Ok(Some(Change::Type)),
- Some(gix_index::entry::mode::Change::ExecutableBit) => true,
- None => false,
- };
-
-        // Here we implement racy-git. See racy-git.txt in the git documentation for a detailed description.
- //
- // A file is racy if:
- // 1. its `mtime` is at or after the last index timestamp and its entry stat information
- // matches the on-disk file but the file contents are actually modified
-        // 2. its size is 0 (set after the file was previously detected as racy)
- //
- // The first case is detected below by checking the timestamp if the file is marked unmodified.
- // The second case is usually detected either because the on-disk file is not empty, hence
- // the basic stat match fails, or by checking whether the size doesn't fit the oid.
- let mut racy_clean = false;
- if !executable_bit_changed
- && new_stat.matches(&entry.stat, self.options.stat)
- // TODO: find a test for the following line or remove it. Is this more often hit with smudge/clean filters?
- && (!entry.id.is_empty_blob() || entry.stat.size == 0)
- {
- racy_clean = new_stat.is_racy(self.timestamp, self.options.stat);
- if !racy_clean {
- return Ok(None);
- }
- }
-
- let read_file = WorktreeBlob {
- buf: &mut self.buf,
- path: &worktree_path,
- entry,
- options: self.options,
- };
- let read_blob = OdbBlob {
- buf: &mut self.odb_buf,
- id: &entry.id,
- find,
- };
- let content_change = diff.compare_blobs::<Error>(entry, metadata.len() as usize, read_file, read_blob)?;
-        // This file is racily clean! Set the size to 0 so we keep detecting it as changed until the entry is refreshed.
- if content_change.is_some() && racy_clean {
- entry.stat.size = 0;
- }
- if content_change.is_some() || executable_bit_changed {
- Ok(Some(Change::Modification {
- executable_bit_changed,
- content_change,
- }))
- } else {
- // don't diff against this file next time since we know the file is unchanged.
- entry.stat = new_stat;
- Ok(None)
- }
- }
-}
-
-struct ReduceChange<'a, 'index, T: VisitEntry<'index>> {
- collector: &'a mut T,
- phantom: PhantomData<fn(&'index ())>,
-}
-
-impl<'index, T, C: VisitEntry<'index, ContentChange = T>> Reduce for ReduceChange<'_, 'index, C> {
- type Input = Vec<StatusResult<'index, T>>;
-
- type FeedProduce = ();
-
- type Output = ();
-
- type Error = Error;
-
- fn feed(&mut self, items: Self::Input) -> Result<Self::FeedProduce, Self::Error> {
- for item in items {
- let (entry, path, change, conflict) = item?;
- self.collector.visit_entry(entry, path, change, conflict);
- }
- Ok(())
- }
-
- fn finalize(self) -> Result<Self::Output, Self::Error> {
- Ok(())
- }
-}
-
-struct WorktreeBlob<'a> {
- buf: &'a mut Vec<u8>,
- path: &'a Path,
- entry: &'a gix_index::Entry,
- options: &'a Options,
-}
-
-struct OdbBlob<'a, Find, E>
-where
- E: std::error::Error + Send + Sync + 'static,
- Find: FnMut(&gix_hash::oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E>,
-{
- buf: &'a mut Vec<u8>,
- id: &'a gix_hash::oid,
- find: Find,
-}
-
-impl<'a> content::ReadDataOnce<'a, Error> for WorktreeBlob<'a> {
- fn read_data(self) -> Result<&'a [u8], Error> {
- let res = read::data_to_buf_with_meta(
- self.path,
- self.buf,
- self.entry.mode == gix_index::entry::Mode::SYMLINK,
- &self.options.fs,
- )?;
- Ok(res)
- }
-}
-
-impl<'a, Find, E> content::ReadDataOnce<'a, Error> for OdbBlob<'a, Find, E>
-where
- E: std::error::Error + Send + Sync + 'static,
- Find: FnMut(&gix_hash::oid, &'a mut Vec<u8>) -> Result<gix_object::BlobRef<'a>, E>,
-{
- fn read_data(mut self) -> Result<&'a [u8], Error> {
- (self.find)(self.id, self.buf)
- .map(|b| b.data)
- .map_err(move |err| Error::Find(Box::new(err)))
- }
-}
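
The racy-clean handling in `compute_status` above boils down to one decision: when the on-disk stat matches the index entry, the content comparison may only be skipped if the entry's mtime is strictly older than the timestamp taken before the index was written. The following is a minimal, self-contained sketch of that decision; `Stat` and `Outcome` are illustrative stand-ins, not types from gix-worktree.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Stat {
    mtime_secs: u64,
    size: u64,
}

#[derive(Debug, PartialEq, Eq)]
enum Outcome {
    /// Stat data matches and no race is possible: the content diff can be skipped.
    SkipUnchanged,
    /// A content comparison is required; if the file turns out to be modified while
    /// `racy_clean` is true, the index size is zeroed so the change keeps being detected.
    Compare { racy_clean: bool },
}

fn classify(entry: Stat, on_disk: Stat, index_timestamp_secs: u64) -> Outcome {
    if entry == on_disk {
        // Stat matches, but the file may have changed within the same second
        // the index was written: that is the "racily clean" case.
        let racy_clean = on_disk.mtime_secs >= index_timestamp_secs;
        if !racy_clean {
            return Outcome::SkipUnchanged;
        }
        return Outcome::Compare { racy_clean: true };
    }
    Outcome::Compare { racy_clean: false }
}

fn main() {
    let stat = Stat { mtime_secs: 100, size: 3 };
    // mtime is not older than the index timestamp: a content diff is required.
    assert_eq!(classify(stat, stat, 100), Outcome::Compare { racy_clean: true });
    // mtime is strictly older than the index timestamp: safe to skip.
    assert_eq!(classify(stat, stat, 101), Outcome::SkipUnchanged);
    println!("racy-clean decision behaves as described");
}
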
diff --git a/vendor/gix-worktree/src/status/mod.rs b/vendor/gix-worktree/src/status/mod.rs
deleted file mode 100644
index 8294a54e8..000000000
--- a/vendor/gix-worktree/src/status/mod.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-//! Changes between an index and a worktree.
-///
-mod types;
-pub use types::{Change, Error, Options, VisitEntry};
-
-mod recorder;
-pub use recorder::Recorder;
-
-///
-pub mod content;
-pub(crate) mod function;
diff --git a/vendor/gix-worktree/src/status/recorder.rs b/vendor/gix-worktree/src/status/recorder.rs
deleted file mode 100644
index ea10303ae..000000000
--- a/vendor/gix-worktree/src/status/recorder.rs
+++ /dev/null
@@ -1,27 +0,0 @@
-use bstr::BStr;
-use gix_index as index;
-
-use crate::status::{Change, VisitEntry};
-
-/// Convenience implementation of [`VisitEntry`] that collects all non-trivial changes into a `Vec`.
-#[derive(Debug, Default)]
-pub struct Recorder<'index, T = ()> {
-    /// Collected changes; index entries without conflicts or changes are excluded.
- pub records: Vec<(&'index BStr, Option<Change<T>>, bool)>,
-}
-
-impl<'index, T: Send> VisitEntry<'index> for Recorder<'index, T> {
- type ContentChange = T;
-
- fn visit_entry(
- &mut self,
- _entry: &'index index::Entry,
- rela_path: &'index BStr,
- status: Option<Change<Self::ContentChange>>,
- conflict: bool,
- ) {
- if conflict || status.is_some() {
- self.records.push((rela_path, status, conflict))
- }
- }
-}
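
A hedged sketch of how the collected records might be consumed once a status run has fed the `Recorder`; the status letters are purely illustrative, and `crate::status::{Change, Recorder}` refers to the module removed above, so this only applies inside the crate before its removal.

use crate::status::{Change, Recorder};

fn print_records(recorder: &Recorder<'_>) {
    for (rela_path, change, conflict) in &recorder.records {
        let letter = match change {
            Some(Change::Removed) => 'D',
            Some(Change::Type) => 'T',
            Some(Change::Modification { .. }) => 'M',
            Some(Change::IntentToAdd) => 'A',
            // Entries without a change are only recorded when they are conflicting.
            None => 'U',
        };
        let marker = if *conflict { '!' } else { ' ' };
        println!("{letter}{marker} {rela_path}");
    }
}
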
diff --git a/vendor/gix-worktree/src/status/types.rs b/vendor/gix-worktree/src/status/types.rs
deleted file mode 100644
index 3d488d24e..000000000
--- a/vendor/gix-worktree/src/status/types.rs
+++ /dev/null
@@ -1,69 +0,0 @@
-use bstr::BStr;
-
-/// The error returned by [`status()`][crate::status()].
-#[derive(Debug, thiserror::Error)]
-#[allow(missing_docs)]
-pub enum Error {
- #[error("Could not convert path to UTF8")]
- IllformedUtf8,
- #[error("The clock was off when reading file related metadata after updating a file on disk")]
- Time(#[from] std::time::SystemTimeError),
- #[error("IO error while writing blob or reading file metadata or changing filetype")]
- Io(#[from] std::io::Error),
- #[error("Failed to obtain blob from object database")]
- Find(#[source] Box<dyn std::error::Error + Send + Sync + 'static>),
-}
-
-#[derive(Clone, Default)]
-/// Options that control how the index status with a worktree is computed.
-pub struct Options {
- /// Capabilities of the file system which affect the status computation.
- pub fs: gix_fs::Capabilities,
- /// If set, don't use more than this amount of threads.
- /// Otherwise, usually use as many threads as there are logical cores.
-    /// A value of 0 is interpreted as no limit.
- pub thread_limit: Option<usize>,
- /// Options that control how stat comparisons are made when checking if a file is fresh.
- pub stat: gix_index::entry::stat::Options,
-}
-
-/// How an index entry needs to be changed to obtain the destination worktree state, i.e. `entry.apply(this_change) == worktree-entry`.
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
-pub enum Change<T = ()> {
- /// This corresponding file does not exist in the worktree anymore.
- Removed,
-    /// The type of file changed compared to the worktree, e.g. a symlink is now a regular file.
- Type,
- /// This worktree file was modified in some form, like a permission change or content change or both,
- /// as compared to this entry.
- Modification {
- /// Indicates that one of the stat changes was an executable bit change
- /// which is a significant change itself.
- executable_bit_changed: bool,
- /// The output of the [`CompareBlobs`][crate::status::content::CompareBlobs] run on this entry.
- /// If there is no content change and only the executable bit
-        /// changed, then this is `None`.
- content_change: Option<T>,
- },
-    /// An index entry that corresponds to an untracked worktree file marked with `git add --intent-to-add`.
-    ///
-    /// This means its content is not yet available in the object database, nor in the tree the index was created from,
-    /// even though an entry now exists that represents the worktree file.
- IntentToAdd,
-}
-
-/// Observe changes by comparing an index entry to the worktree or another index.
-pub trait VisitEntry<'index> {
- /// Data generated by comparing an entry with a file.
- type ContentChange;
- /// Observe the `change` of `entry` at the repository-relative `rela_path`, indicating whether
- /// or not it has a `conflict`.
- /// If `change` is `None`, there is no change.
- fn visit_entry(
- &mut self,
- entry: &'index gix_index::Entry,
- rela_path: &'index BStr,
- change: Option<Change<Self::ContentChange>>,
- conflict: bool,
- );
-}
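
Since `VisitEntry` is the extension point of the removed status machinery, here is a sketch of an alternative implementation that tallies changes by kind instead of collecting them, again assuming it lives inside the crate before the module was removed (the `ChangeCounter` type is hypothetical).

use bstr::BStr;
use crate::status::{Change, VisitEntry};

/// Tallies changes by kind instead of collecting them.
#[derive(Default)]
struct ChangeCounter {
    removed: usize,
    type_changed: usize,
    modified: usize,
    intent_to_add: usize,
    conflicts: usize,
}

impl<'index> VisitEntry<'index> for ChangeCounter {
    type ContentChange = ();

    fn visit_entry(
        &mut self,
        _entry: &'index gix_index::Entry,
        _rela_path: &'index BStr,
        change: Option<Change<Self::ContentChange>>,
        conflict: bool,
    ) {
        if conflict {
            self.conflicts += 1;
        }
        match change {
            Some(Change::Removed) => self.removed += 1,
            Some(Change::Type) => self.type_changed += 1,
            Some(Change::Modification { .. }) => self.modified += 1,
            Some(Change::IntentToAdd) => self.intent_to_add += 1,
            None => {}
        }
    }
}
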
diff --git a/vendor/gix-worktree/src/untracked.rs b/vendor/gix-worktree/src/untracked.rs
deleted file mode 100644
index 6e77d7fa3..000000000
--- a/vendor/gix-worktree/src/untracked.rs
+++ /dev/null
@@ -1 +0,0 @@
-// TODO: untracked file detection, needs fs::Cache