Diffstat (limited to 'vendor/gix-index/src/access/mod.rs')
-rw-r--r--  vendor/gix-index/src/access/mod.rs  205
1 file changed, 202 insertions(+), 3 deletions(-)
diff --git a/vendor/gix-index/src/access/mod.rs b/vendor/gix-index/src/access/mod.rs
index d07a55bf0..08cb23020 100644
--- a/vendor/gix-index/src/access/mod.rs
+++ b/vendor/gix-index/src/access/mod.rs
@@ -1,4 +1,5 @@
use std::cmp::Ordering;
+use std::ops::Range;
use bstr::{BStr, ByteSlice, ByteVec};
use filetime::FileTime;
@@ -70,9 +71,67 @@ impl State {
///
/// Use the index for accessing multiple stages if they exist, but at least the single matching entry.
pub fn entry_index_by_path_and_stage(&self, path: &BStr, stage: entry::Stage) -> Option<usize> {
- self.entries
- .binary_search_by(|e| e.path(self).cmp(path).then_with(|| e.stage().cmp(&stage)))
- .ok()
+ let mut stage_cmp = Ordering::Equal;
+ let idx = self
+ .entries
+ .binary_search_by(|e| {
+ let res = e.path(self).cmp(path);
+ if res.is_eq() {
+ stage_cmp = e.stage().cmp(&stage);
+ }
+ res
+ })
+ .ok()?;
+ self.entry_index_by_idx_and_stage(path, idx, stage, stage_cmp)
+ }
+
+ /// Walk as far in `direction` as possible, with [`Ordering::Greater`] moving towards higher stages and [`Ordering::Less`]
+ /// moving towards lower stages, and return the index of the entry with the highest or lowest stage that was seen.
+ /// Return `None` if there is no entry with a greater or smaller stage.
+ fn walk_entry_stages(&self, path: &BStr, base: usize, direction: Ordering) -> Option<usize> {
+ match direction {
+ Ordering::Greater => self
+ .entries
+ .get(base + 1..)?
+ .iter()
+ .enumerate()
+ .take_while(|(_, e)| e.path(self) == path)
+ .last()
+ .map(|(idx, _)| base + 1 + idx),
+ Ordering::Equal => Some(base),
+ Ordering::Less => self.entries[..base]
+ .iter()
+ .enumerate()
+ .rev()
+ .take_while(|(_, e)| e.path(self) == path)
+ .last()
+ .map(|(idx, _)| idx),
+ }
+ }
+
+ fn entry_index_by_idx_and_stage(
+ &self,
+ path: &BStr,
+ idx: usize,
+ wanted_stage: entry::Stage,
+ stage_cmp: Ordering,
+ ) -> Option<usize> {
+ match stage_cmp {
+ Ordering::Greater => self.entries[..idx]
+ .iter()
+ .enumerate()
+ .rev()
+ .take_while(|(_, e)| e.path(self) == path)
+ .find_map(|(idx, e)| (e.stage() == wanted_stage).then_some(idx)),
+ Ordering::Equal => Some(idx),
+ Ordering::Less => self
+ .entries
+ .get(idx + 1..)?
+ .iter()
+ .enumerate()
+ .take_while(|(_, e)| e.path(self) == path)
+ .find_map(|(ofs, e)| (e.stage() == wanted_stage).then_some(idx + ofs + 1)),
+ }
}
/// Find the entry index in [`entries()[..upper_bound]`][State::entries()] matching the given repository-relative
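As a usage sketch of the reworked `entry_index_by_path_and_stage()` above: the returned index points at the entry whose path and stage both match. The `state` binding and the path below are assumptions made purely for illustration.

    // Hypothetical caller; `state` may be any loaded gix_index::State
    // (a gix_index::File dereferences to one).
    fn find_ours(state: &gix_index::State) -> Option<usize> {
        // Stage 2 is "ours" during a merge conflict; stage 0 is the regular, non-conflicting entry.
        state.entry_index_by_path_and_stage("src/lib.rs".into(), 2)
    }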
@@ -101,6 +160,68 @@ impl State {
.map(|idx| &self.entries[idx])
}
+ /// Return the entry at `path` that is either at stage 0, or at stage 2 (ours) in case of a merge conflict.
+ ///
+ /// Using this method is more efficient than performing two separate searches, one for stage 0 and one for stage 2.
+ pub fn entry_by_path(&self, path: &BStr) -> Option<&Entry> {
+ let mut stage_at_index = 0;
+ let idx = self
+ .entries
+ .binary_search_by(|e| {
+ let res = e.path(self).cmp(path);
+ if res.is_eq() {
+ stage_at_index = e.stage();
+ }
+ res
+ })
+ .ok()?;
+ let idx = if stage_at_index == 0 || stage_at_index == 2 {
+ idx
+ } else {
+ self.entry_index_by_idx_and_stage(path, idx, 2, stage_at_index.cmp(&2))?
+ };
+ Some(&self.entries[idx])
+ }
+
+ /// Return the slice of entries which all share the same `prefix`, or `None` if there is no entry with that prefix.
+ ///
+ /// If `prefix` is empty, all entries are returned.
+ pub fn prefixed_entries(&self, prefix: &BStr) -> Option<&[Entry]> {
+ self.prefixed_entries_range(prefix).map(|range| &self.entries[range])
+ }
+
+ /// Return the range of entries which all share the same `prefix`, or `None` if there is no entry with that prefix.
+ ///
+ /// If `prefix` is empty, the range will include all entries.
+ pub fn prefixed_entries_range(&self, prefix: &BStr) -> Option<Range<usize>> {
+ if prefix.is_empty() {
+ return Some(0..self.entries.len());
+ }
+ let prefix_len = prefix.len();
+ let mut low = self.entries.partition_point(|e| {
+ e.path(self)
+ .get(..prefix_len)
+ .map_or_else(|| e.path(self) <= &prefix[..e.path.len()], |p| p < prefix)
+ });
+ let mut high = low
+ + self.entries[low..].partition_point(|e| e.path(self).get(..prefix_len).map_or(false, |p| p <= prefix));
+
+ let low_entry = &self.entries.get(low)?;
+ if low_entry.stage() != 0 {
+ low = self
+ .walk_entry_stages(low_entry.path(self), low, Ordering::Less)
+ .unwrap_or(low);
+ }
+ if let Some(high_entry) = self.entries.get(high) {
+ if high_entry.stage() != 0 {
+ high = self
+ .walk_entry_stages(high_entry.path(self), high, Ordering::Less)
+ .unwrap_or(high);
+ }
+ }
+ (low != high).then_some(low..high)
+ }
+
/// Return the entry at `idx` or _panic_ if the index is out of bounds.
///
/// The `idx` is typically returned by [`entry_by_path_and_stage()`][State::entry_by_path_and_stage()].
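The read-only lookups added in this hunk, `entry_by_path()`, `prefixed_entries()` and `prefixed_entries_range()`, might be used roughly as follows; `state` and both paths are illustrative assumptions.

    // Hypothetical caller; `state` is any loaded gix_index::State.
    fn sample_lookups(state: &gix_index::State) {
        // Stage 0, or stage 2 ("ours") if the path is part of a merge conflict.
        let _entry = state.entry_by_path("src/lib.rs".into());
        // Every entry whose path starts with the prefix, in index order ...
        let _entries: Option<&[gix_index::Entry]> = state.prefixed_entries("src/".into());
        // ... or just the corresponding range into entries().
        let _range = state.prefixed_entries_range("src/".into());
    }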
@@ -114,6 +235,30 @@ impl State {
pub fn is_sparse(&self) -> bool {
self.is_sparse
}
+
+ /// Return the range of entries that exactly match the given `path`, in all available stages, or `None` if no entry with
+ /// such a path exists.
+ ///
+ /// The range can be used to access the respective entries via [`entries()`](Self::entries()) or [`entries_mut()`](Self::entries_mut()).
+ pub fn entry_range(&self, path: &BStr) -> Option<Range<usize>> {
+ let mut stage_at_index = 0;
+ let idx = self
+ .entries
+ .binary_search_by(|e| {
+ let res = e.path(self).cmp(path);
+ if res.is_eq() {
+ stage_at_index = e.stage();
+ }
+ res
+ })
+ .ok()?;
+
+ let (start, end) = (
+ self.walk_entry_stages(path, idx, Ordering::Less).unwrap_or(idx),
+ self.walk_entry_stages(path, idx, Ordering::Greater).unwrap_or(idx) + 1,
+ );
+ Some(start..end)
+ }
}
/// Mutation
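`entry_range()` pairs naturally with `entries()` to inspect every stage recorded for a single path; the path and helper below are assumptions for illustration.

    // Hypothetical helper printing each stage recorded for one conflicted path.
    fn print_stages(state: &gix_index::State) {
        if let Some(range) = state.entry_range("Cargo.toml".into()) {
            for entry in &state.entries()[range] {
                // stage() is 0 for a regular entry and 1 to 3 for conflict stages.
                println!("stage {}: {}", entry.stage(), entry.path(state));
            }
        }
    }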
@@ -224,6 +369,25 @@ impl State {
.then_with(|| compare(a, b))
});
}
+
+ /// Physically remove all entries for which `should_remove(idx, path, entry)` returns `true`, traversing them from first to last.
+ ///
+ /// Note that the memory used for the removed entries' paths is not freed, as the path backing is append-only.
+ ///
+ /// ### Performance
+ ///
+ /// Instead of physically removing entries here, one would typically add [entry::Flags::REMOVE] to each entry and have
+ /// them dropped when [writing the index](Self::write_to()).
+ pub fn remove_entries(&mut self, mut should_remove: impl FnMut(usize, &BStr, &mut Entry) -> bool) {
+ let mut index = 0;
+ let paths = &self.path_backing;
+ self.entries.retain_mut(|e| {
+ let path = e.path_in(paths);
+ let res = !should_remove(index, path, e);
+ index += 1;
+ res
+ });
+ }
}
/// Extensions
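A minimal sketch of the eager removal that `remove_entries()` performs; the prefix and function name are illustrative assumptions. As the doc comment notes, flagging entries with `entry::Flags::REMOVE` and letting the next write drop them is typically the cheaper route.

    // Hypothetical: physically drop every entry beneath a prefix right away.
    fn drop_prefixed(state: &mut gix_index::State) {
        state.remove_entries(|_idx, path, _entry| path.starts_with(b"vendor/"));
    }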
@@ -249,3 +413,38 @@ impl State {
self.fs_monitor.as_ref()
}
}
+
+#[cfg(test)]
+mod tests {
+ use std::path::{Path, PathBuf};
+
+ #[test]
+ fn entry_by_path_with_conflicting_file() {
+ let file = PathBuf::from("tests")
+ .join("fixtures")
+ .join(Path::new("loose_index").join("conflicting-file.git-index"));
+ let file = crate::File::at(file, gix_hash::Kind::Sha1, false, Default::default()).expect("valid file");
+ assert_eq!(
+ file.entries().len(),
+ 3,
+ "we have a set of conflict entries for a single file"
+ );
+ for idx in 0..3 {
+ for wanted_stage in 1..=3 {
+ let actual_idx = file
+ .entry_index_by_idx_and_stage(
+ "file".into(),
+ idx,
+ wanted_stage,
+ (idx + 1).cmp(&(wanted_stage as usize)),
+ )
+ .expect("found");
+ assert_eq!(
+ actual_idx + 1,
+ wanted_stage as usize,
+ "the index and stage have a relation, and that is upheld if we search correctly"
+ );
+ }
+ }
+ }
+}