Diffstat (limited to 'library/core/src/slice/cmp.rs')
-rw-r--r--  library/core/src/slice/cmp.rs  260
1 file changed, 260 insertions, 0 deletions
diff --git a/library/core/src/slice/cmp.rs b/library/core/src/slice/cmp.rs
new file mode 100644
index 000000000..5e1b218e5
--- /dev/null
+++ b/library/core/src/slice/cmp.rs
@@ -0,0 +1,260 @@
+//! Comparison traits for `[T]`.
+
+use crate::cmp::{self, Ordering};
+use crate::ffi;
+use crate::mem;
+
+use super::from_raw_parts;
+use super::memchr;
+
+extern "C" {
+ /// Calls implementation provided memcmp.
+ ///
+ /// Interprets the data as u8.
+ ///
+ /// Returns 0 for equal, < 0 for less than and > 0 for greater
+ /// than.
+ fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> ffi::c_int;
+}
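+
+// A quick illustration of that contract (illustrative only; real calls
+// must be wrapped in `unsafe`):
+//
+//     memcmp(b"abc".as_ptr(), b"abd".as_ptr(), 3) < 0   // b'c' < b'd'
+//     memcmp(b"abc".as_ptr(), b"abc".as_ptr(), 3) == 0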
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B> PartialEq<[B]> for [A]
+where
+ A: PartialEq<B>,
+{
+ fn eq(&self, other: &[B]) -> bool {
+ SlicePartialEq::equal(self, other)
+ }
+
+ fn ne(&self, other: &[B]) -> bool {
+ SlicePartialEq::not_equal(self, other)
+ }
+}
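+
+// For example, this impl is what lets slices with different but mutually
+// comparable element types be compared directly:
+//
+//     let owned: &[String] = &["a".to_string()];
+//     let borrowed: &[&str] = &["a"];
+//     assert!(owned == borrowed); // via `String: PartialEq<&str>`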
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq> Eq for [T] {}
+
+/// Implements comparison of slices [lexicographically](Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Ord for [T] {
+    fn cmp(&self, other: &[T]) -> Ordering {
+        SliceOrd::compare(self, other)
+    }
+}
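+
+// For example, the ordering is lexicographic, with length as the final
+// tiebreaker:
+//
+//     let a: &[i32] = &[1, 2, 3];
+//     assert_eq!(a.cmp(&[1, 4]), Ordering::Less);    // 2 < 4 decides
+//     assert_eq!(a.cmp(&[1, 2]), Ordering::Greater); // equal prefix, longer wins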
+
+/// Implements comparison of slices [lexicographically](Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd> PartialOrd for [T] {
+    fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
+        SlicePartialOrd::partial_compare(self, other)
+    }
+}
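+
+// Unlike the `Ord` impl above, this one can report that no order exists:
+//
+//     let a: &[f64] = &[1.0, f64::NAN];
+//     let b: &[f64] = &[1.0, 2.0];
+//     assert_eq!(a.partial_cmp(b), None); // NAN is unordered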
+
+#[doc(hidden)]
+// intermediate trait for specialization of slice's PartialEq
+trait SlicePartialEq<B> {
+ fn equal(&self, other: &[B]) -> bool;
+
+ fn not_equal(&self, other: &[B]) -> bool {
+ !self.equal(other)
+ }
+}
+
+// Generic slice equality
+impl<A, B> SlicePartialEq<B> for [A]
+where
+ A: PartialEq<B>,
+{
+ default fn equal(&self, other: &[B]) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+
+ self.iter().zip(other.iter()).all(|(x, y)| x == y)
+ }
+}
+
+// Use memcmp for bytewise equality when the types allow
+impl<A, B> SlicePartialEq<B> for [A]
+where
+ A: BytewiseEquality<B>,
+{
+ fn equal(&self, other: &[B]) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+
+ // SAFETY: `self` and `other` are references and are thus guaranteed to be valid.
+ // The two slices have been checked to have the same size above.
+ unsafe {
+ let size = mem::size_of_val(self);
+ memcmp(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
+ }
+ }
+}
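+
+// With this specialization, a comparison such as
+//
+//     let x: &[u8] = b"hello";
+//     let y: &[u8] = b"hello";
+//     assert!(x == y);
+//
+// compiles down to a length check plus a single `memcmp` call rather than
+// an element-by-element loop.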
+
+#[doc(hidden)]
+// intermediate trait for specialization of slice's PartialOrd
+trait SlicePartialOrd: Sized {
+    fn partial_compare(left: &[Self], right: &[Self]) -> Option<Ordering>;
+}
+
+impl<A: PartialOrd> SlicePartialOrd for A {
+ default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
+ let l = cmp::min(left.len(), right.len());
+
+ // Slice to the loop iteration range to enable bound check
+ // elimination in the compiler
+ let lhs = &left[..l];
+ let rhs = &right[..l];
+
+ for i in 0..l {
+ match lhs[i].partial_cmp(&rhs[i]) {
+ Some(Ordering::Equal) => (),
+ non_eq => return non_eq,
+ }
+ }
+
+ left.len().partial_cmp(&right.len())
+ }
+}
+
+// This is the impl that we would like to have. Unfortunately it's not sound.
+// See `partial_ord_slice.rs`.
+/*
+impl<A> SlicePartialOrd for A
+where
+    A: Ord,
+{
+    default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
+        Some(SliceOrd::compare(left, right))
+    }
+}
+*/
+
+impl<A: AlwaysApplicableOrd> SlicePartialOrd for A {
+ fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
+ Some(SliceOrd::compare(left, right))
+ }
+}
+
+#[rustc_specialization_trait]
+trait AlwaysApplicableOrd: SliceOrd + Ord {}
+
+macro_rules! always_applicable_ord {
+    ($([$($p:tt)*] $t:ty,)*) => {
+        $(impl<$($p)*> AlwaysApplicableOrd for $t {})*
+    }
+}
+
+always_applicable_ord! {
+    [] u8, [] u16, [] u32, [] u64, [] u128, [] usize,
+    [] i8, [] i16, [] i32, [] i64, [] i128, [] isize,
+    [] bool, [] char,
+    [T: ?Sized] *const T, [T: ?Sized] *mut T,
+    [T: AlwaysApplicableOrd] &T,
+    [T: AlwaysApplicableOrd] &mut T,
+    [T: AlwaysApplicableOrd] Option<T>,
+}
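+
+// For instance, the invocation above expands to plain marker impls such as:
+//
+//     impl AlwaysApplicableOrd for u8 {}
+//     impl<T: ?Sized> AlwaysApplicableOrd for *const T {}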
+
+#[doc(hidden)]
+// intermediate trait for specialization of slice's Ord
+trait SliceOrd: Sized {
+    fn compare(left: &[Self], right: &[Self]) -> Ordering;
+}
+
+impl<A: Ord> SliceOrd for A {
+ default fn compare(left: &[Self], right: &[Self]) -> Ordering {
+ let l = cmp::min(left.len(), right.len());
+
+ // Slice to the loop iteration range to enable bound check
+ // elimination in the compiler
+ let lhs = &left[..l];
+ let rhs = &right[..l];
+
+ for i in 0..l {
+ match lhs[i].cmp(&rhs[i]) {
+ Ordering::Equal => (),
+ non_eq => return non_eq,
+ }
+ }
+
+ left.len().cmp(&right.len())
+ }
+}
+
+// memcmp compares a sequence of unsigned bytes lexicographically.
+// This matches the order we want for [u8], but no other element type (not even [i8]).
+impl SliceOrd for u8 {
+    #[inline]
+    fn compare(left: &[Self], right: &[Self]) -> Ordering {
+        // Since the length of a slice is always less than or equal to isize::MAX, this never overflows.
+        let diff = left.len() as isize - right.len() as isize;
+        // This comparison gets optimized away (on x86_64 and ARM) because the subtraction updates flags.
+        let len = if left.len() < right.len() { left.len() } else { right.len() };
+        // SAFETY: `left` and `right` are references and are thus guaranteed to be valid.
+        // We use the minimum of both lengths which guarantees that both regions are
+        // valid for reads in that interval.
+        let mut order = unsafe { memcmp(left.as_ptr(), right.as_ptr(), len) as isize };
+        if order == 0 {
+            order = diff;
+        }
+        order.cmp(&0)
+    }
+}
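+
+// For example, the length difference only breaks ties when one slice is a
+// prefix of the other:
+//
+//     let a: &[u8] = b"abc";
+//     assert_eq!(a.cmp(b"abd"), Ordering::Less);   // memcmp decides
+//     assert_eq!(a.cmp(b"ab"), Ordering::Greater); // equal prefix, longer wins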
+
+// Hack to allow specializing on `Eq` even though `Eq` has a method.
+#[rustc_unsafe_specialization_marker]
+trait MarkerEq<T>: PartialEq<T> {}
+
+impl<T: Eq> MarkerEq<T> for T {}
+
+#[doc(hidden)]
+/// Trait implemented for types that can be compared for equality using
+/// their bytewise representation.
+#[rustc_specialization_trait]
+trait BytewiseEquality<T>: MarkerEq<T> + Copy {}
+
+macro_rules! impl_marker_for {
+    ($traitname:ident, $($ty:ty)*) => {
+        $(
+            impl $traitname<$ty> for $ty { }
+        )*
+    }
+}
+
+impl_marker_for!(BytewiseEquality,
+    u8 i8 u16 i16 u32 i32 u64 i64 u128 i128 usize isize char bool);
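+
+// The invocation above expands to `impl BytewiseEquality<u8> for u8 {}` and
+// so on. Note that `f32` and `f64` are absent: `0.0 == -0.0` despite
+// different byte patterns, and `NAN != NAN` despite identical ones, so
+// bytewise comparison would give wrong answers for floats:
+//
+//     assert!(0.0_f64 == -0.0_f64);  // equal values, different bytes
+//     assert!(f64::NAN != f64::NAN); // unequal values, same bytes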
+
+pub(super) trait SliceContains: Sized {
+    fn slice_contains(&self, x: &[Self]) -> bool;
+}
+
+impl<T> SliceContains for T
+where
+ T: PartialEq,
+{
+ default fn slice_contains(&self, x: &[Self]) -> bool {
+ x.iter().any(|y| *y == *self)
+ }
+}
+
+impl SliceContains for u8 {
+    #[inline]
+    fn slice_contains(&self, x: &[Self]) -> bool {
+        memchr::memchr(*self, x).is_some()
+    }
+}
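+
+// This is the specialization behind byte searches such as:
+//
+//     let haystack: &[u8] = b"hello";
+//     assert!(haystack.contains(&b'e')); // a single memchr call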
+
+impl SliceContains for i8 {
+    #[inline]
+    fn slice_contains(&self, x: &[Self]) -> bool {
+        let byte = *self as u8;
+        // SAFETY: `i8` and `u8` have the same memory layout, thus casting `x.as_ptr()`
+        // as `*const u8` is safe. The `x.as_ptr()` comes from a reference and is thus guaranteed
+        // to be valid for reads for the length of the slice `x.len()`, which cannot be larger
+        // than `isize::MAX`. The returned slice is never mutated.
+        let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) };
+        memchr::memchr(byte, bytes).is_some()
+    }
+}
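+
+// The cast preserves the byte pattern, which is all `memchr` inspects. For
+// example:
+//
+//     assert_eq!(-1_i8 as u8, 255);
+//     let xs: &[i8] = &[-1, 0, 1];
+//     assert!(xs.contains(&-1)); // searches the reinterpreted bytes for 255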