author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-19 01:47:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-19 01:47:29 +0000
commit     0ebf5bdf043a27fd3dfb7f92e0cb63d88954c44d (patch)
tree       a31f07c9bcca9d56ce61e9a1ffd30ef350d513aa /third_party/rust/rayon/src/slice
parent     Initial commit. (diff)
Adding upstream version 115.8.0esr. (upstream/115.8.0esr)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/rayon/src/slice')
-rw-r--r--   third_party/rust/rayon/src/slice/chunks.rs      389
-rw-r--r--   third_party/rust/rayon/src/slice/mergesort.rs    755
-rw-r--r--   third_party/rust/rayon/src/slice/mod.rs         1041
-rw-r--r--   third_party/rust/rayon/src/slice/quicksort.rs    897
-rw-r--r--   third_party/rust/rayon/src/slice/rchunks.rs      386
-rw-r--r--   third_party/rust/rayon/src/slice/test.rs         170
6 files changed, 3638 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/rayon/src/slice/chunks.rs b/third_party/rust/rayon/src/slice/chunks.rs
new file mode 100644
index 0000000000..a3cc709536
--- /dev/null
+++ b/third_party/rust/rayon/src/slice/chunks.rs
@@ -0,0 +1,389 @@
+use crate::iter::plumbing::*;
+use crate::iter::*;
+use crate::math::div_round_up;
+use std::cmp;
+
+/// Parallel iterator over immutable non-overlapping chunks of a slice
+#[derive(Debug)]
+pub struct Chunks<'data, T: Sync> {
+ chunk_size: usize,
+ slice: &'data [T],
+}
+
+impl<'data, T: Sync> Chunks<'data, T> {
+ pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
+ Self { chunk_size, slice }
+ }
+}
+
+impl<'data, T: Sync> Clone for Chunks<'data, T> {
+ fn clone(&self) -> Self {
+ Chunks { ..*self }
+ }
+}
+
+impl<'data, T: Sync + 'data> ParallelIterator for Chunks<'data, T> {
+ type Item = &'data [T];
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+impl<'data, T: Sync + 'data> IndexedParallelIterator for Chunks<'data, T> {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn len(&self) -> usize {
+ div_round_up(self.slice.len(), self.chunk_size)
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ callback.callback(ChunksProducer {
+ chunk_size: self.chunk_size,
+ slice: self.slice,
+ })
+ }
+}
+
+struct ChunksProducer<'data, T: Sync> {
+ chunk_size: usize,
+ slice: &'data [T],
+}
+
+impl<'data, T: 'data + Sync> Producer for ChunksProducer<'data, T> {
+ type Item = &'data [T];
+ type IntoIter = ::std::slice::Chunks<'data, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.slice.chunks(self.chunk_size)
+ }
+
+ fn split_at(self, index: usize) -> (Self, Self) {
+ let elem_index = cmp::min(index * self.chunk_size, self.slice.len());
+ let (left, right) = self.slice.split_at(elem_index);
+ (
+ ChunksProducer {
+ chunk_size: self.chunk_size,
+ slice: left,
+ },
+ ChunksProducer {
+ chunk_size: self.chunk_size,
+ slice: right,
+ },
+ )
+ }
+}
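+
+// Illustration (not part of the upstream sources): `split_at` splits at a
+// *chunk* boundary, so the element index is `index * chunk_size`, clamped to
+// the slice length. For a 5-element slice with `chunk_size == 2`, splitting at
+// chunk index 1 gives `elem_index == 2`: the left producer holds the single
+// full chunk `[a, b]`, the right producer holds `[c, d]` plus the short tail `[e]`.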
+
+/// Parallel iterator over immutable non-overlapping chunks of a slice, with
+/// each chunk having exactly `chunk_size` elements
+#[derive(Debug)]
+pub struct ChunksExact<'data, T: Sync> {
+ chunk_size: usize,
+ slice: &'data [T],
+ rem: &'data [T],
+}
+
+impl<'data, T: Sync> ChunksExact<'data, T> {
+ pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
+ let rem_len = slice.len() % chunk_size;
+ let len = slice.len() - rem_len;
+ let (slice, rem) = slice.split_at(len);
+ Self {
+ chunk_size,
+ slice,
+ rem,
+ }
+ }
+
+ /// Return the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements.
+ pub fn remainder(&self) -> &'data [T] {
+ self.rem
+ }
+}
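+
+// A hedged usage sketch (not part of the upstream sources), reached through the
+// public `par_chunks_exact` method defined in `slice/mod.rs`:
+//
+//     use rayon::prelude::*;
+//     let v = [1, 2, 3, 4, 5];
+//     let chunks = v.par_chunks_exact(2);
+//     assert_eq!(chunks.remainder(), &[5]);
+//     assert_eq!(chunks.collect::<Vec<_>>(), vec![&[1, 2][..], &[3, 4]]);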
+
+impl<'data, T: Sync> Clone for ChunksExact<'data, T> {
+ fn clone(&self) -> Self {
+ ChunksExact { ..*self }
+ }
+}
+
+impl<'data, T: Sync + 'data> ParallelIterator for ChunksExact<'data, T> {
+ type Item = &'data [T];
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+impl<'data, T: Sync + 'data> IndexedParallelIterator for ChunksExact<'data, T> {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn len(&self) -> usize {
+ self.slice.len() / self.chunk_size
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ callback.callback(ChunksExactProducer {
+ chunk_size: self.chunk_size,
+ slice: self.slice,
+ })
+ }
+}
+
+struct ChunksExactProducer<'data, T: Sync> {
+ chunk_size: usize,
+ slice: &'data [T],
+}
+
+impl<'data, T: 'data + Sync> Producer for ChunksExactProducer<'data, T> {
+ type Item = &'data [T];
+ type IntoIter = ::std::slice::ChunksExact<'data, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.slice.chunks_exact(self.chunk_size)
+ }
+
+ fn split_at(self, index: usize) -> (Self, Self) {
+ let elem_index = index * self.chunk_size;
+ let (left, right) = self.slice.split_at(elem_index);
+ (
+ ChunksExactProducer {
+ chunk_size: self.chunk_size,
+ slice: left,
+ },
+ ChunksExactProducer {
+ chunk_size: self.chunk_size,
+ slice: right,
+ },
+ )
+ }
+}
+
+/// Parallel iterator over mutable non-overlapping chunks of a slice
+#[derive(Debug)]
+pub struct ChunksMut<'data, T: Send> {
+ chunk_size: usize,
+ slice: &'data mut [T],
+}
+
+impl<'data, T: Send> ChunksMut<'data, T> {
+ pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
+ Self { chunk_size, slice }
+ }
+}
+
+impl<'data, T: Send + 'data> ParallelIterator for ChunksMut<'data, T> {
+ type Item = &'data mut [T];
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+impl<'data, T: Send + 'data> IndexedParallelIterator for ChunksMut<'data, T> {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn len(&self) -> usize {
+ div_round_up(self.slice.len(), self.chunk_size)
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ callback.callback(ChunksMutProducer {
+ chunk_size: self.chunk_size,
+ slice: self.slice,
+ })
+ }
+}
+
+struct ChunksMutProducer<'data, T: Send> {
+ chunk_size: usize,
+ slice: &'data mut [T],
+}
+
+impl<'data, T: 'data + Send> Producer for ChunksMutProducer<'data, T> {
+ type Item = &'data mut [T];
+ type IntoIter = ::std::slice::ChunksMut<'data, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.slice.chunks_mut(self.chunk_size)
+ }
+
+ fn split_at(self, index: usize) -> (Self, Self) {
+ let elem_index = cmp::min(index * self.chunk_size, self.slice.len());
+ let (left, right) = self.slice.split_at_mut(elem_index);
+ (
+ ChunksMutProducer {
+ chunk_size: self.chunk_size,
+ slice: left,
+ },
+ ChunksMutProducer {
+ chunk_size: self.chunk_size,
+ slice: right,
+ },
+ )
+ }
+}
+
+/// Parallel iterator over mutable non-overlapping chunks of a slice, with
+/// each chunk having exactly `chunk_size` elements
+#[derive(Debug)]
+pub struct ChunksExactMut<'data, T: Send> {
+ chunk_size: usize,
+ slice: &'data mut [T],
+ rem: &'data mut [T],
+}
+
+impl<'data, T: Send> ChunksExactMut<'data, T> {
+ pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
+ let rem_len = slice.len() % chunk_size;
+ let len = slice.len() - rem_len;
+ let (slice, rem) = slice.split_at_mut(len);
+ Self {
+ chunk_size,
+ slice,
+ rem,
+ }
+ }
+
+ /// Return the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements.
+ ///
+ /// Note that this has to consume `self` to return the original lifetime of
+ /// the data, which prevents this from actually being used as a parallel
+ /// iterator since that also consumes. This method is provided for parity
+ /// with `std::iter::ChunksExactMut`, but consider calling `remainder()` or
+ /// `take_remainder()` as alternatives.
+ pub fn into_remainder(self) -> &'data mut [T] {
+ self.rem
+ }
+
+ /// Return the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements.
+ ///
+ /// Consider `take_remainder()` if you need access to the data with its
+ /// original lifetime, rather than borrowing through `&mut self` here.
+ pub fn remainder(&mut self) -> &mut [T] {
+ self.rem
+ }
+
+ /// Return the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements. Subsequent calls will return an empty slice.
+ pub fn take_remainder(&mut self) -> &'data mut [T] {
+ std::mem::take(&mut self.rem)
+ }
+}
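+
+// A hedged usage sketch (not part of the upstream sources), reached through the
+// public `par_chunks_exact_mut` method defined in `slice/mod.rs`:
+//
+//     use rayon::prelude::*;
+//     let mut v = [1, 2, 3, 4, 5];
+//     let mut chunks = v.par_chunks_exact_mut(2);
+//     chunks.take_remainder()[0] = 50; // the tail `[5]`, with the original lifetime
+//     chunks.for_each(|chunk| chunk.swap(0, 1));
+//     assert_eq!(v, [2, 1, 4, 3, 50]);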
+
+impl<'data, T: Send + 'data> ParallelIterator for ChunksExactMut<'data, T> {
+ type Item = &'data mut [T];
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+impl<'data, T: Send + 'data> IndexedParallelIterator for ChunksExactMut<'data, T> {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn len(&self) -> usize {
+ self.slice.len() / self.chunk_size
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ callback.callback(ChunksExactMutProducer {
+ chunk_size: self.chunk_size,
+ slice: self.slice,
+ })
+ }
+}
+
+struct ChunksExactMutProducer<'data, T: Send> {
+ chunk_size: usize,
+ slice: &'data mut [T],
+}
+
+impl<'data, T: 'data + Send> Producer for ChunksExactMutProducer<'data, T> {
+ type Item = &'data mut [T];
+ type IntoIter = ::std::slice::ChunksExactMut<'data, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.slice.chunks_exact_mut(self.chunk_size)
+ }
+
+ fn split_at(self, index: usize) -> (Self, Self) {
+ let elem_index = index * self.chunk_size;
+ let (left, right) = self.slice.split_at_mut(elem_index);
+ (
+ ChunksExactMutProducer {
+ chunk_size: self.chunk_size,
+ slice: left,
+ },
+ ChunksExactMutProducer {
+ chunk_size: self.chunk_size,
+ slice: right,
+ },
+ )
+ }
+}
diff --git a/third_party/rust/rayon/src/slice/mergesort.rs b/third_party/rust/rayon/src/slice/mergesort.rs
new file mode 100644
index 0000000000..fec309d273
--- /dev/null
+++ b/third_party/rust/rayon/src/slice/mergesort.rs
@@ -0,0 +1,755 @@
+//! Parallel merge sort.
+//!
+//! This implementation is copied verbatim from `std::slice::sort` and then parallelized.
+//! The only difference from the original is that the sequential `mergesort` returns
+//! `MergesortResult` and leaves descending arrays intact.
+
+use crate::iter::*;
+use crate::slice::ParallelSliceMut;
+use crate::SendPtr;
+use std::mem;
+use std::mem::size_of;
+use std::ptr;
+use std::slice;
+
+unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
+ let old = *ptr;
+ *ptr = ptr.offset(1);
+ old
+}
+
+unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
+ *ptr = ptr.offset(-1);
+ *ptr
+}
+
+/// When dropped, copies from `src` into `dest` a sequence of length `len`.
+struct CopyOnDrop<T> {
+ src: *const T,
+ dest: *mut T,
+ len: usize,
+}
+
+impl<T> Drop for CopyOnDrop<T> {
+ fn drop(&mut self) {
+ unsafe {
+ ptr::copy_nonoverlapping(self.src, self.dest, self.len);
+ }
+ }
+}
+
+/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
+///
+/// This is the integral subroutine of insertion sort.
+fn insert_head<T, F>(v: &mut [T], is_less: &F)
+where
+ F: Fn(&T, &T) -> bool,
+{
+ if v.len() >= 2 && is_less(&v[1], &v[0]) {
+ unsafe {
+ // There are three ways to implement insertion here:
+ //
+ // 1. Swap adjacent elements until the first one gets to its final destination.
+ // However, this way we copy data around more than is necessary. If elements are big
+ // structures (costly to copy), this method will be slow.
+ //
+ // 2. Iterate until the right place for the first element is found. Then shift the
+ // elements succeeding it to make room for it and finally place it into the
+ // remaining hole. This is a good method.
+ //
+ // 3. Copy the first element into a temporary variable. Iterate until the right place
+ // for it is found. As we go along, copy every traversed element into the slot
+ // preceding it. Finally, copy data from the temporary variable into the remaining
+ // hole. This method is very good. Benchmarks demonstrated slightly better
+ // performance than with the 2nd method.
+ //
+ // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
+ let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
+
+ // Intermediate state of the insertion process is always tracked by `hole`, which
+ // serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` in the end.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and
+ // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
+ // initially held exactly once.
+ let mut hole = InsertionHole {
+ src: &*tmp,
+ dest: &mut v[1],
+ };
+ ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
+
+ for i in 2..v.len() {
+ if !is_less(&v[i], &*tmp) {
+ break;
+ }
+ ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
+ hole.dest = &mut v[i];
+ }
+ // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+ }
+ }
+
+ // When dropped, copies from `src` into `dest`.
+ struct InsertionHole<T> {
+ src: *const T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for InsertionHole<T> {
+ fn drop(&mut self) {
+ unsafe {
+ ptr::copy_nonoverlapping(self.src, self.dest, 1);
+ }
+ }
+ }
+}
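+
+// Illustration (not part of the upstream sources): with `v == [3, 1, 2, 5]` and
+// `is_less` being `<`, `tmp` holds 3, the elements 1 and 2 are shifted one slot
+// to the left while `hole.dest` follows them, and dropping `hole` writes 3 into
+// the remaining gap, yielding `[1, 2, 3, 5]`.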
+
+/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
+/// stores the result into `v[..]`.
+///
+/// # Safety
+///
+/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
+/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
+unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &F)
+where
+ F: Fn(&T, &T) -> bool,
+{
+ let len = v.len();
+ let v = v.as_mut_ptr();
+ let v_mid = v.add(mid);
+ let v_end = v.add(len);
+
+ // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
+ // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
+ // copying the lesser (or greater) one into `v`.
+ //
+ // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
+ // consumed first, then we must copy whatever is left of the shorter run into the remaining
+ // hole in `v`.
+ //
+ // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
+ // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
+ // object it initially held exactly once.
+ let mut hole;
+
+ if mid <= len - mid {
+ // The left run is shorter.
+ ptr::copy_nonoverlapping(v, buf, mid);
+ hole = MergeHole {
+ start: buf,
+ end: buf.add(mid),
+ dest: v,
+ };
+
+ // Initially, these pointers point to the beginnings of their arrays.
+ let left = &mut hole.start;
+ let mut right = v_mid;
+ let out = &mut hole.dest;
+
+ while *left < hole.end && right < v_end {
+ // Consume the lesser side.
+ // If equal, prefer the left run to maintain stability.
+ let to_copy = if is_less(&*right, &**left) {
+ get_and_increment(&mut right)
+ } else {
+ get_and_increment(left)
+ };
+ ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
+ }
+ } else {
+ // The right run is shorter.
+ ptr::copy_nonoverlapping(v_mid, buf, len - mid);
+ hole = MergeHole {
+ start: buf,
+ end: buf.add(len - mid),
+ dest: v_mid,
+ };
+
+ // Initially, these pointers point past the ends of their arrays.
+ let left = &mut hole.dest;
+ let right = &mut hole.end;
+ let mut out = v_end;
+
+ while v < *left && buf < *right {
+ // Consume the greater side.
+ // If equal, prefer the right run to maintain stability.
+ let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+ decrement_and_get(left)
+ } else {
+ decrement_and_get(right)
+ };
+ ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
+ }
+ }
+ // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
+ // it will now be copied into the hole in `v`.
+
+ // When dropped, copies the range `start..end` into `dest..`.
+ struct MergeHole<T> {
+ start: *mut T,
+ end: *mut T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for MergeHole<T> {
+ fn drop(&mut self) {
+ // `T` is not a zero-sized type, so it's okay to divide by its size.
+ unsafe {
+ let len = self.end.offset_from(self.start) as usize;
+ ptr::copy_nonoverlapping(self.start, self.dest, len);
+ }
+ }
+ }
+}
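+
+// Illustration (not part of the upstream sources): for `v == [1, 3, 2, 4]` and
+// `mid == 2`, the left run `[1, 3]` is not longer than the right run, so it is
+// copied into `buf`; the forward merge writes 1, 2, 3, the buffer is exhausted,
+// the trailing 4 is already in place, and `MergeHole`'s drop has nothing left
+// to copy. The result is `[1, 2, 3, 4]`.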
+
+/// The result of merge sort.
+#[must_use]
+#[derive(Clone, Copy, PartialEq, Eq)]
+enum MergesortResult {
+ /// The slice has already been sorted.
+ NonDescending,
+ /// The slice was descending and was therefore left intact.
+ Descending,
+ /// The slice was sorted.
+ Sorted,
+}
+
+/// A sorted run that starts at index `start` and is of length `len`.
+#[derive(Clone, Copy)]
+struct Run {
+ start: usize,
+ len: usize,
+}
+
+/// Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
+/// if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
+/// algorithm should continue building a new run instead, `None` is returned.
+///
+/// TimSort is infamous for its buggy implementations, as described here:
+/// http://envisage-project.eu/timsort-specification-and-verification/
+///
+/// The gist of the story is: we must enforce the invariants on the top four runs on the stack.
+/// Enforcing them on just top three is not sufficient to ensure that the invariants will still
+/// hold for *all* runs in the stack.
+///
+/// This function correctly checks invariants for the top four runs. Additionally, if the top
+/// run starts at index 0, it will always demand a merge operation until the stack is fully
+/// collapsed, in order to complete the sort.
+#[inline]
+fn collapse(runs: &[Run]) -> Option<usize> {
+ let n = runs.len();
+
+ if n >= 2
+ && (runs[n - 1].start == 0
+ || runs[n - 2].len <= runs[n - 1].len
+ || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
+ || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
+ {
+ if n >= 3 && runs[n - 3].len < runs[n - 1].len {
+ Some(n - 3)
+ } else {
+ Some(n - 2)
+ }
+ } else {
+ None
+ }
+}
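+
+// Illustration (not part of the upstream sources): with run lengths `[30, 20, 10]`
+// on the stack (top of the stack last) and the top run not starting at index 0,
+// `runs[n - 2].len <= runs[n - 1].len` (20 <= 10) fails but
+// `runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len` (30 <= 30) holds, so a
+// merge is demanded; since `runs[n - 3].len < runs[n - 1].len` (30 < 10) is false,
+// `Some(n - 2)` is returned and the runs of lengths 20 and 10 are merged next.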
+
+/// Sorts a slice using merge sort, unless it is already in descending order.
+///
+/// This function doesn't modify the slice if it is already non-descending or descending.
+/// Otherwise, it sorts the slice into non-descending order.
+///
+/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
+/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
+///
+/// The algorithm identifies strictly descending and non-descending subsequences, which are called
+/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
+/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
+/// satisfied:
+///
+/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
+/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
+///
+/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
+///
+/// # Safety
+///
+/// The argument `buf` is used as a temporary buffer and must be at least as long as `v`.
+unsafe fn mergesort<T, F>(v: &mut [T], buf: *mut T, is_less: &F) -> MergesortResult
+where
+ T: Send,
+ F: Fn(&T, &T) -> bool + Sync,
+{
+ // Very short runs are extended using insertion sort to span at least this many elements.
+ const MIN_RUN: usize = 10;
+
+ let len = v.len();
+
+ // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
+ // strange decision, but consider the fact that merges more often go in the opposite direction
+ // (forwards). According to benchmarks, merging forwards is slightly faster than merging
+ // backwards. To conclude, identifying runs by traversing backwards improves performance.
+ let mut runs = vec![];
+ let mut end = len;
+ while end > 0 {
+ // Find the next natural run, and reverse it if it's strictly descending.
+ let mut start = end - 1;
+
+ if start > 0 {
+ start -= 1;
+
+ if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
+ while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
+ start -= 1;
+ }
+
+ // If this descending run covers the whole slice, return immediately.
+ if start == 0 && end == len {
+ return MergesortResult::Descending;
+ } else {
+ v[start..end].reverse();
+ }
+ } else {
+ while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
+ start -= 1;
+ }
+
+ // If this non-descending run covers the whole slice, return immediately.
+ if end - start == len {
+ return MergesortResult::NonDescending;
+ }
+ }
+ }
+
+ // Insert some more elements into the run if it's too short. Insertion sort is faster than
+ // merge sort on short sequences, so this significantly improves performance.
+ while start > 0 && end - start < MIN_RUN {
+ start -= 1;
+ insert_head(&mut v[start..end], &is_less);
+ }
+
+ // Push this run onto the stack.
+ runs.push(Run {
+ start,
+ len: end - start,
+ });
+ end = start;
+
+ // Merge some pairs of adjacent runs to satisfy the invariants.
+ while let Some(r) = collapse(&runs) {
+ let left = runs[r + 1];
+ let right = runs[r];
+ merge(
+ &mut v[left.start..right.start + right.len],
+ left.len,
+ buf,
+ &is_less,
+ );
+
+ runs[r] = Run {
+ start: left.start,
+ len: left.len + right.len,
+ };
+ runs.remove(r + 1);
+ }
+ }
+
+ // Finally, exactly one run must remain in the stack.
+ debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
+
+ // The original order of the slice was neither non-descending nor descending.
+ MergesortResult::Sorted
+}
+
+////////////////////////////////////////////////////////////////////////////
+// Everything above this line is copied from `std::slice::sort` (with very minor tweaks).
+// Everything below this line is parallelization.
+////////////////////////////////////////////////////////////////////////////
+
+/// Splits two sorted slices so that they can be merged in parallel.
+///
+/// Returns two indices `(a, b)` so that slices `left[..a]` and `right[..b]` come before
+/// `left[a..]` and `right[b..]`.
+fn split_for_merge<T, F>(left: &[T], right: &[T], is_less: &F) -> (usize, usize)
+where
+ F: Fn(&T, &T) -> bool,
+{
+ let left_len = left.len();
+ let right_len = right.len();
+
+ if left_len >= right_len {
+ let left_mid = left_len / 2;
+
+ // Find the first element in `right` that is greater than or equal to `left[left_mid]`.
+ let mut a = 0;
+ let mut b = right_len;
+ while a < b {
+ let m = a + (b - a) / 2;
+ if is_less(&right[m], &left[left_mid]) {
+ a = m + 1;
+ } else {
+ b = m;
+ }
+ }
+
+ (left_mid, a)
+ } else {
+ let right_mid = right_len / 2;
+
+ // Find the first element in `left` that is greater than `right[right_mid]`.
+ let mut a = 0;
+ let mut b = left_len;
+ while a < b {
+ let m = a + (b - a) / 2;
+ if is_less(&right[right_mid], &left[m]) {
+ b = m;
+ } else {
+ a = m + 1;
+ }
+ }
+
+ (a, right_mid)
+ }
+}
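+
+// Illustration (not part of the upstream sources): for `left == [1, 3, 5, 7]` and
+// `right == [2, 4, 6, 8]`, the left slice is not shorter, so `left_mid == 2`
+// (`left[2] == 5`) and the binary search finds the first element of `right` that
+// is `>= 5`, namely index 2. The returned `(2, 2)` means `[1, 3]` and `[2, 4]`
+// end up in the lower halves of the merge, `[5, 7]` and `[6, 8]` in the upper.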
+
+/// Merges slices `left` and `right` in parallel and stores the result into `dest`.
+///
+/// # Safety
+///
+/// The `dest` pointer must have enough space to store the result.
+///
+/// Even if `is_less` panics at any point during the merge process, this function will fully copy
+/// all elements from `left` and `right` into `dest` (not necessarily in sorted order).
+unsafe fn par_merge<T, F>(left: &mut [T], right: &mut [T], dest: *mut T, is_less: &F)
+where
+ T: Send,
+ F: Fn(&T, &T) -> bool + Sync,
+{
+ // Slices whose lengths sum up to this value are merged sequentially. This number is slightly
+ // larger than `CHUNK_LENGTH`, and the reason is that merging is faster than merge sorting, so
+ // merging needs a slightly coarser granularity in order to hide the overhead of Rayon's task
+ // scheduling.
+ const MAX_SEQUENTIAL: usize = 5000;
+
+ let left_len = left.len();
+ let right_len = right.len();
+
+ // Intermediate state of the merge process, which serves two purposes:
+ // 1. Protects integrity of `dest` from panics in `is_less`.
+ // 2. Copies the remaining elements as soon as one of the two sides is exhausted.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the merge process, `s` will get dropped and copy the
+ // remaining parts of `left` and `right` into `dest`.
+ let mut s = State {
+ left_start: left.as_mut_ptr(),
+ left_end: left.as_mut_ptr().add(left_len),
+ right_start: right.as_mut_ptr(),
+ right_end: right.as_mut_ptr().add(right_len),
+ dest,
+ };
+
+ if left_len == 0 || right_len == 0 || left_len + right_len < MAX_SEQUENTIAL {
+ while s.left_start < s.left_end && s.right_start < s.right_end {
+ // Consume the lesser side.
+ // If equal, prefer the left run to maintain stability.
+ let to_copy = if is_less(&*s.right_start, &*s.left_start) {
+ get_and_increment(&mut s.right_start)
+ } else {
+ get_and_increment(&mut s.left_start)
+ };
+ ptr::copy_nonoverlapping(to_copy, get_and_increment(&mut s.dest), 1);
+ }
+ } else {
+ // Function `split_for_merge` might panic. If that happens, `s` will get destructed and copy
+ // the whole `left` and `right` into `dest`.
+ let (left_mid, right_mid) = split_for_merge(left, right, is_less);
+ let (left_l, left_r) = left.split_at_mut(left_mid);
+ let (right_l, right_r) = right.split_at_mut(right_mid);
+
+ // Prevent the destructor of `s` from running. Rayon will ensure that both calls to
+ // `par_merge` happen. If one of the two calls panics, they will ensure that elements still
+ // get copied into `dest_l` and `dest_r`.
+ mem::forget(s);
+
+ // Wrap pointers in SendPtr so that they can be sent to another thread
+ // See the documentation of SendPtr for a full explanation
+ let dest_l = SendPtr(dest);
+ let dest_r = SendPtr(dest.add(left_l.len() + right_l.len()));
+ rayon_core::join(
+ move || par_merge(left_l, right_l, dest_l.get(), is_less),
+ move || par_merge(left_r, right_r, dest_r.get(), is_less),
+ );
+ }
+ // Finally, `s` gets dropped if we used sequential merge, thus copying the remaining elements
+ // all at once.
+
+ // When dropped, copies arrays `left_start..left_end` and `right_start..right_end` into `dest`,
+ // in that order.
+ struct State<T> {
+ left_start: *mut T,
+ left_end: *mut T,
+ right_start: *mut T,
+ right_end: *mut T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for State<T> {
+ fn drop(&mut self) {
+ let size = size_of::<T>();
+ let left_len = (self.left_end as usize - self.left_start as usize) / size;
+ let right_len = (self.right_end as usize - self.right_start as usize) / size;
+
+ // Copy array `left`, followed by `right`.
+ unsafe {
+ ptr::copy_nonoverlapping(self.left_start, self.dest, left_len);
+ self.dest = self.dest.add(left_len);
+ ptr::copy_nonoverlapping(self.right_start, self.dest, right_len);
+ }
+ }
+ }
+}
+
+/// Recursively merges pre-sorted chunks inside `v`.
+///
+/// Chunks of `v` are stored in `chunks` as intervals (inclusive left and exclusive right bound).
+/// Argument `buf` is an auxiliary buffer that will be used during the procedure.
+/// If `into_buf` is true, the result will be stored into `buf`, otherwise it will be in `v`.
+///
+/// # Safety
+///
+/// The number of chunks must be positive and they must be adjacent: the right bound of each chunk
+/// must equal the left bound of the following chunk.
+///
+/// The buffer must be at least as long as `v`.
+unsafe fn recurse<T, F>(
+ v: *mut T,
+ buf: *mut T,
+ chunks: &[(usize, usize)],
+ into_buf: bool,
+ is_less: &F,
+) where
+ T: Send,
+ F: Fn(&T, &T) -> bool + Sync,
+{
+ let len = chunks.len();
+ debug_assert!(len > 0);
+
+ // Base case of the algorithm.
+ // If only one chunk is remaining, there's no more work to split and merge.
+ if len == 1 {
+ if into_buf {
+ // Copy the chunk from `v` into `buf`.
+ let (start, end) = chunks[0];
+ let src = v.add(start);
+ let dest = buf.add(start);
+ ptr::copy_nonoverlapping(src, dest, end - start);
+ }
+ return;
+ }
+
+ // Split the chunks into two halves.
+ let (start, _) = chunks[0];
+ let (mid, _) = chunks[len / 2];
+ let (_, end) = chunks[len - 1];
+ let (left, right) = chunks.split_at(len / 2);
+
+ // After recursive calls finish we'll have to merge chunks `(start, mid)` and `(mid, end)` from
+ // `src` into `dest`. If the current invocation has to store the result into `buf`, we'll
+ // merge chunks from `v` into `buf`, and vice versa.
+ //
+ // Recursive calls flip `into_buf` at each level of recursion. More concretely, `par_merge`
+ // merges chunks from `buf` into `v` at the first level, from `v` into `buf` at the second
+ // level etc.
+ let (src, dest) = if into_buf { (v, buf) } else { (buf, v) };
+
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the recursive calls, the destructor of `guard` will
+ // be executed, thus copying everything from `src` into `dest`. This way we ensure that all
+ // chunks are in fact copied into `dest`, even if the merge process doesn't finish.
+ let guard = CopyOnDrop {
+ src: src.add(start),
+ dest: dest.add(start),
+ len: end - start,
+ };
+
+ // Wrap pointers in SendPtr so that they can be sent to another thread
+ // See the documentation of SendPtr for a full explanation
+ let v = SendPtr(v);
+ let buf = SendPtr(buf);
+ rayon_core::join(
+ move || recurse(v.get(), buf.get(), left, !into_buf, is_less),
+ move || recurse(v.get(), buf.get(), right, !into_buf, is_less),
+ );
+
+ // Everything went all right - recursive calls didn't panic.
+ // Forget the guard in order to prevent its destructor from running.
+ mem::forget(guard);
+
+ // Merge chunks `(start, mid)` and `(mid, end)` from `src` into `dest`.
+ let src_left = slice::from_raw_parts_mut(src.add(start), mid - start);
+ let src_right = slice::from_raw_parts_mut(src.add(mid), end - mid);
+ par_merge(src_left, src_right, dest.add(start), is_less);
+}
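+
+// Illustration (not part of the upstream sources): called with the chunks
+// `[(0, 2000), (2000, 4000), (4000, 5000)]` and `into_buf == false`, the left
+// recursion copies `v[0..2000]` into `buf`, the right recursion merges
+// `v[2000..4000]` and `v[4000..5000]` into `buf[2000..5000]`, and the final
+// `par_merge` merges `buf[0..2000]` and `buf[2000..5000]` back into `v`.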
+
+/// Sorts `v` using merge sort in parallel.
+///
+/// The algorithm is stable, allocates memory, and `O(n log n)` worst-case.
+/// The allocated temporary buffer is of the same length as is `v`.
+pub(super) fn par_mergesort<T, F>(v: &mut [T], is_less: F)
+where
+ T: Send,
+ F: Fn(&T, &T) -> bool + Sync,
+{
+ // Slices of up to this length get sorted using insertion sort in order to avoid the cost of
+ // buffer allocation.
+ const MAX_INSERTION: usize = 20;
+ // The length of initial chunks. This number is as small as possible but so that the overhead
+ // of Rayon's task scheduling is still negligible.
+ const CHUNK_LENGTH: usize = 2000;
+
+ // Sorting has no meaningful behavior on zero-sized types.
+ if size_of::<T>() == 0 {
+ return;
+ }
+
+ let len = v.len();
+
+ // Short slices get sorted in-place via insertion sort to avoid allocations.
+ if len <= MAX_INSERTION {
+ if len >= 2 {
+ for i in (0..len - 1).rev() {
+ insert_head(&mut v[i..], &is_less);
+ }
+ }
+ return;
+ }
+
+ // Allocate a buffer to use as scratch memory. Its length stays at 0 so that the shallow
+ // copies of `v`'s contents written into it are never dropped by the vector, even if
+ // `is_less` panics.
+ let mut buf = Vec::<T>::with_capacity(len);
+ let buf = buf.as_mut_ptr();
+
+ // If the slice is not longer than one chunk would be, do sequential merge sort and return.
+ if len <= CHUNK_LENGTH {
+ let res = unsafe { mergesort(v, buf, &is_less) };
+ if res == MergesortResult::Descending {
+ v.reverse();
+ }
+ return;
+ }
+
+ // Split the slice into chunks and merge sort them in parallel.
+ // However, descending chunks will not be sorted - they will be simply left intact.
+ let mut iter = {
+ // Wrap pointer in SendPtr so that it can be sent to another thread
+ // See the documentation of SendPtr for a full explanation
+ let buf = SendPtr(buf);
+ let is_less = &is_less;
+
+ v.par_chunks_mut(CHUNK_LENGTH)
+ .with_max_len(1)
+ .enumerate()
+ .map(move |(i, chunk)| {
+ let l = CHUNK_LENGTH * i;
+ let r = l + chunk.len();
+ unsafe {
+ let buf = buf.get().add(l);
+ (l, r, mergesort(chunk, buf, is_less))
+ }
+ })
+ .collect::<Vec<_>>()
+ .into_iter()
+ .peekable()
+ };
+
+ // Now attempt to concatenate adjacent chunks that were left intact.
+ let mut chunks = Vec::with_capacity(iter.len());
+
+ while let Some((a, mut b, res)) = iter.next() {
+ // If this chunk was not modified by the sort procedure...
+ if res != MergesortResult::Sorted {
+ while let Some(&(x, y, r)) = iter.peek() {
+ // If the following chunk is of the same type and can be concatenated...
+ if r == res && (r == MergesortResult::Descending) == is_less(&v[x], &v[x - 1]) {
+ // Concatenate them.
+ b = y;
+ iter.next();
+ } else {
+ break;
+ }
+ }
+ }
+
+ // Descending chunks must be reversed.
+ if res == MergesortResult::Descending {
+ v[a..b].reverse();
+ }
+
+ chunks.push((a, b));
+ }
+
+ // All chunks are properly sorted.
+ // Now we just have to merge them together.
+ unsafe {
+ recurse(v.as_mut_ptr(), buf, &chunks, false, &is_less);
+ }
+}
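+
+// A hedged usage note (not part of the upstream sources): `par_mergesort` is the
+// backend of the stable sorts in `slice/mod.rs`, so it is normally reached through
+// the public API:
+//
+//     use rayon::prelude::*;
+//     let mut v = vec![5, 4, 1, 3, 2];
+//     v.par_sort(); // forwards to `par_mergesort(v, T::lt)`
+//     assert_eq!(v, [1, 2, 3, 4, 5]);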
+
+#[cfg(test)]
+mod tests {
+ use super::split_for_merge;
+ use rand::distributions::Uniform;
+ use rand::{thread_rng, Rng};
+
+ #[test]
+ fn test_split_for_merge() {
+ fn check(left: &[u32], right: &[u32]) {
+ let (l, r) = split_for_merge(left, right, &|&a, &b| a < b);
+ assert!(left[..l]
+ .iter()
+ .all(|&x| right[r..].iter().all(|&y| x <= y)));
+ assert!(right[..r].iter().all(|&x| left[l..].iter().all(|&y| x < y)));
+ }
+
+ check(&[1, 2, 2, 2, 2, 3], &[1, 2, 2, 2, 2, 3]);
+ check(&[1, 2, 2, 2, 2, 3], &[]);
+ check(&[], &[1, 2, 2, 2, 2, 3]);
+
+ let rng = &mut thread_rng();
+
+ for _ in 0..100 {
+ let limit: u32 = rng.gen_range(1..21);
+ let left_len: usize = rng.gen_range(0..20);
+ let right_len: usize = rng.gen_range(0..20);
+
+ let mut left = rng
+ .sample_iter(&Uniform::new(0, limit))
+ .take(left_len)
+ .collect::<Vec<_>>();
+ let mut right = rng
+ .sample_iter(&Uniform::new(0, limit))
+ .take(right_len)
+ .collect::<Vec<_>>();
+
+ left.sort();
+ right.sort();
+ check(&left, &right);
+ }
+ }
+}
diff --git a/third_party/rust/rayon/src/slice/mod.rs b/third_party/rust/rayon/src/slice/mod.rs
new file mode 100644
index 0000000000..dab56deb32
--- /dev/null
+++ b/third_party/rust/rayon/src/slice/mod.rs
@@ -0,0 +1,1041 @@
+//! Parallel iterator types for [slices][std::slice]
+//!
+//! You will rarely need to interact with this module directly unless you need
+//! to name one of the iterator types.
+//!
+//! [std::slice]: https://doc.rust-lang.org/stable/std/slice/
+
+mod chunks;
+mod mergesort;
+mod quicksort;
+mod rchunks;
+
+mod test;
+
+use self::mergesort::par_mergesort;
+use self::quicksort::par_quicksort;
+use crate::iter::plumbing::*;
+use crate::iter::*;
+use crate::split_producer::*;
+use std::cmp;
+use std::cmp::Ordering;
+use std::fmt::{self, Debug};
+use std::mem;
+
+pub use self::chunks::{Chunks, ChunksExact, ChunksExactMut, ChunksMut};
+pub use self::rchunks::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
+
+/// Parallel extensions for slices.
+pub trait ParallelSlice<T: Sync> {
+ /// Returns a plain slice, which is used to implement the rest of the
+ /// parallel methods.
+ fn as_parallel_slice(&self) -> &[T];
+
+ /// Returns a parallel iterator over subslices separated by elements that
+ /// match the separator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// let smallest = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9]
+ /// .par_split(|i| *i == 0)
+ /// .map(|numbers| numbers.iter().min().unwrap())
+ /// .min();
+ /// assert_eq!(Some(&1), smallest);
+ /// ```
+ fn par_split<P>(&self, separator: P) -> Split<'_, T, P>
+ where
+ P: Fn(&T) -> bool + Sync + Send,
+ {
+ Split {
+ slice: self.as_parallel_slice(),
+ separator,
+ }
+ }
+
+ /// Returns a parallel iterator over all contiguous windows of length
+ /// `window_size`. The windows overlap.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// let windows: Vec<_> = [1, 2, 3].par_windows(2).collect();
+ /// assert_eq!(vec![[1, 2], [2, 3]], windows);
+ /// ```
+ fn par_windows(&self, window_size: usize) -> Windows<'_, T> {
+ Windows {
+ window_size,
+ slice: self.as_parallel_slice(),
+ }
+ }
+
+ /// Returns a parallel iterator over at most `chunk_size` elements of
+ /// `self` at a time. The chunks do not overlap.
+ ///
+ /// If the number of elements in the iterator is not divisible by
+ /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
+ /// other chunks will have that exact length.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks(2).collect();
+ /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]);
+ /// ```
+ #[track_caller]
+ fn par_chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
+ assert!(chunk_size != 0, "chunk_size must not be zero");
+ Chunks::new(chunk_size, self.as_parallel_slice())
+ }
+
+ /// Returns a parallel iterator over `chunk_size` elements of
+ /// `self` at a time. The chunks do not overlap.
+ ///
+ /// If `chunk_size` does not divide the length of the slice, then the
+ /// last up to `chunk_size-1` elements will be omitted and can be
+ /// retrieved from the remainder function of the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks_exact(2).collect();
+ /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4]]);
+ /// ```
+ #[track_caller]
+ fn par_chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
+ assert!(chunk_size != 0, "chunk_size must not be zero");
+ ChunksExact::new(chunk_size, self.as_parallel_slice())
+ }
+
+ /// Returns a parallel iterator over at most `chunk_size` elements of `self` at a time,
+ /// starting at the end. The chunks do not overlap.
+ ///
+ /// If the number of elements in the iterator is not divisible by
+ /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
+ /// other chunks will have that exact length.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_rchunks(2).collect();
+ /// assert_eq!(chunks, vec![&[4, 5][..], &[2, 3], &[1]]);
+ /// ```
+ #[track_caller]
+ fn par_rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
+ assert!(chunk_size != 0, "chunk_size must not be zero");
+ RChunks::new(chunk_size, self.as_parallel_slice())
+ }
+
+ /// Returns a parallel iterator over `chunk_size` elements of `self` at a time,
+ /// starting at the end. The chunks do not overlap.
+ ///
+ /// If `chunk_size` does not divide the length of the slice, then the
+ /// last up to `chunk_size-1` elements will be omitted and can be
+ /// retrieved from the remainder function of the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_rchunks_exact(2).collect();
+ /// assert_eq!(chunks, vec![&[4, 5][..], &[2, 3]]);
+ /// ```
+ #[track_caller]
+ fn par_rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
+ assert!(chunk_size != 0, "chunk_size must not be zero");
+ RChunksExact::new(chunk_size, self.as_parallel_slice())
+ }
+}
+
+impl<T: Sync> ParallelSlice<T> for [T] {
+ #[inline]
+ fn as_parallel_slice(&self) -> &[T] {
+ self
+ }
+}
+
+/// Parallel extensions for mutable slices.
+pub trait ParallelSliceMut<T: Send> {
+ /// Returns a plain mutable slice, which is used to implement the rest of
+ /// the parallel methods.
+ fn as_parallel_slice_mut(&mut self) -> &mut [T];
+
+ /// Returns a parallel iterator over mutable subslices separated by
+ /// elements that match the separator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// let mut array = [1, 2, 3, 0, 2, 4, 8, 0, 3, 6, 9];
+ /// array.par_split_mut(|i| *i == 0)
+ /// .for_each(|slice| slice.reverse());
+ /// assert_eq!(array, [3, 2, 1, 0, 8, 4, 2, 0, 9, 6, 3]);
+ /// ```
+ fn par_split_mut<P>(&mut self, separator: P) -> SplitMut<'_, T, P>
+ where
+ P: Fn(&T) -> bool + Sync + Send,
+ {
+ SplitMut {
+ slice: self.as_parallel_slice_mut(),
+ separator,
+ }
+ }
+
+ /// Returns a parallel iterator over at most `chunk_size` elements of
+ /// `self` at a time. The chunks are mutable and do not overlap.
+ ///
+ /// If the number of elements in the iterator is not divisible by
+ /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
+ /// other chunks will have that exact length.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// let mut array = [1, 2, 3, 4, 5];
+ /// array.par_chunks_mut(2)
+ /// .for_each(|slice| slice.reverse());
+ /// assert_eq!(array, [2, 1, 4, 3, 5]);
+ /// ```
+ #[track_caller]
+ fn par_chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
+ assert!(chunk_size != 0, "chunk_size must not be zero");
+ ChunksMut::new(chunk_size, self.as_parallel_slice_mut())
+ }
+
+ /// Returns a parallel iterator over `chunk_size` elements of
+ /// `self` at a time. The chunks are mutable and do not overlap.
+ ///
+ /// If `chunk_size` does not divide the length of the slice, then the
+ /// last up to `chunk_size-1` elements will be omitted and can be
+ /// retrieved from the remainder function of the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// let mut array = [1, 2, 3, 4, 5];
+ /// array.par_chunks_exact_mut(3)
+ /// .for_each(|slice| slice.reverse());
+ /// assert_eq!(array, [3, 2, 1, 4, 5]);
+ /// ```
+ #[track_caller]
+ fn par_chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
+ assert!(chunk_size != 0, "chunk_size must not be zero");
+ ChunksExactMut::new(chunk_size, self.as_parallel_slice_mut())
+ }
+
+ /// Returns a parallel iterator over at most `chunk_size` elements of `self` at a time,
+ /// starting at the end. The chunks are mutable and do not overlap.
+ ///
+ /// If the number of elements in the iterator is not divisible by
+ /// `chunk_size`, the last chunk may be shorter than `chunk_size`. All
+ /// other chunks will have that exact length.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// let mut array = [1, 2, 3, 4, 5];
+ /// array.par_rchunks_mut(2)
+ /// .for_each(|slice| slice.reverse());
+ /// assert_eq!(array, [1, 3, 2, 5, 4]);
+ /// ```
+ #[track_caller]
+ fn par_rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
+ assert!(chunk_size != 0, "chunk_size must not be zero");
+ RChunksMut::new(chunk_size, self.as_parallel_slice_mut())
+ }
+
+ /// Returns a parallel iterator over `chunk_size` elements of `self` at a time,
+ /// starting at the end. The chunks are mutable and do not overlap.
+ ///
+ /// If `chunk_size` does not divide the length of the slice, then the
+ /// last up to `chunk_size-1` elements will be omitted and can be
+ /// retrieved from the remainder function of the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ /// let mut array = [1, 2, 3, 4, 5];
+ /// array.par_rchunks_exact_mut(3)
+ /// .for_each(|slice| slice.reverse());
+ /// assert_eq!(array, [1, 2, 5, 4, 3]);
+ /// ```
+ #[track_caller]
+ fn par_rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
+ assert!(chunk_size != 0, "chunk_size must not be zero");
+ RChunksExactMut::new(chunk_size, self.as_parallel_slice_mut())
+ }
+
+ /// Sorts the slice in parallel.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`par_sort_unstable`](#method.par_sort_unstable).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
+ /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
+ /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
+ /// parallel subdivision of chunks and parallel merge operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let mut v = [-5, 4, 1, -3, 2];
+ ///
+ /// v.par_sort();
+ /// assert_eq!(v, [-5, -3, 1, 2, 4]);
+ /// ```
+ fn par_sort(&mut self)
+ where
+ T: Ord,
+ {
+ par_mergesort(self.as_parallel_slice_mut(), T::lt);
+ }
+
+ /// Sorts the slice in parallel with a comparator function.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// The comparator function must define a total ordering for the elements in the slice. If
+ /// the ordering is not total, the order of the elements is unspecified. An order is a
+ /// total order if it is (for all `a`, `b` and `c`):
+ ///
+ /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
+ /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
+ ///
+ /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
+ /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
+ /// floats.par_sort_by(|a, b| a.partial_cmp(b).unwrap());
+ /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
+ /// ```
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`par_sort_unstable_by`](#method.par_sort_unstable_by).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
+ /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
+ /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
+ /// parallel subdivision of chunks and parallel merge operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let mut v = [5, 4, 1, 3, 2];
+ /// v.par_sort_by(|a, b| a.cmp(b));
+ /// assert_eq!(v, [1, 2, 3, 4, 5]);
+ ///
+ /// // reverse sorting
+ /// v.par_sort_by(|a, b| b.cmp(a));
+ /// assert_eq!(v, [5, 4, 3, 2, 1]);
+ /// ```
+ fn par_sort_by<F>(&mut self, compare: F)
+ where
+ F: Fn(&T, &T) -> Ordering + Sync,
+ {
+ par_mergesort(self.as_parallel_slice_mut(), |a, b| {
+ compare(a, b) == Ordering::Less
+ });
+ }
+
+ /// Sorts the slice in parallel with a key extraction function.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
+ /// worst-case, where the key function is *O*(*m*).
+ ///
+ /// For expensive key functions (e.g. functions that are not simple property accesses or
+ /// basic operations), [`par_sort_by_cached_key`](#method.par_sort_by_cached_key) is likely to
+ /// be significantly faster, as it does not recompute element keys.
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`par_sort_unstable_by_key`](#method.par_sort_unstable_by_key).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage the same size as `self`, but for very short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// In order to sort the slice in parallel, the slice is first divided into smaller chunks and
+ /// all chunks are sorted in parallel. Then, adjacent chunks that together form non-descending
+ /// or descending runs are concatenated. Finally, the remaining chunks are merged together using
+ /// parallel subdivision of chunks and parallel merge operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// v.par_sort_by_key(|k| k.abs());
+ /// assert_eq!(v, [1, 2, -3, 4, -5]);
+ /// ```
+ fn par_sort_by_key<K, F>(&mut self, f: F)
+ where
+ K: Ord,
+ F: Fn(&T) -> K + Sync,
+ {
+ par_mergesort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
+ }
+
+ /// Sorts the slice in parallel with a key extraction function.
+ ///
+ /// During sorting, the key function is called at most once per element, by using
+ /// temporary storage to remember the results of key evaluation.
+ /// The key function is called in parallel, so the order of calls is completely unspecified.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
+ /// worst-case, where the key function is *O*(*m*).
+ ///
+ /// For simple key functions (e.g., functions that are property accesses or
+ /// basic operations), [`par_sort_by_key`](#method.par_sort_by_key) is likely to be
+ /// faster.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
+ /// length of the slice.
+ ///
+ /// All quicksorts work in two stages: partitioning into two halves followed by recursive
+ /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
+ /// parallel. Finally, after sorting the cached keys, the item positions are updated sequentially.
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let mut v = [-5i32, 4, 32, -3, 2];
+ ///
+ /// v.par_sort_by_cached_key(|k| k.to_string());
+ /// assert!(v == [-3, -5, 2, 32, 4]);
+ /// ```
+ fn par_sort_by_cached_key<K, F>(&mut self, f: F)
+ where
+ F: Fn(&T) -> K + Sync,
+ K: Ord + Send,
+ {
+ let slice = self.as_parallel_slice_mut();
+ let len = slice.len();
+ if len < 2 {
+ return;
+ }
+
+ // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
+ macro_rules! sort_by_key {
+ ($t:ty) => {{
+ let mut indices: Vec<_> = slice
+ .par_iter_mut()
+ .enumerate()
+ .map(|(i, x)| (f(&*x), i as $t))
+ .collect();
+ // The elements of `indices` are unique, as they are indexed, so any sort will be
+ // stable with respect to the original slice. We use `sort_unstable` here because
+ // it requires less memory allocation.
+ indices.par_sort_unstable();
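+ // Apply the permutation in place (a descriptive note, not in the upstream
+ // sources): `indices[i].1` is the original position of the element that
+ // belongs at `i`. If that position has already been processed (`< i`), its
+ // element was swapped away earlier, so follow the recorded indices until
+ // the element's current position is found, then swap it into place.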
+ for i in 0..len {
+ let mut index = indices[i].1;
+ while (index as usize) < i {
+ index = indices[index as usize].1;
+ }
+ indices[i].1 = index;
+ slice.swap(i, index as usize);
+ }
+ }};
+ }
+
+ let sz_u8 = mem::size_of::<(K, u8)>();
+ let sz_u16 = mem::size_of::<(K, u16)>();
+ let sz_u32 = mem::size_of::<(K, u32)>();
+ let sz_usize = mem::size_of::<(K, usize)>();
+
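+ // A descriptive note (not in the upstream sources): pick the narrowest index
+ // type that actually shrinks the `(K, index)` pairs. For `K = u8`, `(u8, u8)`
+ // is 2 bytes while `(u8, u16)` is 4, so slices of up to 255 elements use `u8`
+ // indices; for `K = u64`, alignment makes all four pair types the same size,
+ // so `usize` is used regardless of length.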
+ if sz_u8 < sz_u16 && len <= (std::u8::MAX as usize) {
+ return sort_by_key!(u8);
+ }
+ if sz_u16 < sz_u32 && len <= (std::u16::MAX as usize) {
+ return sort_by_key!(u16);
+ }
+ if sz_u32 < sz_usize && len <= (std::u32::MAX as usize) {
+ return sort_by_key!(u32);
+ }
+ sort_by_key!(usize)
+ }
+
+ /// Sorts the slice in parallel, but might not preserve the order of equal elements.
+ ///
+ /// This sort is unstable (i.e., may reorder equal elements), in-place
+ /// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
+ /// slice consists of several concatenated sorted sequences.
+ ///
+ /// All quicksorts work in two stages: partitioning into two halves followed by recursive
+ /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
+ /// parallel.
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let mut v = [-5, 4, 1, -3, 2];
+ ///
+ /// v.par_sort_unstable();
+ /// assert_eq!(v, [-5, -3, 1, 2, 4]);
+ /// ```
+ fn par_sort_unstable(&mut self)
+ where
+ T: Ord,
+ {
+ par_quicksort(self.as_parallel_slice_mut(), T::lt);
+ }
+
+ /// Sorts the slice in parallel with a comparator function, but might not preserve the order of
+ /// equal elements.
+ ///
+ /// This sort is unstable (i.e., may reorder equal elements), in-place
+ /// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// The comparator function must define a total ordering for the elements in the slice. If
+ /// the ordering is not total, the order of the elements is unspecified. An order is a
+ /// total order if it is (for all `a`, `b` and `c`):
+ ///
+ /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
+ /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
+ ///
+ /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
+ /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
+ /// floats.par_sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
+ /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
+ /// ```
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
+ /// slice consists of several concatenated sorted sequences.
+ ///
+ /// All quicksorts work in two stages: partitioning into two halves followed by recursive
+ /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
+ /// parallel.
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let mut v = [5, 4, 1, 3, 2];
+ /// v.par_sort_unstable_by(|a, b| a.cmp(b));
+ /// assert_eq!(v, [1, 2, 3, 4, 5]);
+ ///
+ /// // reverse sorting
+ /// v.par_sort_unstable_by(|a, b| b.cmp(a));
+ /// assert_eq!(v, [5, 4, 3, 2, 1]);
+ /// ```
+ fn par_sort_unstable_by<F>(&mut self, compare: F)
+ where
+ F: Fn(&T, &T) -> Ordering + Sync,
+ {
+ par_quicksort(self.as_parallel_slice_mut(), |a, b| {
+ compare(a, b) == Ordering::Less
+ });
+ }
+
+ /// Sorts the slice in parallel with a key extraction function, but might not preserve the order
+ /// of equal elements.
+ ///
+ /// This sort is unstable (i.e., may reorder equal elements), in-place
+ /// (i.e., does not allocate), and *O*(*m* \* *n* \* log(*n*)) worst-case,
+ /// where the key function is *O*(*m*).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// Due to its key calling strategy, `par_sort_unstable_by_key` is likely to be slower than
+ /// [`par_sort_by_cached_key`](#method.par_sort_by_cached_key) in cases where the key function
+ /// is expensive.
+ ///
+ /// All quicksorts work in two stages: partitioning into two halves followed by recursive
+ /// calls. The partitioning phase is sequential, but the two recursive calls are performed in
+ /// parallel.
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// v.par_sort_unstable_by_key(|k| k.abs());
+ /// assert_eq!(v, [1, 2, -3, 4, -5]);
+ /// ```
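+ ///
+ /// A further sketch with a cheap key (the lengths below are distinct, so the result is
+ /// deterministic even though the sort is unstable):
+ ///
+ /// ```
+ /// use rayon::prelude::*;
+ ///
+ /// let mut words = ["hello", "a", "longer", "list"];
+ /// words.par_sort_unstable_by_key(|s| s.len());
+ /// assert_eq!(words, ["a", "list", "hello", "longer"]);
+ /// ```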
+ fn par_sort_unstable_by_key<K, F>(&mut self, f: F)
+ where
+ K: Ord,
+ F: Fn(&T) -> K + Sync,
+ {
+ par_quicksort(self.as_parallel_slice_mut(), |a, b| f(a).lt(&f(b)));
+ }
+}
+
+impl<T: Send> ParallelSliceMut<T> for [T] {
+ #[inline]
+ fn as_parallel_slice_mut(&mut self) -> &mut [T] {
+ self
+ }
+}
+
+impl<'data, T: Sync + 'data> IntoParallelIterator for &'data [T] {
+ type Item = &'data T;
+ type Iter = Iter<'data, T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ Iter { slice: self }
+ }
+}
+
+impl<'data, T: Send + 'data> IntoParallelIterator for &'data mut [T] {
+ type Item = &'data mut T;
+ type Iter = IterMut<'data, T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ IterMut { slice: self }
+ }
+}
+
+/// Parallel iterator over immutable items in a slice
+#[derive(Debug)]
+pub struct Iter<'data, T: Sync> {
+ slice: &'data [T],
+}
+
+impl<'data, T: Sync> Clone for Iter<'data, T> {
+ fn clone(&self) -> Self {
+ Iter { ..*self }
+ }
+}
+
+impl<'data, T: Sync + 'data> ParallelIterator for Iter<'data, T> {
+ type Item = &'data T;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+impl<'data, T: Sync + 'data> IndexedParallelIterator for Iter<'data, T> {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn len(&self) -> usize {
+ self.slice.len()
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ callback.callback(IterProducer { slice: self.slice })
+ }
+}
+
+struct IterProducer<'data, T: Sync> {
+ slice: &'data [T],
+}
+
+impl<'data, T: 'data + Sync> Producer for IterProducer<'data, T> {
+ type Item = &'data T;
+ type IntoIter = ::std::slice::Iter<'data, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.slice.iter()
+ }
+
+ fn split_at(self, index: usize) -> (Self, Self) {
+ let (left, right) = self.slice.split_at(index);
+ (IterProducer { slice: left }, IterProducer { slice: right })
+ }
+}
+
+/// Parallel iterator over immutable overlapping windows of a slice
+#[derive(Debug)]
+pub struct Windows<'data, T: Sync> {
+ window_size: usize,
+ slice: &'data [T],
+}
+
+impl<'data, T: Sync> Clone for Windows<'data, T> {
+ fn clone(&self) -> Self {
+ Windows { ..*self }
+ }
+}
+
+impl<'data, T: Sync + 'data> ParallelIterator for Windows<'data, T> {
+ type Item = &'data [T];
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+impl<'data, T: Sync + 'data> IndexedParallelIterator for Windows<'data, T> {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn len(&self) -> usize {
+ assert!(self.window_size >= 1);
+ self.slice.len().saturating_sub(self.window_size - 1)
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ callback.callback(WindowsProducer {
+ window_size: self.window_size,
+ slice: self.slice,
+ })
+ }
+}
+
+struct WindowsProducer<'data, T: Sync> {
+ window_size: usize,
+ slice: &'data [T],
+}
+
+impl<'data, T: 'data + Sync> Producer for WindowsProducer<'data, T> {
+ type Item = &'data [T];
+ type IntoIter = ::std::slice::Windows<'data, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.slice.windows(self.window_size)
+ }
+
+ fn split_at(self, index: usize) -> (Self, Self) {
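+ // The producer for the first `index` windows keeps `window_size - 1` elements past the
+ // split point so its last window is still complete; the right producer starts at `index`,
+ // so the two slices intentionally overlap by those elements.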
+ let left_index = cmp::min(self.slice.len(), index + (self.window_size - 1));
+ let left = &self.slice[..left_index];
+ let right = &self.slice[index..];
+ (
+ WindowsProducer {
+ window_size: self.window_size,
+ slice: left,
+ },
+ WindowsProducer {
+ window_size: self.window_size,
+ slice: right,
+ },
+ )
+ }
+}
+
+/// Parallel iterator over mutable items in a slice
+#[derive(Debug)]
+pub struct IterMut<'data, T: Send> {
+ slice: &'data mut [T],
+}
+
+impl<'data, T: Send + 'data> ParallelIterator for IterMut<'data, T> {
+ type Item = &'data mut T;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+impl<'data, T: Send + 'data> IndexedParallelIterator for IterMut<'data, T> {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn len(&self) -> usize {
+ self.slice.len()
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ callback.callback(IterMutProducer { slice: self.slice })
+ }
+}
+
+struct IterMutProducer<'data, T: Send> {
+ slice: &'data mut [T],
+}
+
+impl<'data, T: 'data + Send> Producer for IterMutProducer<'data, T> {
+ type Item = &'data mut T;
+ type IntoIter = ::std::slice::IterMut<'data, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.slice.iter_mut()
+ }
+
+ fn split_at(self, index: usize) -> (Self, Self) {
+ let (left, right) = self.slice.split_at_mut(index);
+ (
+ IterMutProducer { slice: left },
+ IterMutProducer { slice: right },
+ )
+ }
+}
+
+/// Parallel iterator over slices separated by a predicate
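+///
+/// A short usage sketch (the iterator is normally obtained from `par_split`):
+///
+/// ```
+/// use rayon::prelude::*;
+///
+/// let nums = [1, 2, 0, 3, 0, 4, 5];
+/// let group_lens: Vec<usize> = nums.par_split(|&x| x == 0).map(|g| g.len()).collect();
+/// assert_eq!(group_lens, [2, 1, 2]);
+/// ```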
+pub struct Split<'data, T, P> {
+ slice: &'data [T],
+ separator: P,
+}
+
+impl<'data, T, P: Clone> Clone for Split<'data, T, P> {
+ fn clone(&self) -> Self {
+ Split {
+ separator: self.separator.clone(),
+ ..*self
+ }
+ }
+}
+
+impl<'data, T: Debug, P> Debug for Split<'data, T, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Split").field("slice", &self.slice).finish()
+ }
+}
+
+impl<'data, T, P> ParallelIterator for Split<'data, T, P>
+where
+ P: Fn(&T) -> bool + Sync + Send,
+ T: Sync,
+{
+ type Item = &'data [T];
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let producer = SplitProducer::new(self.slice, &self.separator);
+ bridge_unindexed(producer, consumer)
+ }
+}
+
+/// Implement support for `SplitProducer`.
+impl<'data, T, P> Fissile<P> for &'data [T]
+where
+ P: Fn(&T) -> bool,
+{
+ fn length(&self) -> usize {
+ self.len()
+ }
+
+ fn midpoint(&self, end: usize) -> usize {
+ end / 2
+ }
+
+ fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
+ self[start..end].iter().position(separator)
+ }
+
+ fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
+ self[..end].iter().rposition(separator)
+ }
+
+ fn split_once(self, index: usize) -> (Self, Self) {
+ let (left, right) = self.split_at(index);
+ (left, &right[1..]) // skip the separator
+ }
+
+ fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
+ where
+ F: Folder<Self>,
+ Self: Send,
+ {
+ let mut split = self.split(separator);
+ if skip_last {
+ split.next_back();
+ }
+ folder.consume_iter(split)
+ }
+}
+
+/// Parallel iterator over mutable slices separated by a predicate
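+///
+/// A short usage sketch (the iterator is normally obtained from `par_split_mut`):
+///
+/// ```
+/// use rayon::prelude::*;
+///
+/// let mut nums = [1, 2, 0, 3, 0, 4, 5];
+/// nums.par_split_mut(|&x| x == 0)
+///     .for_each(|group| group.iter_mut().for_each(|x| *x *= 10));
+/// assert_eq!(nums, [10, 20, 0, 30, 0, 40, 50]);
+/// ```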
+pub struct SplitMut<'data, T, P> {
+ slice: &'data mut [T],
+ separator: P,
+}
+
+impl<'data, T: Debug, P> Debug for SplitMut<'data, T, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitMut")
+ .field("slice", &self.slice)
+ .finish()
+ }
+}
+
+impl<'data, T, P> ParallelIterator for SplitMut<'data, T, P>
+where
+ P: Fn(&T) -> bool + Sync + Send,
+ T: Send,
+{
+ type Item = &'data mut [T];
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let producer = SplitProducer::new(self.slice, &self.separator);
+ bridge_unindexed(producer, consumer)
+ }
+}
+
+/// Implement support for `SplitProducer`.
+impl<'data, T, P> Fissile<P> for &'data mut [T]
+where
+ P: Fn(&T) -> bool,
+{
+ fn length(&self) -> usize {
+ self.len()
+ }
+
+ fn midpoint(&self, end: usize) -> usize {
+ end / 2
+ }
+
+ fn find(&self, separator: &P, start: usize, end: usize) -> Option<usize> {
+ self[start..end].iter().position(separator)
+ }
+
+ fn rfind(&self, separator: &P, end: usize) -> Option<usize> {
+ self[..end].iter().rposition(separator)
+ }
+
+ fn split_once(self, index: usize) -> (Self, Self) {
+ let (left, right) = self.split_at_mut(index);
+ (left, &mut right[1..]) // skip the separator
+ }
+
+ fn fold_splits<F>(self, separator: &P, folder: F, skip_last: bool) -> F
+ where
+ F: Folder<Self>,
+ Self: Send,
+ {
+ let mut split = self.split_mut(separator);
+ if skip_last {
+ split.next_back();
+ }
+ folder.consume_iter(split)
+ }
+}
diff --git a/third_party/rust/rayon/src/slice/quicksort.rs b/third_party/rust/rayon/src/slice/quicksort.rs
new file mode 100644
index 0000000000..d71079ec3b
--- /dev/null
+++ b/third_party/rust/rayon/src/slice/quicksort.rs
@@ -0,0 +1,897 @@
+//! Parallel quicksort.
+//!
+//! This implementation is copied verbatim from `std::slice::sort_unstable` and then parallelized.
+//! The only difference from the original is that calls to `recurse` are executed in parallel using
+//! `rayon_core::join`.
+
+use std::cmp;
+use std::mem::{self, MaybeUninit};
+use std::ptr;
+
+/// When dropped, copies from `src` into `dest`.
+struct CopyOnDrop<T> {
+ src: *const T,
+ dest: *mut T,
+}
+
+impl<T> Drop for CopyOnDrop<T> {
+ fn drop(&mut self) {
+ // SAFETY: This is a helper type; refer to its usage sites for correctness.
+ // Namely, the caller must ensure that `src` and `dest` do not overlap, as required by
+ // `ptr::copy_nonoverlapping`.
+ unsafe {
+ ptr::copy_nonoverlapping(self.src, self.dest, 1);
+ }
+ }
+}
+
+/// Shifts the first element to the right until it encounters a greater or equal element.
+fn shift_head<T, F>(v: &mut [T], is_less: &F)
+where
+ F: Fn(&T, &T) -> bool,
+{
+ let len = v.len();
+ // SAFETY: The unsafe operations below involve indexing without a bounds check (by offsetting a
+ // pointer) and copying memory (`ptr::copy_nonoverlapping`).
+ //
+ // a. Indexing:
+ // 1. We checked that the length of the slice is >= 2.
+ // 2. All the indexing that we will do is always between {0 <= index < len} at most.
+ //
+ // b. Memory copying
+ // 1. We are obtaining pointers to references which are guaranteed to be valid.
+ // 2. They cannot overlap because we obtain pointers to different indices of the slice.
+ // Namely, `i` and `i-1`.
+ // 3. If the slice is properly aligned, the elements are properly aligned.
+ // It is the caller's responsibility to make sure the slice is properly aligned.
+ //
+ // See comments below for further detail.
+ unsafe {
+ // If the first two elements are out-of-order...
+ if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
+ // Read the first element into a stack-allocated variable. If a following comparison
+ // operation panics, `hole` will get dropped and automatically write the element back
+ // into the slice.
+ let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
+ let v = v.as_mut_ptr();
+ let mut hole = CopyOnDrop {
+ src: &*tmp,
+ dest: v.add(1),
+ };
+ ptr::copy_nonoverlapping(v.add(1), v.add(0), 1);
+
+ for i in 2..len {
+ if !is_less(&*v.add(i), &*tmp) {
+ break;
+ }
+
+ // Move `i`-th element one place to the left, thus shifting the hole to the right.
+ ptr::copy_nonoverlapping(v.add(i), v.add(i - 1), 1);
+ hole.dest = v.add(i);
+ }
+ // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+ }
+ }
+}
+
+/// Shifts the last element to the left until it encounters a smaller or equal element.
+fn shift_tail<T, F>(v: &mut [T], is_less: &F)
+where
+ F: Fn(&T, &T) -> bool,
+{
+ let len = v.len();
+ // SAFETY: The unsafe operations below involve indexing without a bounds check (by offsetting a
+ // pointer) and copying memory (`ptr::copy_nonoverlapping`).
+ //
+ // a. Indexing:
+ // 1. We checked that the length of the slice is >= 2.
+ // 2. All the indexing that we will do is always between `0 <= index < len-1` at most.
+ //
+ // b. Memory copying
+ // 1. We are obtaining pointers to references which are guaranteed to be valid.
+ // 2. They cannot overlap because we obtain pointers to different indices of the slice.
+ // Namely, `i` and `i+1`.
+ // 3. If the slice is properly aligned, the elements are properly aligned.
+ // It is the caller's responsibility to make sure the slice is properly aligned.
+ //
+ // See comments below for further detail.
+ unsafe {
+ // If the last two elements are out-of-order...
+ if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
+ // Read the last element into a stack-allocated variable. If a following comparison
+ // operation panics, `hole` will get dropped and automatically write the element back
+ // into the slice.
+ let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
+ let v = v.as_mut_ptr();
+ let mut hole = CopyOnDrop {
+ src: &*tmp,
+ dest: v.add(len - 2),
+ };
+ ptr::copy_nonoverlapping(v.add(len - 2), v.add(len - 1), 1);
+
+ for i in (0..len - 2).rev() {
+ if !is_less(&*tmp, &*v.add(i)) {
+ break;
+ }
+
+ // Move `i`-th element one place to the right, thus shifting the hole to the left.
+ ptr::copy_nonoverlapping(v.add(i), v.add(i + 1), 1);
+ hole.dest = v.add(i);
+ }
+ // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+ }
+ }
+}
+
+/// Partially sorts a slice by shifting several out-of-order elements around.
+///
+/// Returns `true` if the slice is sorted at the end. This function is *O*(*n*) worst-case.
+#[cold]
+fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &F) -> bool
+where
+ F: Fn(&T, &T) -> bool,
+{
+ // Maximum number of adjacent out-of-order pairs that will get shifted.
+ const MAX_STEPS: usize = 5;
+ // If the slice is shorter than this, don't shift any elements.
+ const SHORTEST_SHIFTING: usize = 50;
+
+ let len = v.len();
+ let mut i = 1;
+
+ for _ in 0..MAX_STEPS {
+ // SAFETY: We already explicitly did the bounds checking with `i < len`.
+ // All our subsequent indexing is only in the range `0 <= index < len`.
+ unsafe {
+ // Find the next pair of adjacent out-of-order elements.
+ while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
+ i += 1;
+ }
+ }
+
+ // Are we done?
+ if i == len {
+ return true;
+ }
+
+ // Don't shift elements on short arrays, because that has a performance cost.
+ if len < SHORTEST_SHIFTING {
+ return false;
+ }
+
+ // Swap the found pair of elements. This puts them in correct order.
+ v.swap(i - 1, i);
+
+ // Shift the smaller element to the left.
+ shift_tail(&mut v[..i], is_less);
+ // Shift the greater element to the right.
+ shift_head(&mut v[i..], is_less);
+ }
+
+ // Didn't manage to sort the slice in the limited number of steps.
+ false
+}
+
+/// Sorts a slice using insertion sort, which is *O*(*n*^2) worst-case.
+fn insertion_sort<T, F>(v: &mut [T], is_less: &F)
+where
+ F: Fn(&T, &T) -> bool,
+{
+ for i in 1..v.len() {
+ shift_tail(&mut v[..i + 1], is_less);
+ }
+}
+
+/// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case.
+#[cold]
+fn heapsort<T, F>(v: &mut [T], is_less: &F)
+where
+ F: Fn(&T, &T) -> bool,
+{
+ // This binary heap respects the invariant `parent >= child`.
+ let sift_down = |v: &mut [T], mut node| {
+ loop {
+ // Children of `node`.
+ let mut child = 2 * node + 1;
+ if child >= v.len() {
+ break;
+ }
+
+ // Choose the greater child.
+ if child + 1 < v.len() && is_less(&v[child], &v[child + 1]) {
+ child += 1;
+ }
+
+ // Stop if the invariant holds at `node`.
+ if !is_less(&v[node], &v[child]) {
+ break;
+ }
+
+ // Swap `node` with the greater child, move one step down, and continue sifting.
+ v.swap(node, child);
+ node = child;
+ }
+ };
+
+ // Build the heap in linear time.
+ for i in (0..v.len() / 2).rev() {
+ sift_down(v, i);
+ }
+
+ // Pop maximal elements from the heap.
+ for i in (1..v.len()).rev() {
+ v.swap(0, i);
+ sift_down(&mut v[..i], 0);
+ }
+}
+
+/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
+/// to `pivot`.
+///
+/// Returns the number of elements smaller than `pivot`.
+///
+/// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
+/// This idea is presented in the [BlockQuicksort][pdf] paper.
+///
+/// [pdf]: https://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
+fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &F) -> usize
+where
+ F: Fn(&T, &T) -> bool,
+{
+ // Number of elements in a typical block.
+ const BLOCK: usize = 128;
+
+ // The partitioning algorithm repeats the following steps until completion:
+ //
+ // 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
+ // 2. Trace a block from the right side to identify elements smaller than the pivot.
+ // 3. Exchange the identified elements between the left and right side.
+ //
+ // We keep the following variables for a block of elements:
+ //
+ // 1. `block` - Number of elements in the block.
+ // 2. `start` - Start pointer into the `offsets` array.
+ // 3. `end` - End pointer into the `offsets` array.
+ // 4. `offsets` - Indices of out-of-order elements within the block.
+
+ // The current block on the left side (from `l` to `l.add(block_l)`).
+ let mut l = v.as_mut_ptr();
+ let mut block_l = BLOCK;
+ let mut start_l = ptr::null_mut();
+ let mut end_l = ptr::null_mut();
+ let mut offsets_l = [MaybeUninit::<u8>::uninit(); BLOCK];
+
+ // The current block on the right side (from `r.sub(block_r)` to `r`).
+ // SAFETY: The documentation for `.add()` specifically mentions that `vec.as_ptr().add(vec.len())` is always safe.
+ let mut r = unsafe { l.add(v.len()) };
+ let mut block_r = BLOCK;
+ let mut start_r = ptr::null_mut();
+ let mut end_r = ptr::null_mut();
+ let mut offsets_r = [MaybeUninit::<u8>::uninit(); BLOCK];
+
+ // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
+ // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.
+
+ // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
+ fn width<T>(l: *mut T, r: *mut T) -> usize {
+ assert!(mem::size_of::<T>() > 0);
+ // FIXME: this should *likely* use `offset_from`, but more
+ // investigation is needed (including running tests in miri).
+ // TODO unstable: (r.addr() - l.addr()) / mem::size_of::<T>()
+ (r as usize - l as usize) / mem::size_of::<T>()
+ }
+
+ loop {
+ // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
+ // some patch-up work in order to partition the remaining elements in between.
+ let is_done = width(l, r) <= 2 * BLOCK;
+
+ if is_done {
+ // Number of remaining elements (still not compared to the pivot).
+ let mut rem = width(l, r);
+ if start_l < end_l || start_r < end_r {
+ rem -= BLOCK;
+ }
+
+ // Adjust block sizes so that the left and right block don't overlap, but get perfectly
+ // aligned to cover the whole remaining gap.
+ if start_l < end_l {
+ block_r = rem;
+ } else if start_r < end_r {
+ block_l = rem;
+ } else {
+ // There were the same number of elements to switch on both blocks during the last
+ // iteration, so there are no remaining elements on either block. Cover the remaining
+ // items with roughly equally-sized blocks.
+ block_l = rem / 2;
+ block_r = rem - block_l;
+ }
+ debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
+ debug_assert!(width(l, r) == block_l + block_r);
+ }
+
+ if start_l == end_l {
+ // Trace `block_l` elements from the left side.
+ // TODO unstable: start_l = MaybeUninit::slice_as_mut_ptr(&mut offsets_l);
+ start_l = offsets_l.as_mut_ptr() as *mut u8;
+ end_l = start_l;
+ let mut elem = l;
+
+ for i in 0..block_l {
+ // SAFETY: The unsafe operations below involve the use of `offset`.
+ // According to the conditions required by the function, we satisfy them because:
+ // 1. `offsets_l` is stack-allocated, and thus considered a separate allocated object.
+ // 2. The function `is_less` returns a `bool`.
+ // Casting a `bool` will never overflow `isize`.
+ // 3. We have guaranteed that `block_l` will be `<= BLOCK`.
+ // Plus, `end_l` was initially set to the begin pointer of `offsets_l`, which was declared on the stack.
+ // Thus, we know that even in the worst case (all invocations of `is_less` return false) we will only be at most 1 byte past the end.
+ // Another unsafe operation here is dereferencing `elem`.
+ // However, `elem` was initially the begin pointer to the slice which is always valid.
+ unsafe {
+ // Branchless comparison.
+ *end_l = i as u8;
+ end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
+ elem = elem.offset(1);
+ }
+ }
+ }
+
+ if start_r == end_r {
+ // Trace `block_r` elements from the right side.
+ // TODO unstable: start_r = MaybeUninit::slice_as_mut_ptr(&mut offsets_r);
+ start_r = offsets_r.as_mut_ptr() as *mut u8;
+ end_r = start_r;
+ let mut elem = r;
+
+ for i in 0..block_r {
+ // SAFETY: The unsafe operations below involve the use of `offset`.
+ // According to the conditions required by the function, we satisfy them because:
+ // 1. `offsets_r` is stack-allocated, and thus considered a separate allocated object.
+ // 2. The function `is_less` returns a `bool`.
+ // Casting a `bool` will never overflow `isize`.
+ // 3. We have guaranteed that `block_r` will be `<= BLOCK`.
+ // Plus, `end_r` was initially set to the begin pointer of `offsets_r`, which was declared on the stack.
+ // Thus, we know that even in the worst case (all invocations of `is_less` return true) we will only be at most 1 byte past the end.
+ // Another unsafe operation here is dereferencing `elem`.
+ // However, `elem` was initially `1 * sizeof(T)` past the end and we decrement it by `1 * sizeof(T)` before accessing it.
+ // Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice.
+ unsafe {
+ // Branchless comparison.
+ elem = elem.offset(-1);
+ *end_r = i as u8;
+ end_r = end_r.offset(is_less(&*elem, pivot) as isize);
+ }
+ }
+ }
+
+ // Number of out-of-order elements to swap between the left and right side.
+ let count = cmp::min(width(start_l, end_l), width(start_r, end_r));
+
+ if count > 0 {
+ macro_rules! left {
+ () => {
+ l.offset(*start_l as isize)
+ };
+ }
+ macro_rules! right {
+ () => {
+ r.offset(-(*start_r as isize) - 1)
+ };
+ }
+
+ // Instead of swapping one pair at a time, it is more efficient to perform a cyclic
+ // permutation. This is not strictly equivalent to swapping, but produces a similar
+ // result using fewer memory operations.
+
+ // SAFETY: The use of `ptr::read` is valid because there is at least one element in
+ // both `offsets_l` and `offsets_r`, so `left!` is a valid pointer to read from.
+ //
+ // The uses of `left!` involve calls to `offset` on `l`, which points to the
+ // beginning of `v`. All the offsets pointed-to by `start_l` are at most `block_l`, so
+ // these `offset` calls are safe as all reads are within the block. The same argument
+ // applies for the uses of `right!`.
+ //
+ // The calls to `start_l.offset` are valid because there are at most `count-1` of them,
+ // plus the final one at the end of the unsafe block, where `count` is the minimum number
+ // of collected offsets in `offsets_l` and `offsets_r`, so there is no risk of there not
+ // being enough elements. The same reasoning applies to the calls to `start_r.offset`.
+ //
+ // The calls to `copy_nonoverlapping` are safe because `left!` and `right!` are guaranteed
+ // not to overlap, and are valid because of the reasoning above.
+ unsafe {
+ let tmp = ptr::read(left!());
+ ptr::copy_nonoverlapping(right!(), left!(), 1);
+
+ for _ in 1..count {
+ start_l = start_l.offset(1);
+ ptr::copy_nonoverlapping(left!(), right!(), 1);
+ start_r = start_r.offset(1);
+ ptr::copy_nonoverlapping(right!(), left!(), 1);
+ }
+
+ ptr::copy_nonoverlapping(&tmp, right!(), 1);
+ mem::forget(tmp);
+ start_l = start_l.offset(1);
+ start_r = start_r.offset(1);
+ }
+ }
+
+ if start_l == end_l {
+ // All out-of-order elements in the left block were moved. Move to the next block.
+
+ // block-width-guarantee
+ // SAFETY: if `!is_done` then the slice width is guaranteed to be at least `2*BLOCK` wide. There
+ // are at most `BLOCK` elements in `offsets_l` because of its size, so the `offset` operation is
+ // safe. Otherwise, the debug assertions in the `is_done` case guarantee that
+ // `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
+ // for the smaller number of remaining elements.
+ l = unsafe { l.add(block_l) };
+ }
+
+ if start_r == end_r {
+ // All out-of-order elements in the right block were moved. Move to the previous block.
+
+ // SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
+ // or `block_r` has been adjusted for the last handful of elements.
+ r = unsafe { r.offset(-(block_r as isize)) };
+ }
+
+ if is_done {
+ break;
+ }
+ }
+
+ // All that remains now is at most one block (either the left or the right) with out-of-order
+ // elements that need to be moved. Such remaining elements can be simply shifted to the end
+ // within their block.
+
+ if start_l < end_l {
+ // The left block remains.
+ // Move its remaining out-of-order elements to the far right.
+ debug_assert_eq!(width(l, r), block_l);
+ while start_l < end_l {
+ // remaining-elements-safety
+ // SAFETY: while the loop condition holds there are still elements in `offsets_l`, so it
+ // is safe to point `end_l` to the previous element.
+ //
+ // The `ptr::swap` is safe if both its arguments are valid for reads and writes:
+ // - Per the debug assert above, the distance between `l` and `r` is `block_l`
+ // elements, so there can be at most `block_l` remaining offsets between `start_l`
+ // and `end_l`. This means `r` will be moved at most `block_l` steps back, which
+ // makes the `r.offset` calls valid (at that point `l == r`).
+ // - `offsets_l` contains valid offsets into `v` collected during the partitioning of
+ // the last block, so the `l.offset` calls are valid.
+ unsafe {
+ end_l = end_l.offset(-1);
+ ptr::swap(l.offset(*end_l as isize), r.offset(-1));
+ r = r.offset(-1);
+ }
+ }
+ width(v.as_mut_ptr(), r)
+ } else if start_r < end_r {
+ // The right block remains.
+ // Move its remaining out-of-order elements to the far left.
+ debug_assert_eq!(width(l, r), block_r);
+ while start_r < end_r {
+ // SAFETY: See the reasoning in [remaining-elements-safety].
+ unsafe {
+ end_r = end_r.offset(-1);
+ ptr::swap(l, r.offset(-(*end_r as isize) - 1));
+ l = l.offset(1);
+ }
+ }
+ width(v.as_mut_ptr(), l)
+ } else {
+ // Nothing else to do, we're done.
+ width(v.as_mut_ptr(), l)
+ }
+}
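+
+// For comparison, a minimal non-blocked sketch with the same post-condition as the block
+// loop above: elements `< pivot` first, the rest after, returning the count of the former.
+// Illustrative only; nothing in this module calls it.
+#[allow(dead_code)]
+fn partition_simple<T, F>(v: &mut [T], pivot: &T, is_less: &F) -> usize
+where
+ F: Fn(&T, &T) -> bool,
+{
+ let mut less = 0;
+ for i in 0..v.len() {
+ if is_less(&v[i], pivot) {
+ // Grow the `< pivot` prefix by swapping the smaller element into it.
+ v.swap(less, i);
+ less += 1;
+ }
+ }
+ less
+}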
+
+/// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
+/// equal to `v[pivot]`.
+///
+/// Returns a tuple of:
+///
+/// 1. Number of elements smaller than `v[pivot]`.
+/// 2. True if `v` was already partitioned.
+fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> (usize, bool)
+where
+ F: Fn(&T, &T) -> bool,
+{
+ let (mid, was_partitioned) = {
+ // Place the pivot at the beginning of slice.
+ v.swap(0, pivot);
+ let (pivot, v) = v.split_at_mut(1);
+ let pivot = &mut pivot[0];
+
+ // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
+ // operation panics, the pivot will be automatically written back into the slice.
+
+ // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe.
+ let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
+ let _pivot_guard = CopyOnDrop {
+ src: &*tmp,
+ dest: pivot,
+ };
+ let pivot = &*tmp;
+
+ // Find the first pair of out-of-order elements.
+ let mut l = 0;
+ let mut r = v.len();
+
+ // SAFETY: The unsafe operations below involve indexing the slice.
+ // For the first one: we do the bounds checking here with `l < r`, and `r` never exceeds `v.len()`.
+ // For the second one: we initially have `l == 0` and `r == v.len()`, and we only read
+ // `v[r - 1]` while `l < r`, so `r >= 1` and the index `r - 1` is always in bounds.
+ unsafe {
+ // Find the first element greater than or equal to the pivot.
+ while l < r && is_less(v.get_unchecked(l), pivot) {
+ l += 1;
+ }
+
+ // Find the last element smaller than the pivot.
+ while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
+ r -= 1;
+ }
+ }
+
+ (
+ l + partition_in_blocks(&mut v[l..r], pivot, is_less),
+ l >= r,
+ )
+
+ // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated
+ // variable) back into the slice where it originally was. This step is critical in ensuring
+ // safety!
+ };
+
+ // Place the pivot between the two partitions.
+ v.swap(0, mid);
+
+ (mid, was_partitioned)
+}
+
+/// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
+///
+/// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
+/// elements smaller than the pivot.
+fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &F) -> usize
+where
+ F: Fn(&T, &T) -> bool,
+{
+ // Place the pivot at the beginning of slice.
+ v.swap(0, pivot);
+ let (pivot, v) = v.split_at_mut(1);
+ let pivot = &mut pivot[0];
+
+ // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
+ // operation panics, the pivot will be automatically written back into the slice.
+ // SAFETY: The pointer here is valid because it is obtained from a reference to a slice.
+ let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
+ let _pivot_guard = CopyOnDrop {
+ src: &*tmp,
+ dest: pivot,
+ };
+ let pivot = &*tmp;
+
+ // Now partition the slice.
+ let mut l = 0;
+ let mut r = v.len();
+ loop {
+ // SAFETY: The unsafe operations below involve indexing the slice.
+ // For the first one: we do the bounds checking here with `l < r`, and `r` never exceeds `v.len()`.
+ // For the second one: we initially have `l == 0` and `r == v.len()`, and we only read
+ // `v[r - 1]` while `l < r`, so `r >= 1` and the index `r - 1` is always in bounds.
+ unsafe {
+ // Find the first element greater than the pivot.
+ while l < r && !is_less(pivot, v.get_unchecked(l)) {
+ l += 1;
+ }
+
+ // Find the last element equal to the pivot.
+ while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
+ r -= 1;
+ }
+
+ // Are we done?
+ if l >= r {
+ break;
+ }
+
+ // Swap the found pair of out-of-order elements.
+ r -= 1;
+ let ptr = v.as_mut_ptr();
+ ptr::swap(ptr.add(l), ptr.add(r));
+ l += 1;
+ }
+ }
+
+ // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
+ l + 1
+
+ // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated variable)
+ // back into the slice where it originally was. This step is critical in ensuring safety!
+}
+
+/// Scatters some elements around in an attempt to break patterns that might cause imbalanced
+/// partitions in quicksort.
+#[cold]
+fn break_patterns<T>(v: &mut [T]) {
+ let len = v.len();
+ if len >= 8 {
+ // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia.
+ let mut random = len as u32;
+ let mut gen_u32 = || {
+ random ^= random << 13;
+ random ^= random >> 17;
+ random ^= random << 5;
+ random
+ };
+ let mut gen_usize = || {
+ if usize::BITS <= 32 {
+ gen_u32() as usize
+ } else {
+ (((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
+ }
+ };
+
+ // Take random numbers modulo this number.
+ // The number fits into `usize` because `len` is not greater than `isize::MAX`.
+ let modulus = len.next_power_of_two();
+
+ // Some pivot candidates will be near this index. Let's randomize them.
+ let pos = len / 4 * 2;
+
+ for i in 0..3 {
+ // Generate a random number modulo `len`. However, in order to avoid costly operations
+ // we first take it modulo a power of two, and then decrease by `len` until it fits
+ // into the range `[0, len - 1]`.
+ let mut other = gen_usize() & (modulus - 1);
+
+ // `other` is guaranteed to be less than `2 * len`.
+ if other >= len {
+ other -= len;
+ }
+
+ v.swap(pos - 1 + i, other);
+ }
+ }
+}
+
+/// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted.
+///
+/// Elements in `v` might be reordered in the process.
+fn choose_pivot<T, F>(v: &mut [T], is_less: &F) -> (usize, bool)
+where
+ F: Fn(&T, &T) -> bool,
+{
+ // Minimum length to choose the median-of-medians method.
+ // Shorter slices use the simple median-of-three method.
+ const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
+ // Maximum number of swaps that can be performed in this function.
+ const MAX_SWAPS: usize = 4 * 3;
+
+ let len = v.len();
+
+ // Three indices near which we are going to choose a pivot.
+ #[allow(clippy::identity_op)]
+ let mut a = len / 4 * 1;
+ let mut b = len / 4 * 2;
+ let mut c = len / 4 * 3;
+
+ // Counts the total number of swaps we are about to perform while sorting indices.
+ let mut swaps = 0;
+
+ if len >= 8 {
+ // Swaps indices so that `v[a] <= v[b]`.
+ // SAFETY: `len >= 8` so there are at least two elements in the neighborhoods of
+ // `a`, `b` and `c`. This means the three calls to `sort_adjacent` result in
+ // corresponding calls to `sort3` with valid 3-item neighborhoods around each
+ // pointer, which in turn means the calls to `sort2` are done with valid
+ // references. Thus the `v.get_unchecked` calls are safe, as is the `ptr::swap`
+ // call.
+ let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
+ if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
+ ptr::swap(a, b);
+ swaps += 1;
+ }
+ };
+
+ // Swaps indices so that `v[a] <= v[b] <= v[c]`.
+ let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
+ sort2(a, b);
+ sort2(b, c);
+ sort2(a, b);
+ };
+
+ if len >= SHORTEST_MEDIAN_OF_MEDIANS {
+ // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
+ let mut sort_adjacent = |a: &mut usize| {
+ let tmp = *a;
+ sort3(&mut (tmp - 1), a, &mut (tmp + 1));
+ };
+
+ // Find medians in the neighborhoods of `a`, `b`, and `c`.
+ sort_adjacent(&mut a);
+ sort_adjacent(&mut b);
+ sort_adjacent(&mut c);
+ }
+
+ // Find the median among `a`, `b`, and `c`.
+ sort3(&mut a, &mut b, &mut c);
+ }
+
+ if swaps < MAX_SWAPS {
+ (b, swaps == 0)
+ } else {
+ // The maximum number of swaps was performed. Chances are the slice is descending or mostly
+ // descending, so reversing will probably help sort it faster.
+ v.reverse();
+ (len - 1 - b, true)
+ }
+}
+
+/// Sorts `v` recursively.
+///
+/// If the slice had a predecessor in the original array, it is specified as `pred`.
+///
+/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
+/// this function will immediately switch to heapsort.
+fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &F, mut pred: Option<&'a mut T>, mut limit: u32)
+where
+ T: Send,
+ F: Fn(&T, &T) -> bool + Sync,
+{
+ // Slices of up to this length get sorted using insertion sort.
+ const MAX_INSERTION: usize = 20;
+ // If both partitions are up to this length, we continue sequentially. This number is as small
+ // as possible while keeping the overhead of Rayon's task scheduling negligible.
+ const MAX_SEQUENTIAL: usize = 2000;
+
+ // True if the last partitioning was reasonably balanced.
+ let mut was_balanced = true;
+ // True if the last partitioning didn't shuffle elements (the slice was already partitioned).
+ let mut was_partitioned = true;
+
+ loop {
+ let len = v.len();
+
+ // Very short slices get sorted using insertion sort.
+ if len <= MAX_INSERTION {
+ insertion_sort(v, is_less);
+ return;
+ }
+
+ // If too many bad pivot choices were made, simply fall back to heapsort in order to
+ // guarantee `O(n * log(n))` worst-case.
+ if limit == 0 {
+ heapsort(v, is_less);
+ return;
+ }
+
+ // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
+ // some elements around. Hopefully we'll choose a better pivot this time.
+ if !was_balanced {
+ break_patterns(v);
+ limit -= 1;
+ }
+
+ // Choose a pivot and try guessing whether the slice is already sorted.
+ let (pivot, likely_sorted) = choose_pivot(v, is_less);
+
+ // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
+ // selection predicts the slice is likely already sorted...
+ if was_balanced && was_partitioned && likely_sorted {
+ // Try identifying several out-of-order elements and shifting them to correct
+ // positions. If the slice ends up being completely sorted, we're done.
+ if partial_insertion_sort(v, is_less) {
+ return;
+ }
+ }
+
+ // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
+ // slice. Partition the slice into elements equal to and elements greater than the pivot.
+ // This case is usually hit when the slice contains many duplicate elements.
+ if let Some(ref p) = pred {
+ if !is_less(p, &v[pivot]) {
+ let mid = partition_equal(v, pivot, is_less);
+
+ // Continue sorting elements greater than the pivot.
+ v = &mut v[mid..];
+ continue;
+ }
+ }
+
+ // Partition the slice.
+ let (mid, was_p) = partition(v, pivot, is_less);
+ was_balanced = cmp::min(mid, len - mid) >= len / 8;
+ was_partitioned = was_p;
+
+ // Split the slice into `left`, `pivot`, and `right`.
+ let (left, right) = v.split_at_mut(mid);
+ let (pivot, right) = right.split_at_mut(1);
+ let pivot = &mut pivot[0];
+
+ if cmp::max(left.len(), right.len()) <= MAX_SEQUENTIAL {
+ // Recurse into the shorter side only in order to minimize the total number of recursive
+ // calls and consume less stack space. Then just continue with the longer side (this is
+ // akin to tail recursion).
+ if left.len() < right.len() {
+ recurse(left, is_less, pred, limit);
+ v = right;
+ pred = Some(pivot);
+ } else {
+ recurse(right, is_less, Some(pivot), limit);
+ v = left;
+ }
+ } else {
+ // Sort the left and right half in parallel.
+ rayon_core::join(
+ || recurse(left, is_less, pred, limit),
+ || recurse(right, is_less, Some(pivot), limit),
+ );
+ break;
+ }
+ }
+}
+
+/// Sorts `v` using pattern-defeating quicksort in parallel.
+///
+/// The algorithm is unstable, in-place, and *O*(*n* \* log(*n*)) worst-case.
+pub(super) fn par_quicksort<T, F>(v: &mut [T], is_less: F)
+where
+ T: Send,
+ F: Fn(&T, &T) -> bool + Sync,
+{
+ // Sorting has no meaningful behavior on zero-sized types.
+ if mem::size_of::<T>() == 0 {
+ return;
+ }
+
+ // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
+ let limit = usize::BITS - v.len().leading_zeros();
+
+ recurse(v, &is_less, None, limit);
+}
+
+#[cfg(test)]
+mod tests {
+ use super::heapsort;
+ use rand::distributions::Uniform;
+ use rand::{thread_rng, Rng};
+
+ #[test]
+ fn test_heapsort() {
+ let rng = &mut thread_rng();
+
+ for len in (0..25).chain(500..501) {
+ for &modulus in &[5, 10, 100] {
+ let dist = Uniform::new(0, modulus);
+ for _ in 0..100 {
+ let v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
+
+ // Test heapsort using `<` operator.
+ let mut tmp = v.clone();
+ heapsort(&mut tmp, &|a, b| a < b);
+ assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
+
+ // Test heapsort using `>` operator.
+ let mut tmp = v.clone();
+ heapsort(&mut tmp, &|a, b| a > b);
+ assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
+ }
+ }
+ }
+
+ // Sort using a completely random comparison function.
+ // This will reorder the elements *somehow*, but won't panic.
+ let mut v: Vec<_> = (0..100).collect();
+ heapsort(&mut v, &|_, _| thread_rng().gen());
+ heapsort(&mut v, &|a, b| a < b);
+
+ for (i, &entry) in v.iter().enumerate() {
+ assert_eq!(entry, i);
+ }
+ }
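+
+ // A direct sanity check for `par_quicksort` itself, mirroring the `heapsort` test above
+ // (a minimal sketch; the public `par_sort_unstable*` wrappers are exercised in `slice/test.rs`).
+ #[test]
+ fn test_par_quicksort() {
+ let rng = &mut thread_rng();
+ let dist = Uniform::new(0, 100);
+
+ for len in (0..25).chain(500..501) {
+ let v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
+
+ let mut tmp = v.clone();
+ super::par_quicksort(&mut tmp, |a: &i32, b: &i32| a < b);
+ assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
+ }
+ }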
+}
diff --git a/third_party/rust/rayon/src/slice/rchunks.rs b/third_party/rust/rayon/src/slice/rchunks.rs
new file mode 100644
index 0000000000..63e765d77e
--- /dev/null
+++ b/third_party/rust/rayon/src/slice/rchunks.rs
@@ -0,0 +1,386 @@
+use crate::iter::plumbing::*;
+use crate::iter::*;
+use crate::math::div_round_up;
+
+/// Parallel iterator over immutable non-overlapping chunks of a slice, starting at the end.
+#[derive(Debug)]
+pub struct RChunks<'data, T: Sync> {
+ chunk_size: usize,
+ slice: &'data [T],
+}
+
+impl<'data, T: Sync> RChunks<'data, T> {
+ pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
+ Self { chunk_size, slice }
+ }
+}
+
+impl<'data, T: Sync> Clone for RChunks<'data, T> {
+ fn clone(&self) -> Self {
+ RChunks { ..*self }
+ }
+}
+
+impl<'data, T: Sync + 'data> ParallelIterator for RChunks<'data, T> {
+ type Item = &'data [T];
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+impl<'data, T: Sync + 'data> IndexedParallelIterator for RChunks<'data, T> {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn len(&self) -> usize {
+ div_round_up(self.slice.len(), self.chunk_size)
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ callback.callback(RChunksProducer {
+ chunk_size: self.chunk_size,
+ slice: self.slice,
+ })
+ }
+}
+
+struct RChunksProducer<'data, T: Sync> {
+ chunk_size: usize,
+ slice: &'data [T],
+}
+
+impl<'data, T: 'data + Sync> Producer for RChunksProducer<'data, T> {
+ type Item = &'data [T];
+ type IntoIter = ::std::slice::RChunks<'data, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.slice.rchunks(self.chunk_size)
+ }
+
+ fn split_at(self, index: usize) -> (Self, Self) {
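+ // `index` counts chunks from the end of the slice, so the tail of the slice (everything
+ // at or after `elem_index`) holds the first `index` chunks and becomes the left producer;
+ // `saturating_sub` accounts for the final, possibly short, chunk.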
+ let elem_index = self.slice.len().saturating_sub(index * self.chunk_size);
+ let (left, right) = self.slice.split_at(elem_index);
+ (
+ RChunksProducer {
+ chunk_size: self.chunk_size,
+ slice: right,
+ },
+ RChunksProducer {
+ chunk_size: self.chunk_size,
+ slice: left,
+ },
+ )
+ }
+}
+
+/// Parallel iterator over immutable non-overlapping chunks of a slice, starting at the end.
+#[derive(Debug)]
+pub struct RChunksExact<'data, T: Sync> {
+ chunk_size: usize,
+ slice: &'data [T],
+ rem: &'data [T],
+}
+
+impl<'data, T: Sync> RChunksExact<'data, T> {
+ pub(super) fn new(chunk_size: usize, slice: &'data [T]) -> Self {
+ let rem_len = slice.len() % chunk_size;
+ let (rem, slice) = slice.split_at(rem_len);
+ Self {
+ chunk_size,
+ slice,
+ rem,
+ }
+ }
+
+ /// Return the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements.
+ pub fn remainder(&self) -> &'data [T] {
+ self.rem
+ }
+}
+
+impl<'data, T: Sync> Clone for RChunksExact<'data, T> {
+ fn clone(&self) -> Self {
+ RChunksExact { ..*self }
+ }
+}
+
+impl<'data, T: Sync + 'data> ParallelIterator for RChunksExact<'data, T> {
+ type Item = &'data [T];
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+impl<'data, T: Sync + 'data> IndexedParallelIterator for RChunksExact<'data, T> {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn len(&self) -> usize {
+ self.slice.len() / self.chunk_size
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ callback.callback(RChunksExactProducer {
+ chunk_size: self.chunk_size,
+ slice: self.slice,
+ })
+ }
+}
+
+struct RChunksExactProducer<'data, T: Sync> {
+ chunk_size: usize,
+ slice: &'data [T],
+}
+
+impl<'data, T: 'data + Sync> Producer for RChunksExactProducer<'data, T> {
+ type Item = &'data [T];
+ type IntoIter = ::std::slice::RChunksExact<'data, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.slice.rchunks_exact(self.chunk_size)
+ }
+
+ fn split_at(self, index: usize) -> (Self, Self) {
+ let elem_index = self.slice.len() - index * self.chunk_size;
+ let (left, right) = self.slice.split_at(elem_index);
+ (
+ RChunksExactProducer {
+ chunk_size: self.chunk_size,
+ slice: right,
+ },
+ RChunksExactProducer {
+ chunk_size: self.chunk_size,
+ slice: left,
+ },
+ )
+ }
+}
+
+/// Parallel iterator over mutable non-overlapping chunks of a slice, starting at the end.
+#[derive(Debug)]
+pub struct RChunksMut<'data, T: Send> {
+ chunk_size: usize,
+ slice: &'data mut [T],
+}
+
+impl<'data, T: Send> RChunksMut<'data, T> {
+ pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
+ Self { chunk_size, slice }
+ }
+}
+
+impl<'data, T: Send + 'data> ParallelIterator for RChunksMut<'data, T> {
+ type Item = &'data mut [T];
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+impl<'data, T: Send + 'data> IndexedParallelIterator for RChunksMut<'data, T> {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn len(&self) -> usize {
+ div_round_up(self.slice.len(), self.chunk_size)
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ callback.callback(RChunksMutProducer {
+ chunk_size: self.chunk_size,
+ slice: self.slice,
+ })
+ }
+}
+
+struct RChunksMutProducer<'data, T: Send> {
+ chunk_size: usize,
+ slice: &'data mut [T],
+}
+
+impl<'data, T: 'data + Send> Producer for RChunksMutProducer<'data, T> {
+ type Item = &'data mut [T];
+ type IntoIter = ::std::slice::RChunksMut<'data, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.slice.rchunks_mut(self.chunk_size)
+ }
+
+ fn split_at(self, index: usize) -> (Self, Self) {
+ let elem_index = self.slice.len().saturating_sub(index * self.chunk_size);
+ let (left, right) = self.slice.split_at_mut(elem_index);
+ (
+ RChunksMutProducer {
+ chunk_size: self.chunk_size,
+ slice: right,
+ },
+ RChunksMutProducer {
+ chunk_size: self.chunk_size,
+ slice: left,
+ },
+ )
+ }
+}
+
+/// Parallel iterator over mutable non-overlapping chunks of a slice, starting at the end.
+#[derive(Debug)]
+pub struct RChunksExactMut<'data, T: Send> {
+ chunk_size: usize,
+ slice: &'data mut [T],
+ rem: &'data mut [T],
+}
+
+impl<'data, T: Send> RChunksExactMut<'data, T> {
+ pub(super) fn new(chunk_size: usize, slice: &'data mut [T]) -> Self {
+ let rem_len = slice.len() % chunk_size;
+ let (rem, slice) = slice.split_at_mut(rem_len);
+ Self {
+ chunk_size,
+ slice,
+ rem,
+ }
+ }
+
+ /// Return the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements.
+ ///
+ /// Note that this has to consume `self` to return the original lifetime of
+ /// the data, which prevents this from actually being used as a parallel
+ /// iterator since that also consumes. This method is provided for parity
+ /// with `std::slice::RChunksExactMut`, but consider calling `remainder()` or
+ /// `take_remainder()` as alternatives.
+ pub fn into_remainder(self) -> &'data mut [T] {
+ self.rem
+ }
+
+ /// Return the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements.
+ ///
+ /// Consider `take_remainder()` if you need access to the data with its
+ /// original lifetime, rather than borrowing through `&mut self` here.
+ pub fn remainder(&mut self) -> &mut [T] {
+ self.rem
+ }
+
+ /// Return the remainder of the original slice that is not going to be
+ /// returned by the iterator. The returned slice has at most `chunk_size-1`
+ /// elements. Subsequent calls will return an empty slice.
+ pub fn take_remainder(&mut self) -> &'data mut [T] {
+ std::mem::take(&mut self.rem)
+ }
+}
+
+impl<'data, T: Send + 'data> ParallelIterator for RChunksExactMut<'data, T> {
+ type Item = &'data mut [T];
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.len())
+ }
+}
+
+impl<'data, T: Send + 'data> IndexedParallelIterator for RChunksExactMut<'data, T> {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ bridge(self, consumer)
+ }
+
+ fn len(&self) -> usize {
+ self.slice.len() / self.chunk_size
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ callback.callback(RChunksExactMutProducer {
+ chunk_size: self.chunk_size,
+ slice: self.slice,
+ })
+ }
+}
+
+struct RChunksExactMutProducer<'data, T: Send> {
+ chunk_size: usize,
+ slice: &'data mut [T],
+}
+
+impl<'data, T: 'data + Send> Producer for RChunksExactMutProducer<'data, T> {
+ type Item = &'data mut [T];
+ type IntoIter = ::std::slice::RChunksExactMut<'data, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.slice.rchunks_exact_mut(self.chunk_size)
+ }
+
+ fn split_at(self, index: usize) -> (Self, Self) {
+ let elem_index = self.slice.len() - index * self.chunk_size;
+ let (left, right) = self.slice.split_at_mut(elem_index);
+ (
+ RChunksExactMutProducer {
+ chunk_size: self.chunk_size,
+ slice: right,
+ },
+ RChunksExactMutProducer {
+ chunk_size: self.chunk_size,
+ slice: left,
+ },
+ )
+ }
+}
diff --git a/third_party/rust/rayon/src/slice/test.rs b/third_party/rust/rayon/src/slice/test.rs
new file mode 100644
index 0000000000..f74ca0f743
--- /dev/null
+++ b/third_party/rust/rayon/src/slice/test.rs
@@ -0,0 +1,170 @@
+#![cfg(test)]
+
+use crate::prelude::*;
+use rand::distributions::Uniform;
+use rand::seq::SliceRandom;
+use rand::{thread_rng, Rng};
+use std::cmp::Ordering::{Equal, Greater, Less};
+
+macro_rules! sort {
+ ($f:ident, $name:ident) => {
+ #[test]
+ fn $name() {
+ let rng = &mut thread_rng();
+
+ for len in (0..25).chain(500..501) {
+ for &modulus in &[5, 10, 100] {
+ let dist = Uniform::new(0, modulus);
+ for _ in 0..100 {
+ let v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
+
+ // Test sort using `<` operator.
+ let mut tmp = v.clone();
+ tmp.$f(|a, b| a.cmp(b));
+ assert!(tmp.windows(2).all(|w| w[0] <= w[1]));
+
+ // Test sort using `>` operator.
+ let mut tmp = v.clone();
+ tmp.$f(|a, b| b.cmp(a));
+ assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
+ }
+ }
+ }
+
+ // Test sort with many duplicates.
+ for &len in &[1_000, 10_000, 100_000] {
+ for &modulus in &[5, 10, 100, 10_000] {
+ let dist = Uniform::new(0, modulus);
+ let mut v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
+
+ v.$f(|a, b| a.cmp(b));
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+ }
+ }
+
+ // Test sort with many pre-sorted runs.
+ for &len in &[1_000, 10_000, 100_000] {
+ let len_dist = Uniform::new(0, len);
+ for &modulus in &[5, 10, 1000, 50_000] {
+ let dist = Uniform::new(0, modulus);
+ let mut v: Vec<i32> = rng.sample_iter(&dist).take(len).collect();
+
+ v.sort();
+ v.reverse();
+
+ for _ in 0..5 {
+ let a = rng.sample(&len_dist);
+ let b = rng.sample(&len_dist);
+ if a < b {
+ v[a..b].reverse();
+ } else {
+ v.swap(a, b);
+ }
+ }
+
+ v.$f(|a, b| a.cmp(b));
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+ }
+ }
+
+ // Sort using a completely random comparison function.
+ // This will reorder the elements *somehow*, but won't panic.
+ let mut v: Vec<_> = (0..100).collect();
+ v.$f(|_, _| *[Less, Equal, Greater].choose(&mut thread_rng()).unwrap());
+ v.$f(|a, b| a.cmp(b));
+ for i in 0..v.len() {
+ assert_eq!(v[i], i);
+ }
+
+ // Should not panic.
+ [0i32; 0].$f(|a, b| a.cmp(b));
+ [(); 10].$f(|a, b| a.cmp(b));
+ [(); 100].$f(|a, b| a.cmp(b));
+
+ let mut v = [0xDEAD_BEEFu64];
+ v.$f(|a, b| a.cmp(b));
+ assert!(v == [0xDEAD_BEEF]);
+ }
+ };
+}
+
+sort!(par_sort_by, test_par_sort);
+sort!(par_sort_unstable_by, test_par_sort_unstable);
+
+#[test]
+fn test_par_sort_stability() {
+ for len in (2..25).chain(500..510).chain(50_000..50_010) {
+ for _ in 0..10 {
+ let mut counts = [0; 10];
+
+ // Create a vector like [(6, 1), (5, 1), (6, 2), ...],
+ // where the first item of each tuple is random, but
+ // the second item represents which occurrence of that
+ // number this element is, i.e. the second elements
+ // will occur in sorted order.
+ let mut rng = thread_rng();
+ let mut v: Vec<_> = (0..len)
+ .map(|_| {
+ let n: usize = rng.gen_range(0..10);
+ counts[n] += 1;
+ (n, counts[n])
+ })
+ .collect();
+
+ // Only sort on the first element, so an unstable sort
+ // may mix up the counts.
+ v.par_sort_by(|&(a, _), &(b, _)| a.cmp(&b));
+
+ // This comparison includes the count (the second item
+ // of the tuple), so elements with equal first items
+ // will need to be ordered with increasing
+ // counts... i.e. exactly asserting that this sort is
+ // stable.
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+ }
+ }
+}
+
+#[test]
+fn test_par_chunks_exact_remainder() {
+ let v: &[i32] = &[0, 1, 2, 3, 4];
+ let c = v.par_chunks_exact(2);
+ assert_eq!(c.remainder(), &[4]);
+ assert_eq!(c.len(), 2);
+}
+
+#[test]
+fn test_par_chunks_exact_mut_remainder() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c = v.par_chunks_exact_mut(2);
+ assert_eq!(c.remainder(), &[4]);
+ assert_eq!(c.len(), 2);
+ assert_eq!(c.into_remainder(), &[4]);
+
+ let mut c = v.par_chunks_exact_mut(2);
+ assert_eq!(c.take_remainder(), &[4]);
+ assert_eq!(c.take_remainder(), &[]);
+ assert_eq!(c.len(), 2);
+}
+
+#[test]
+fn test_par_rchunks_exact_remainder() {
+ let v: &[i32] = &[0, 1, 2, 3, 4];
+ let c = v.par_rchunks_exact(2);
+ assert_eq!(c.remainder(), &[0]);
+ assert_eq!(c.len(), 2);
+}
+
+#[test]
+fn test_par_rchunks_exact_mut_remainder() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c = v.par_rchunks_exact_mut(2);
+ assert_eq!(c.remainder(), &[0]);
+ assert_eq!(c.len(), 2);
+ assert_eq!(c.into_remainder(), &[0]);
+
+ let mut c = v.par_rchunks_exact_mut(2);
+ assert_eq!(c.take_remainder(), &[0]);
+ assert_eq!(c.take_remainder(), &[]);
+ assert_eq!(c.len(), 2);
+}