Diffstat (limited to 'vendor/rustc-rayon/src/slice')
-rw-r--r--  vendor/rustc-rayon/src/slice/chunks.rs    |  2
-rw-r--r--  vendor/rustc-rayon/src/slice/mergesort.rs | 22
-rw-r--r--  vendor/rustc-rayon/src/slice/mod.rs       |  8
-rw-r--r--  vendor/rustc-rayon/src/slice/quicksort.rs | 79
-rw-r--r--  vendor/rustc-rayon/src/slice/rchunks.rs   |  2
-rw-r--r--  vendor/rustc-rayon/src/slice/test.rs      |  2
6 files changed, 64 insertions, 51 deletions
diff --git a/vendor/rustc-rayon/src/slice/chunks.rs b/vendor/rustc-rayon/src/slice/chunks.rs
index 5b145bdd8..a3cc70953 100644
--- a/vendor/rustc-rayon/src/slice/chunks.rs
+++ b/vendor/rustc-rayon/src/slice/chunks.rs
@@ -317,7 +317,7 @@ impl<'data, T: Send> ChunksExactMut<'data, T> {
     /// returned by the iterator. The returned slice has at most `chunk_size-1`
     /// elements. Subsequent calls will return an empty slice.
     pub fn take_remainder(&mut self) -> &'data mut [T] {
-        std::mem::replace(&mut self.rem, &mut [])
+        std::mem::take(&mut self.rem)
     }
 }
diff --git a/vendor/rustc-rayon/src/slice/mergesort.rs b/vendor/rustc-rayon/src/slice/mergesort.rs
index a24ba65b7..fec309d27 100644
--- a/vendor/rustc-rayon/src/slice/mergesort.rs
+++ b/vendor/rustc-rayon/src/slice/mergesort.rs
@@ -205,9 +205,8 @@ where
 impl<T> Drop for MergeHole<T> {
     fn drop(&mut self) {
         // `T` is not a zero-sized type, so it's okay to divide by its size.
-        let len = (self.end as usize - self.start as usize) / size_of::<T>();
         unsafe {
-            // TODO 1.47: let len = self.end.offset_from(self.start) as usize;
+            let len = self.end.offset_from(self.start) as usize;
             ptr::copy_nonoverlapping(self.start, self.dest, len);
         }
     }
@@ -491,8 +490,8 @@ where
             let dest_l = SendPtr(dest);
             let dest_r = SendPtr(dest.add(left_l.len() + right_l.len()));
             rayon_core::join(
-                || par_merge(left_l, right_l, dest_l.0, is_less),
-                || par_merge(left_r, right_r, dest_r.0, is_less),
+                move || par_merge(left_l, right_l, dest_l.get(), is_less),
+                move || par_merge(left_r, right_r, dest_r.get(), is_less),
             );
         }
         // Finally, `s` gets dropped if we used sequential merge, thus copying the remaining elements
@@ -570,7 +569,7 @@ unsafe fn recurse<T, F>(
     // After recursive calls finish we'll have to merge chunks `(start, mid)` and `(mid, end)` from
     // `src` into `dest`. If the current invocation has to store the result into `buf`, we'll
-    // merge chunks from `v` into `buf`, and viceversa.
+    // merge chunks from `v` into `buf`, and vice versa.
     //
     // Recursive calls flip `into_buf` at each level of recursion. More concretely, `par_merge`
     // merges chunks from `buf` into `v` at the first level, from `v` into `buf` at the second
@@ -593,8 +592,8 @@ unsafe fn recurse<T, F>(
         let v = SendPtr(v);
         let buf = SendPtr(buf);
         rayon_core::join(
-            || recurse(v.0, buf.0, left, !into_buf, is_less),
-            || recurse(v.0, buf.0, right, !into_buf, is_less),
+            move || recurse(v.get(), buf.get(), left, !into_buf, is_less),
+            move || recurse(v.get(), buf.get(), right, !into_buf, is_less),
         );
 
         // Everything went all right - recursive calls didn't panic.
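Note on the `SendPtr` changes in the hunks above: the closures handed to `rayon_core::join` become `move` closures and read the pointer through a `get()` accessor instead of the `.0` field. Under Rust 2021's disjoint closure capture, `|| ... dest_l.0 ...` would capture only the raw-pointer field, which is not `Send`; capturing the whole wrapper keeps the closure sendable. The `SendPtr` definition itself is not part of this diff, so the following is a minimal sketch of the pattern, with the `get` accessor inferred from the calls above and the `Send` impl an assumption:

// Sketch of the SendPtr pattern (illustration, not the vendored code).
#[derive(Copy, Clone)]
struct SendPtr<T>(*mut T);

// SAFETY (assumed contract): callers must ensure the pointee is never
// accessed concurrently in conflicting ways; the merge/recurse callers
// hand each thread a disjoint half of the buffer.
unsafe impl<T: Send> Send for SendPtr<T> {}

impl<T> SendPtr<T> {
    // Accessed through a method so a `move` closure captures the whole
    // `Send` wrapper rather than the bare (non-Send) pointer field.
    fn get(self) -> *mut T {
        self.0
    }
}

fn assert_send<T: Send>(_: &T) {}

fn main() {
    let mut data = [1, 2, 3];
    let p = SendPtr(data.as_mut_ptr());
    // The method call captures all of `p`; with Rust 2021's disjoint
    // capture, `move || p.0` would capture the bare `*mut i32` instead
    // and fail this `Send` check.
    let f = move || p.get();
    assert_send(&f);
}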
@@ -661,16 +660,17 @@ where
     // Wrap pointer in SendPtr so that it can be sent to another thread
     // See the documentation of SendPtr for a full explanation
     let buf = SendPtr(buf);
+    let is_less = &is_less;
     v.par_chunks_mut(CHUNK_LENGTH)
         .with_max_len(1)
         .enumerate()
-        .map(|(i, chunk)| {
+        .map(move |(i, chunk)| {
             let l = CHUNK_LENGTH * i;
             let r = l + chunk.len();
             unsafe {
-                let buf = buf.0.add(l);
-                (l, r, mergesort(chunk, buf, &is_less))
+                let buf = buf.get().add(l);
+                (l, r, mergesort(chunk, buf, is_less))
             }
         })
         .collect::<Vec<_>>()
@@ -731,7 +731,7 @@ mod tests {
         check(&[1, 2, 2, 2, 2, 3], &[]);
         check(&[], &[1, 2, 2, 2, 2, 3]);
 
-        let ref mut rng = thread_rng();
+        let rng = &mut thread_rng();
 
         for _ in 0..100 {
             let limit: u32 = rng.gen_range(1..21);
diff --git a/vendor/rustc-rayon/src/slice/mod.rs b/vendor/rustc-rayon/src/slice/mod.rs
index a4e777374..dab56deb3 100644
--- a/vendor/rustc-rayon/src/slice/mod.rs
+++ b/vendor/rustc-rayon/src/slice/mod.rs
@@ -85,6 +85,7 @@ pub trait ParallelSlice<T: Sync> {
     /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks(2).collect();
     /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4], &[5]]);
     /// ```
+    #[track_caller]
     fn par_chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
         assert!(chunk_size != 0, "chunk_size must not be zero");
         Chunks::new(chunk_size, self.as_parallel_slice())
@@ -104,6 +105,7 @@ pub trait ParallelSlice<T: Sync> {
     /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_chunks_exact(2).collect();
     /// assert_eq!(chunks, vec![&[1, 2][..], &[3, 4]]);
     /// ```
+    #[track_caller]
     fn par_chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
         assert!(chunk_size != 0, "chunk_size must not be zero");
         ChunksExact::new(chunk_size, self.as_parallel_slice())
@@ -123,6 +125,7 @@ pub trait ParallelSlice<T: Sync> {
     /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_rchunks(2).collect();
     /// assert_eq!(chunks, vec![&[4, 5][..], &[2, 3], &[1]]);
     /// ```
+    #[track_caller]
     fn par_rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
         assert!(chunk_size != 0, "chunk_size must not be zero");
         RChunks::new(chunk_size, self.as_parallel_slice())
@@ -142,6 +145,7 @@ pub trait ParallelSlice<T: Sync> {
     /// let chunks: Vec<_> = [1, 2, 3, 4, 5].par_rchunks_exact(2).collect();
     /// assert_eq!(chunks, vec![&[4, 5][..], &[2, 3]]);
     /// ```
+    #[track_caller]
     fn par_rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
         assert!(chunk_size != 0, "chunk_size must not be zero");
         RChunksExact::new(chunk_size, self.as_parallel_slice())
@@ -199,6 +203,7 @@ pub trait ParallelSliceMut<T: Send> {
     ///      .for_each(|slice| slice.reverse());
     /// assert_eq!(array, [2, 1, 4, 3, 5]);
     /// ```
+    #[track_caller]
     fn par_chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
         assert!(chunk_size != 0, "chunk_size must not be zero");
         ChunksMut::new(chunk_size, self.as_parallel_slice_mut())
@@ -220,6 +225,7 @@ pub trait ParallelSliceMut<T: Send> {
     ///      .for_each(|slice| slice.reverse());
     /// assert_eq!(array, [3, 2, 1, 4, 5]);
     /// ```
+    #[track_caller]
     fn par_chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
         assert!(chunk_size != 0, "chunk_size must not be zero");
         ChunksExactMut::new(chunk_size, self.as_parallel_slice_mut())
@@ -241,6 +247,7 @@ pub trait ParallelSliceMut<T: Send> {
     ///      .for_each(|slice| slice.reverse());
     /// assert_eq!(array, [1, 3, 2, 5, 4]);
     /// ```
+    #[track_caller]
     fn par_rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
         assert!(chunk_size != 0, "chunk_size must not be zero");
         RChunksMut::new(chunk_size, self.as_parallel_slice_mut())
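All of the `#[track_caller]` additions in this file guard the same `chunk_size != 0` assertion; the attribute makes the resulting panic point at the caller of `par_chunks` and friends rather than at a line inside rayon. A standalone sketch of the effect (the function name below is illustrative, not rayon API):

// `#[track_caller]` propagates the caller's source location into
// `Location::caller()` and into panics raised inside the function.
#[track_caller]
fn chunks_demo(chunk_size: usize) {
    assert!(chunk_size != 0, "chunk_size must not be zero");
}

fn main() {
    // This panic is reported at the line below, not at the `assert!`
    // line inside `chunks_demo`.
    chunks_demo(0);
}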
@@ -262,6 +269,7 @@ pub trait ParallelSliceMut<T: Send> {
     ///      .for_each(|slice| slice.reverse());
     /// assert_eq!(array, [1, 2, 5, 4, 3]);
     /// ```
+    #[track_caller]
     fn par_rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
         assert!(chunk_size != 0, "chunk_size must not be zero");
         RChunksExactMut::new(chunk_size, self.as_parallel_slice_mut())
diff --git a/vendor/rustc-rayon/src/slice/quicksort.rs b/vendor/rustc-rayon/src/slice/quicksort.rs
index 87fb724c3..2bfc35032 100644
--- a/vendor/rustc-rayon/src/slice/quicksort.rs
+++ b/vendor/rustc-rayon/src/slice/quicksort.rs
@@ -5,16 +5,34 @@
 //! `rayon_core::join`.
 
 use std::cmp;
+use std::marker::PhantomData;
 use std::mem::{self, MaybeUninit};
 use std::ptr;
 
 /// When dropped, copies from `src` into `dest`.
-struct CopyOnDrop<T> {
+#[must_use]
+struct CopyOnDrop<'a, T> {
     src: *const T,
     dest: *mut T,
+    /// `src` is often a local pointer here, make sure we have appropriate
+    /// PhantomData so that dropck can protect us.
+    marker: PhantomData<&'a mut T>,
 }
 
-impl<T> Drop for CopyOnDrop<T> {
+impl<'a, T> CopyOnDrop<'a, T> {
+    /// Construct from a source pointer and a destination
+    /// Assumes dest lives longer than src, since there is no easy way to
+    /// copy down lifetime information from another pointer
+    unsafe fn new(src: &'a T, dest: *mut T) -> Self {
+        CopyOnDrop {
+            src,
+            dest,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<T> Drop for CopyOnDrop<'_, T> {
     fn drop(&mut self) {
         // SAFETY: This is a helper class.
         // Please refer to its usage for correctness.
@@ -54,10 +72,7 @@ where
         // into the slice.
         let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
         let v = v.as_mut_ptr();
-        let mut hole = CopyOnDrop {
-            src: &*tmp,
-            dest: v.add(1),
-        };
+        let mut hole = CopyOnDrop::new(&*tmp, v.add(1));
         ptr::copy_nonoverlapping(v.add(1), v.add(0), 1);
 
         for i in 2..len {
@@ -103,10 +118,7 @@ where
         // into the slice.
         let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
         let v = v.as_mut_ptr();
-        let mut hole = CopyOnDrop {
-            src: &*tmp,
-            dest: v.add(len - 2),
-        };
+        let mut hole = CopyOnDrop::new(&*tmp, v.add(len - 2));
         ptr::copy_nonoverlapping(v.add(len - 2), v.add(len - 1), 1);
 
         for i in (0..len - 2).rev() {
@@ -191,25 +203,25 @@ where
     // This binary heap respects the invariant `parent >= child`.
     let sift_down = |v: &mut [T], mut node| {
         loop {
-            // Children of `node`:
-            let left = 2 * node + 1;
-            let right = 2 * node + 2;
+            // Children of `node`.
+            let mut child = 2 * node + 1;
+            if child >= v.len() {
+                break;
+            }
 
             // Choose the greater child.
-            let greater = if right < v.len() && is_less(&v[left], &v[right]) {
-                right
-            } else {
-                left
-            };
+            if child + 1 < v.len() && is_less(&v[child], &v[child + 1]) {
+                child += 1;
+            }
 
             // Stop if the invariant holds at `node`.
-            if greater >= v.len() || !is_less(&v[node], &v[greater]) {
+            if !is_less(&v[node], &v[child]) {
                 break;
             }
 
             // Swap `node` with the greater child, move one step down, and continue sifting.
-            v.swap(node, greater);
-            node = greater;
+            v.swap(node, child);
+            node = child;
         }
     };
@@ -426,7 +438,7 @@ where
         // safe. Otherwise, the debug assertions in the `is_done` case guarantee that
        // `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
        // for the smaller number of remaining elements.
-        l = unsafe { l.offset(block_l as isize) };
+        l = unsafe { l.add(block_l) };
     }
 
     if start_r == end_r {
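The `sift_down` rewrite earlier in this file's diff folds the two child indices into a single `child` cursor, so the leaf check happens once per level instead of being entangled with choosing the greater child. A safe, monomorphic restatement of the same loop, specialized here to a max-heap of `i32` (the vendored version is generic and compares through `is_less`):

// Sift `v[node]` down until the max-heap invariant `parent >= child` holds.
fn sift_down(v: &mut [i32], mut node: usize) {
    loop {
        // First child of `node`; if it is out of bounds, `node` is a leaf.
        let mut child = 2 * node + 1;
        if child >= v.len() {
            break;
        }
        // Prefer the greater of the two children.
        if child + 1 < v.len() && v[child] < v[child + 1] {
            child += 1;
        }
        // Stop once the invariant already holds at `node`.
        if v[node] >= v[child] {
            break;
        }
        // Otherwise swap, move one step down, and keep sifting.
        v.swap(node, child);
        node = child;
    }
}

fn main() {
    let mut v = [1, 9, 5, 3, 7];
    // Build the heap by sifting down every non-leaf node.
    for node in (0..v.len() / 2).rev() {
        sift_down(&mut v, node);
    }
    assert_eq!(v[0], 9); // the maximum is now at the root
}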
@@ -510,10 +522,7 @@ where
     // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe.
     let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
-    let _pivot_guard = CopyOnDrop {
-        src: &*tmp,
-        dest: pivot,
-    };
+    let _pivot_guard = unsafe { CopyOnDrop::new(&*tmp, pivot) };
     let pivot = &*tmp;
 
     // Find the first pair of out-of-order elements.
@@ -569,10 +578,7 @@ where
     // operation panics, the pivot will be automatically written back into the slice.
     // SAFETY: The pointer here is valid because it is obtained from a reference to a slice.
     let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
-    let _pivot_guard = CopyOnDrop {
-        src: &*tmp,
-        dest: pivot,
-    };
+    let _pivot_guard = unsafe { CopyOnDrop::new(&*tmp, pivot) };
     let pivot = &*tmp;
 
     // Now partition the slice.
@@ -629,8 +635,7 @@ fn break_patterns<T>(v: &mut [T]) {
             random
         };
         let mut gen_usize = || {
-            // TODO 1.53: if usize::BITS <= 32 {
-            if mem::size_of::<usize>() <= 4 {
+            if usize::BITS <= 32 {
                 gen_u32() as usize
             } else {
                 (((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
@@ -676,6 +681,7 @@ where
     let len = v.len();
 
     // Three indices near which we are going to choose a pivot.
+    #[allow(clippy::identity_op)]
     let mut a = len / 4 * 1;
     let mut b = len / 4 * 2;
     let mut c = len / 4 * 3;
@@ -850,8 +856,7 @@ where
     }
 
     // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
-    // TODO 1.53: let limit = usize::BITS - v.len().leading_zeros();
-    let limit = mem::size_of::<usize>() as u32 * 8 - v.len().leading_zeros();
+    let limit = usize::BITS - v.len().leading_zeros();
 
     recurse(v, &is_less, None, limit);
 }
@@ -864,7 +869,7 @@ mod tests {
 
     #[test]
     fn test_heapsort() {
-        let ref mut rng = thread_rng();
+        let rng = &mut thread_rng();
 
         for len in (0..25).chain(500..501) {
            for &modulus in &[5, 10, 100] {
@@ -891,8 +896,8 @@ mod tests {
         heapsort(&mut v, &|_, _| thread_rng().gen());
         heapsort(&mut v, &|a, b| a < b);
 
-        for i in 0..v.len() {
-            assert_eq!(v[i], i);
+        for (i, &entry) in v.iter().enumerate() {
+            assert_eq!(entry, i);
         }
     }
 }
diff --git a/vendor/rustc-rayon/src/slice/rchunks.rs b/vendor/rustc-rayon/src/slice/rchunks.rs
index 4ff712548..63e765d77 100644
--- a/vendor/rustc-rayon/src/slice/rchunks.rs
+++ b/vendor/rustc-rayon/src/slice/rchunks.rs
@@ -314,7 +314,7 @@ impl<'data, T: Send> RChunksExactMut<'data, T> {
     /// returned by the iterator. The returned slice has at most `chunk_size-1`
     /// elements. Subsequent calls will return an empty slice.
     pub fn take_remainder(&mut self) -> &'data mut [T] {
-        std::mem::replace(&mut self.rem, &mut [])
+        std::mem::take(&mut self.rem)
     }
 }
diff --git a/vendor/rustc-rayon/src/slice/test.rs b/vendor/rustc-rayon/src/slice/test.rs
index 4325aa88d..f74ca0f74 100644
--- a/vendor/rustc-rayon/src/slice/test.rs
+++ b/vendor/rustc-rayon/src/slice/test.rs
@@ -10,7 +10,7 @@ macro_rules! sort {
     ($f:ident, $name:ident) => {
         #[test]
         fn $name() {
-            let ref mut rng = thread_rng();
+            let rng = &mut thread_rng();
 
             for len in (0..25).chain(500..501) {
                 for &modulus in &[5, 10, 100] {
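A closing note on the quicksort.rs cleanups above: both TODO comments could be resolved because `usize::BITS` stabilized in Rust 1.53, replacing the `size_of::<usize>() * 8` spelling. The recursion limit it computes is `floor(log2(len)) + 1`, shown standalone below (the function name is this note's own, not rayon API):

// Depth limit before quicksort falls back to heapsort: floor(log2(len)) + 1.
fn depth_limit(len: usize) -> u32 {
    usize::BITS - len.leading_zeros()
}

fn main() {
    assert_eq!(depth_limit(1), 1); // floor(log2(1)) + 1
    assert_eq!(depth_limit(8), 4); // floor(log2(8)) + 1
    assert_eq!(depth_limit(9), 4); // floor(log2(9)) + 1
}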