Diffstat (limited to 'library/core/src/slice')
-rw-r--r--  library/core/src/slice/ascii.rs        |  2
-rw-r--r--  library/core/src/slice/index.rs        | 20
-rw-r--r--  library/core/src/slice/iter.rs         |  8
-rw-r--r--  library/core/src/slice/iter/macros.rs  | 18
-rw-r--r--  library/core/src/slice/memchr.rs       | 37
-rw-r--r--  library/core/src/slice/mod.rs          | 53
-rw-r--r--  library/core/src/slice/raw.rs          |  4
-rw-r--r--  library/core/src/slice/sort.rs         | 36
8 files changed, 109 insertions(+), 69 deletions(-)
diff --git a/library/core/src/slice/ascii.rs b/library/core/src/slice/ascii.rs
index 63715a6b8..5e5399acc 100644
--- a/library/core/src/slice/ascii.rs
+++ b/library/core/src/slice/ascii.rs
@@ -215,8 +215,6 @@ impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
}
}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
-impl<'a> iter::ExactSizeIterator for EscapeAscii<'a> {}
-#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Display for EscapeAscii<'a> {
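The removal above reflects that `EscapeAscii` cannot promise an exact length: a single input byte may expand to as many as four escaped bytes, so `ExactSizeIterator` cannot hold. A minimal standalone check on stable Rust:

    fn main() {
        // One NUL byte escapes to the four bytes `\x00`, so the number of
        // items is not the slice length.
        assert_eq!(b"\x00".escape_ascii().count(), 4);
        assert_eq!(b"\x00".escape_ascii().to_string(), "\\x00");
    }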
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index fd7ecf3da..3403a5a86 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -48,10 +48,12 @@ const fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
}
// FIXME const-hack
+#[track_caller]
fn slice_start_index_len_fail_rt(index: usize, len: usize) -> ! {
panic!("range start index {index} out of range for slice of length {len}");
}
+#[track_caller]
const fn slice_start_index_len_fail_ct(_: usize, _: usize) -> ! {
panic!("slice start index is out of range for slice");
}
@@ -69,10 +71,12 @@ const fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
}
// FIXME const-hack
+#[track_caller]
fn slice_end_index_len_fail_rt(index: usize, len: usize) -> ! {
panic!("range end index {index} out of range for slice of length {len}");
}
+#[track_caller]
const fn slice_end_index_len_fail_ct(_: usize, _: usize) -> ! {
panic!("slice end index is out of range for slice");
}
@@ -88,10 +92,12 @@ const fn slice_index_order_fail(index: usize, end: usize) -> ! {
}
// FIXME const-hack
+#[track_caller]
fn slice_index_order_fail_rt(index: usize, end: usize) -> ! {
panic!("slice index starts at {index} but ends at {end}");
}
+#[track_caller]
const fn slice_index_order_fail_ct(_: usize, _: usize) -> ! {
panic!("slice index start is larger than end");
}
@@ -217,21 +223,23 @@ unsafe impl<T> const SliceIndex<[T]> for usize {
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
+ let this = self;
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe {
- assert_unsafe_precondition!(self < slice.len());
+ assert_unsafe_precondition!([T](this: usize, slice: *const [T]) => this < slice.len());
slice.as_ptr().add(self)
}
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
+ let this = self;
// SAFETY: see comments for `get_unchecked` above.
unsafe {
- assert_unsafe_precondition!(self < slice.len());
+ assert_unsafe_precondition!([T](this: usize, slice: *mut [T]) => this < slice.len());
slice.as_mut_ptr().add(self)
}
}
@@ -276,22 +284,26 @@ unsafe impl<T> const SliceIndex<[T]> for ops::Range<usize> {
#[inline]
unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+ let this = ops::Range { start: self.start, end: self.end };
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe {
- assert_unsafe_precondition!(self.end >= self.start && self.end <= slice.len());
+ assert_unsafe_precondition!([T](this: ops::Range<usize>, slice: *const [T]) =>
+ this.end >= this.start && this.end <= slice.len());
ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start)
}
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+ let this = ops::Range { start: self.start, end: self.end };
// SAFETY: see comments for `get_unchecked` above.
unsafe {
- assert_unsafe_precondition!(self.end >= self.start && self.end <= slice.len());
+ assert_unsafe_precondition!([T](this: ops::Range<usize>, slice: *mut [T]) =>
+ this.end >= this.start && this.end <= slice.len());
ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start)
}
}
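Every changed body in this file now begins with `let this = self;` because the reworked `assert_unsafe_precondition!` lowers its condition into a free-standing check function whose parameters are the named captures, and a function parameter cannot be called `self`. The runtime-only `_rt` panic helpers additionally gain `#[track_caller]` so a failure points at the caller. A simplified, hypothetical sketch of the macro shape (not core's actual implementation, which dispatches through `const_eval_select`):

    macro_rules! check_precondition {
        // Each value the condition reads is named explicitly, so the
        // generated function captures nothing from its environment.
        (($($name:ident: $ty:ty),*) => $cond:expr) => {{
            #[inline(always)]
            fn check($($name: $ty),*) -> bool { $cond }
            debug_assert!(check($($name),*), "unsafe precondition violated");
        }};
    }

    fn main() {
        let v = [1u8, 2, 3];
        let slice: &[u8] = &v;
        let this = 2usize; // stands in for `self` in `usize::get_unchecked`
        check_precondition!((this: usize, slice: &[u8]) => this < slice.len());
    }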
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index f1e659309..395c56784 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -92,7 +92,7 @@ impl<'a, T> Iter<'a, T> {
assume(!ptr.is_null());
let end = if mem::size_of::<T>() == 0 {
- (ptr as *const u8).wrapping_add(slice.len()) as *const T
+ ptr.wrapping_byte_add(slice.len())
} else {
ptr.add(slice.len())
};
@@ -228,7 +228,7 @@ impl<'a, T> IterMut<'a, T> {
assume(!ptr.is_null());
let end = if mem::size_of::<T>() == 0 {
- (ptr as *mut u8).wrapping_add(slice.len()) as *mut T
+ ptr.wrapping_byte_add(slice.len())
} else {
ptr.add(slice.len())
};
@@ -2754,10 +2754,10 @@ impl<'a, T> Iterator for RChunksMut<'a, T> {
None => 0,
};
// SAFETY: This type ensures that self.v is a valid pointer with a correct len.
- // Therefore the bounds check in split_at_mut guarantess the split point is inbounds.
+ // Therefore the bounds check in split_at_mut guarantees the split point is inbounds.
let (head, tail) = unsafe { self.v.split_at_mut(start) };
// SAFETY: This type ensures that self.v is a valid pointer with a correct len.
- // Therefore the bounds check in split_at_mut guarantess the split point is inbounds.
+ // Therefore the bounds check in split_at_mut guarantees the split point is inbounds.
let (nth, _) = unsafe { tail.split_at_mut(end - start) };
self.v = head;
// SAFETY: Nothing else points to or will point to the contents of this slice.
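The `wrapping_byte_add` rewrites above are the zero-sized-type bookkeeping: for ZSTs the element pointer never moves, and `end` is really a counter sitting `len` *bytes* past `ptr`. The new method keeps the pointer's type and provenance without the old `as *const u8` round-trip (it was still a nightly API when this landed). A sketch of the encoding:

    fn main() {
        let slice: &[()] = &[(), (), ()];
        let ptr = slice.as_ptr();
        // Stepping the address by `len` bytes encodes the length without
        // ever forming an out-of-bounds element pointer.
        let end = ptr.wrapping_byte_add(slice.len());
        assert_eq!(end as usize - ptr as usize, 3); // items remaining
    }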
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index c05242222..6c9e7574e 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -64,7 +64,7 @@ macro_rules! iterator {
// backwards by `n`. `n` must not exceed `self.len()`.
macro_rules! zst_shrink {
($self: ident, $n: ident) => {
- $self.end = ($self.end as * $raw_mut u8).wrapping_offset(-$n) as * $raw_mut T;
+ $self.end = $self.end.wrapping_byte_sub($n);
}
}
@@ -82,7 +82,7 @@ macro_rules! iterator {
// returning the old start.
// Unsafe because the offset must not exceed `self.len()`.
#[inline(always)]
- unsafe fn post_inc_start(&mut self, offset: isize) -> * $raw_mut T {
+ unsafe fn post_inc_start(&mut self, offset: usize) -> * $raw_mut T {
if mem::size_of::<T>() == 0 {
zst_shrink!(self, offset);
self.ptr.as_ptr()
@@ -90,7 +90,7 @@ macro_rules! iterator {
let old = self.ptr.as_ptr();
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// so this new pointer is inside `self` and thus guaranteed to be non-null.
- self.ptr = unsafe { NonNull::new_unchecked(self.ptr.as_ptr().offset(offset)) };
+ self.ptr = unsafe { NonNull::new_unchecked(self.ptr.as_ptr().add(offset)) };
old
}
}
@@ -99,7 +99,7 @@ macro_rules! iterator {
// returning the new end.
// Unsafe because the offset must not exceed `self.len()`.
#[inline(always)]
- unsafe fn pre_dec_end(&mut self, offset: isize) -> * $raw_mut T {
+ unsafe fn pre_dec_end(&mut self, offset: usize) -> * $raw_mut T {
if mem::size_of::<T>() == 0 {
zst_shrink!(self, offset);
self.ptr.as_ptr()
@@ -107,7 +107,7 @@ macro_rules! iterator {
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// which is guaranteed to not overflow an `isize`. Also, the resulting pointer
// is in bounds of `slice`, which fulfills the other requirements for `offset`.
- self.end = unsafe { self.end.offset(-offset) };
+ self.end = unsafe { self.end.sub(offset) };
self.end
}
}
@@ -180,7 +180,7 @@ macro_rules! iterator {
}
// SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs.
unsafe {
- self.post_inc_start(n as isize);
+ self.post_inc_start(n);
Some(next_unchecked!(self))
}
}
@@ -189,7 +189,7 @@ macro_rules! iterator {
fn advance_by(&mut self, n: usize) -> Result<(), usize> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
- unsafe { self.post_inc_start(advance as isize) };
+ unsafe { self.post_inc_start(advance) };
if advance == n { Ok(()) } else { Err(advance) }
}
@@ -375,7 +375,7 @@ macro_rules! iterator {
}
// SAFETY: We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
unsafe {
- self.pre_dec_end(n as isize);
+ self.pre_dec_end(n);
Some(next_back_unchecked!(self))
}
}
@@ -384,7 +384,7 @@ macro_rules! iterator {
fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
- unsafe { self.pre_dec_end(advance as isize) };
+ unsafe { self.pre_dec_end(advance) };
if advance == n { Ok(()) } else { Err(advance) }
}
}
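All of the `offset` → `add`/`sub` rewrites in this file are behavior-preserving: `p.add(n)` is `p.offset(n as isize)` and `p.sub(n)` is `p.offset(-(n as isize))`, but the `usize` parameters let the iterators pass their unsigned lengths straight through without casts. A minimal equivalence check:

    fn main() {
        let v = [10u32, 20, 30, 40];
        let p = v.as_ptr();
        // SAFETY: every offset below stays inside the 4-element array.
        unsafe {
            assert_eq!(p.add(3), p.offset(3));
            assert_eq!(p.add(3).sub(2), p.offset(3).offset(-2));
            assert_eq!(*p.add(3).sub(2), 20);
        }
    }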
diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs
index dffeaf6a8..7de1f48e6 100644
--- a/library/core/src/slice/memchr.rs
+++ b/library/core/src/slice/memchr.rs
@@ -16,35 +16,51 @@ const USIZE_BYTES: usize = mem::size_of::<usize>();
/// bytes where the borrow propagated all the way to the most significant
/// bit."
#[inline]
-fn contains_zero_byte(x: usize) -> bool {
+const fn contains_zero_byte(x: usize) -> bool {
x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
}
#[cfg(target_pointer_width = "16")]
#[inline]
-fn repeat_byte(b: u8) -> usize {
+const fn repeat_byte(b: u8) -> usize {
(b as usize) << 8 | b as usize
}
#[cfg(not(target_pointer_width = "16"))]
#[inline]
-fn repeat_byte(b: u8) -> usize {
+const fn repeat_byte(b: u8) -> usize {
(b as usize) * (usize::MAX / 255)
}
/// Returns the first index matching the byte `x` in `text`.
#[must_use]
#[inline]
-pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
- // Fast path for small slices
+pub const fn memchr(x: u8, text: &[u8]) -> Option<usize> {
+ // Fast path for small slices.
if text.len() < 2 * USIZE_BYTES {
- return text.iter().position(|elt| *elt == x);
+ return memchr_naive(x, text);
}
- memchr_general_case(x, text)
+ memchr_aligned(x, text)
}
-fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> {
+#[inline]
+const fn memchr_naive(x: u8, text: &[u8]) -> Option<usize> {
+ let mut i = 0;
+
+ // FIXME(const-hack): Replace with `text.iter().pos(|c| *c == x)`.
+ while i < text.len() {
+ if text[i] == x {
+ return Some(i);
+ }
+
+ i += 1;
+ }
+
+ None
+}
+
+const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
// Scan for a single byte value by reading two `usize` words at a time.
//
// Split `text` in three parts
@@ -59,7 +75,7 @@ fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> {
if offset > 0 {
offset = cmp::min(offset, len);
- if let Some(index) = text[..offset].iter().position(|elt| *elt == x) {
+ if let Some(index) = memchr_naive(x, &text[..offset]) {
return Some(index);
}
}
@@ -84,7 +100,8 @@ fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> {
}
// Find the byte after the point the body loop stopped.
- text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i)
+ // FIXME(const-hack): Use `?` instead.
+ if let Some(i) = memchr_naive(x, &text[offset..]) { Some(offset + i) } else { None }
}
/// Returns the last index matching the byte `x` in `text`.
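With the whole file now `const fn`, the word-at-a-time trick quoted at the top can be exercised in isolation. A worked example of `contains_zero_byte` (assumes a 64-bit target for the `from_ne_bytes` width):

    const LO: usize = usize::MAX / 255; // 0x0101_0101_0101_0101
    const HI: usize = LO << 7;          // 0x8080_8080_8080_8080

    const fn repeat_byte(b: u8) -> usize {
        (b as usize) * LO
    }

    const fn contains_zero_byte(x: usize) -> bool {
        // A lane that held 0x00 borrows through its bit 7.
        x.wrapping_sub(LO) & !x & HI != 0
    }

    fn main() {
        let word = usize::from_ne_bytes(*b"abcdefgh"); // 8 bytes: 64-bit
        // XOR zeroes exactly the lanes equal to the needle byte.
        assert!(contains_zero_byte(word ^ repeat_byte(b'd')));
        assert!(!contains_zero_byte(word ^ repeat_byte(b'z')));
    }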
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index e6ca6ef82..6a7150d29 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -656,10 +656,11 @@ impl<T> [T] {
#[unstable(feature = "slice_swap_unchecked", issue = "88539")]
#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
pub const unsafe fn swap_unchecked(&mut self, a: usize, b: usize) {
- let ptr = self.as_mut_ptr();
+ let this = self;
+ let ptr = this.as_mut_ptr();
// SAFETY: caller has to guarantee that `a < self.len()` and `b < self.len()`
unsafe {
- assert_unsafe_precondition!(a < self.len() && b < self.len());
+ assert_unsafe_precondition!([T](a: usize, b: usize, this: &mut [T]) => a < this.len() && b < this.len());
ptr::swap(ptr.add(a), ptr.add(b));
}
}
@@ -674,8 +675,9 @@ impl<T> [T] {
/// assert!(v == [3, 2, 1]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_reverse", issue = "100784")]
#[inline]
- pub fn reverse(&mut self) {
+ pub const fn reverse(&mut self) {
let half_len = self.len() / 2;
let Range { start, end } = self.as_mut_ptr_range();
@@ -698,9 +700,9 @@ impl<T> [T] {
revswap(front_half, back_half, half_len);
#[inline]
- fn revswap<T>(a: &mut [T], b: &mut [T], n: usize) {
- debug_assert_eq!(a.len(), n);
- debug_assert_eq!(b.len(), n);
+ const fn revswap<T>(a: &mut [T], b: &mut [T], n: usize) {
+ debug_assert!(a.len() == n);
+ debug_assert!(b.len() == n);
// Because this function is first compiled in isolation,
// this check tells LLVM that the indexing below is
@@ -708,8 +710,10 @@ impl<T> [T] {
// lengths of the slices are known -- it's removed.
let (a, b) = (&mut a[..n], &mut b[..n]);
- for i in 0..n {
+ let mut i = 0;
+ while i < n {
mem::swap(&mut a[i], &mut b[n - 1 - i]);
+ i += 1;
}
}
}
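The `for i in 0..n` → `while` rewrite above is the standard const-hack: `for` desugars to `Iterator::next` calls, and trait methods cannot yet be called in `const fn`. Likewise `debug_assert_eq!` becomes a plain `debug_assert!` comparison, since the `_eq` form pulls in non-const formatting machinery on failure. The same loop shape in miniature:

    const fn sum(xs: &[u32]) -> u32 {
        // `while` + manual index, as `revswap` now does; a `for` loop
        // would not compile in a const fn.
        let mut total = 0;
        let mut i = 0;
        while i < xs.len() {
            total += xs[i];
            i += 1;
        }
        total
    }

    const TOTAL: u32 = sum(&[1, 2, 3]);

    fn main() {
        assert_eq!(TOTAL, 6);
    }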
@@ -969,9 +973,10 @@ impl<T> [T] {
#[inline]
#[must_use]
pub unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
+ let this = self;
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
let new_len = unsafe {
- assert_unsafe_precondition!(N != 0 && self.len() % N == 0);
+ assert_unsafe_precondition!([T](this: &[T], N: usize) => N != 0 && this.len() % N == 0);
exact_div(self.len(), N)
};
// SAFETY: We cast a slice of `new_len * N` elements into
@@ -1108,10 +1113,11 @@ impl<T> [T] {
#[inline]
#[must_use]
pub unsafe fn as_chunks_unchecked_mut<const N: usize>(&mut self) -> &mut [[T; N]] {
+ let this = &*self;
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
let new_len = unsafe {
- assert_unsafe_precondition!(N != 0 && self.len() % N == 0);
- exact_div(self.len(), N)
+ assert_unsafe_precondition!([T](this: &[T], N: usize) => N != 0 && this.len() % N == 0);
+ exact_div(this.len(), N)
};
// SAFETY: We cast a slice of `new_len * N` elements into
// a slice of `new_len` many `N` elements chunks.
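The `let this = &*self;` reborrow in the `&mut self` version exists only to hand the precondition macro a shared `&[T]`. The checked sibling of these methods makes the asserted invariant visible: with `chunks_exact`, an empty remainder is exactly the `len % N == 0` condition.

    fn main() {
        let v = [1u8, 2, 3, 4, 5, 6];
        let it = v.chunks_exact(2);
        // `len % N == 0` (the asserted precondition): no leftover tail.
        assert!(it.remainder().is_empty());
        assert_eq!(it.count(), 3); // new_len = len / N
    }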
@@ -1538,13 +1544,14 @@ impl<T> [T] {
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_slice_split_at_not_mut", issue = "101158")]
#[inline]
#[track_caller]
#[must_use]
- pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
+ pub const fn split_at(&self, mid: usize) -> (&[T], &[T]) {
assert!(mid <= self.len());
// SAFETY: `[ptr; mid]` and `[mid; len]` are inside `self`, which
- // fulfills the requirements of `from_raw_parts_mut`.
+ // fulfills the requirements of `split_at_unchecked`.
unsafe { self.split_at_unchecked(mid) }
}
@@ -1623,11 +1630,19 @@ impl<T> [T] {
/// }
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
+ #[rustc_const_unstable(feature = "slice_split_at_unchecked", issue = "76014")]
#[inline]
#[must_use]
- pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
+ pub const unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
+ // HACK: the const function `from_raw_parts` is used to make this
+ // function const; previously the implementation used
+ // `(self.get_unchecked(..mid), self.get_unchecked(mid..))`
+
+ let len = self.len();
+ let ptr = self.as_ptr();
+
// SAFETY: Caller has to check that `0 <= mid <= self.len()`
- unsafe { (self.get_unchecked(..mid), self.get_unchecked(mid..)) }
+ unsafe { (from_raw_parts(ptr, mid), from_raw_parts(ptr.add(mid), len - mid)) }
}
/// Divides one mutable slice into two at an index, without doing bounds checking.
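The new `split_at_unchecked` body rebuilds both halves from the base pointer because `from_raw_parts` is already usable in `const fn`, while the old `get_unchecked(..mid)` range indexing is not. The same construction in safe packaging, with the bounds check done up front:

    use std::slice::from_raw_parts;

    fn split_at_demo(v: &[u8], mid: usize) -> (&[u8], &[u8]) {
        assert!(mid <= v.len());
        let ptr = v.as_ptr();
        // SAFETY: `mid <= len`, so both halves are in bounds and disjoint.
        unsafe { (from_raw_parts(ptr, mid), from_raw_parts(ptr.add(mid), v.len() - mid)) }
    }

    fn main() {
        let (a, b) = split_at_demo(&[1, 2, 3, 4], 1);
        assert_eq!(a, &[1]);
        assert_eq!(b, &[2, 3, 4]);
    }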
@@ -1675,7 +1690,7 @@ impl<T> [T] {
// `[ptr; mid]` and `[mid; len]` are not overlapping, so returning a mutable reference
// is fine.
unsafe {
- assert_unsafe_precondition!(mid <= len);
+ assert_unsafe_precondition!((mid: usize, len: usize) => mid <= len);
(from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid))
}
}
@@ -2309,7 +2324,7 @@ impl<T> [T] {
}
/// Binary searches this slice for a given element.
- /// This behaves similary to [`contains`] if this slice is sorted.
+ /// This behaves similarly to [`contains`] if this slice is sorted.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
@@ -2921,7 +2936,7 @@ impl<T> [T] {
let prev_ptr_write = ptr.add(next_write - 1);
if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
if next_read != next_write {
- let ptr_write = prev_ptr_write.offset(1);
+ let ptr_write = prev_ptr_write.add(1);
mem::swap(&mut *ptr_read, &mut *ptr_write);
}
next_write += 1;
@@ -3518,7 +3533,7 @@ impl<T> [T] {
// alignment targeted for U.
// `crate::ptr::align_offset` is called with a correctly aligned and
// valid pointer `ptr` (it comes from a reference to `self`) and with
- // a size that is a power of two (since it comes from the alignement for U),
+ // a size that is a power of two (since it comes from the alignment for U),
// satisfying its safety constraints.
let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
if offset > self.len() {
@@ -4101,7 +4116,6 @@ impl<T, const N: usize> [[T; N]] {
}
}
-#[cfg(not(bootstrap))]
#[cfg(not(test))]
impl [f32] {
/// Sorts the slice of floats.
@@ -4131,7 +4145,6 @@ impl [f32] {
}
}
-#[cfg(not(bootstrap))]
#[cfg(not(test))]
impl [f64] {
/// Sorts the slice of floats.
diff --git a/library/core/src/slice/raw.rs b/library/core/src/slice/raw.rs
index 107e71ab6..f1e8bc79b 100644
--- a/library/core/src/slice/raw.rs
+++ b/library/core/src/slice/raw.rs
@@ -90,7 +90,7 @@ use crate::ptr;
pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
unsafe {
- assert_unsafe_precondition!(
+ assert_unsafe_precondition!([T](data: *const T, len: usize) =>
is_aligned_and_not_null(data)
&& crate::mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize
);
@@ -134,7 +134,7 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T]
pub const unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
unsafe {
- assert_unsafe_precondition!(
+ assert_unsafe_precondition!([T](data: *mut T, len: usize) =>
is_aligned_and_not_null(data)
&& crate::mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize
);
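The rewritten assertions spell out the two documented preconditions of `from_raw_parts`: a non-null, suitably aligned pointer, and a total size of at most `isize::MAX` bytes. Evaluated by hand for a concrete (made-up) call:

    use std::mem::{align_of, size_of};

    fn main() {
        let data = [7u32; 4];
        let (ptr, len) = (data.as_ptr(), data.len());
        // Precondition 1: non-null and aligned for u32.
        assert!(!ptr.is_null() && ptr as usize % align_of::<u32>() == 0);
        // Precondition 2: total size fits in isize::MAX bytes.
        assert!(size_of::<u32>().saturating_mul(len) <= isize::MAX as usize);
        let s = unsafe { std::slice::from_raw_parts(ptr, len) };
        assert_eq!(s, &[7; 4]);
    }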
diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
index 6a201834b..c6c03c0b0 100644
--- a/library/core/src/slice/sort.rs
+++ b/library/core/src/slice/sort.rs
@@ -326,8 +326,8 @@ where
unsafe {
// Branchless comparison.
*end_l = i as u8;
- end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
- elem = elem.offset(1);
+ end_l = end_l.add(!is_less(&*elem, pivot) as usize);
+ elem = elem.add(1);
}
}
}
@@ -352,9 +352,9 @@ where
// Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice.
unsafe {
// Branchless comparison.
- elem = elem.offset(-1);
+ elem = elem.sub(1);
*end_r = i as u8;
- end_r = end_r.offset(is_less(&*elem, pivot) as isize);
+ end_r = end_r.add(is_less(&*elem, pivot) as usize);
}
}
}
@@ -365,12 +365,12 @@ where
if count > 0 {
macro_rules! left {
() => {
- l.offset(*start_l as isize)
+ l.add(usize::from(*start_l))
};
}
macro_rules! right {
() => {
- r.offset(-(*start_r as isize) - 1)
+ r.sub(usize::from(*start_r) + 1)
};
}
@@ -398,16 +398,16 @@ where
ptr::copy_nonoverlapping(right!(), left!(), 1);
for _ in 1..count {
- start_l = start_l.offset(1);
+ start_l = start_l.add(1);
ptr::copy_nonoverlapping(left!(), right!(), 1);
- start_r = start_r.offset(1);
+ start_r = start_r.add(1);
ptr::copy_nonoverlapping(right!(), left!(), 1);
}
ptr::copy_nonoverlapping(&tmp, right!(), 1);
mem::forget(tmp);
- start_l = start_l.offset(1);
- start_r = start_r.offset(1);
+ start_l = start_l.add(1);
+ start_r = start_r.add(1);
}
}
@@ -420,7 +420,7 @@ where
// safe. Otherwise, the debug assertions in the `is_done` case guarantee that
// `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
// for the smaller number of remaining elements.
- l = unsafe { l.offset(block_l as isize) };
+ l = unsafe { l.add(block_l) };
}
if start_r == end_r {
@@ -428,7 +428,7 @@ where
// SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
// or `block_r` has been adjusted for the last handful of elements.
- r = unsafe { r.offset(-(block_r as isize)) };
+ r = unsafe { r.sub(block_r) };
}
if is_done {
@@ -457,9 +457,9 @@ where
// - `offsets_l` contains valid offsets into `v` collected during the partitioning of
// the last block, so the `l.offset` calls are valid.
unsafe {
- end_l = end_l.offset(-1);
- ptr::swap(l.offset(*end_l as isize), r.offset(-1));
- r = r.offset(-1);
+ end_l = end_l.sub(1);
+ ptr::swap(l.add(usize::from(*end_l)), r.sub(1));
+ r = r.sub(1);
}
}
width(v.as_mut_ptr(), r)
@@ -470,9 +470,9 @@ where
while start_r < end_r {
// SAFETY: See the reasoning in [remaining-elements-safety].
unsafe {
- end_r = end_r.offset(-1);
- ptr::swap(l, r.offset(-(*end_r as isize) - 1));
- l = l.offset(1);
+ end_r = end_r.sub(1);
+ ptr::swap(l, r.sub(usize::from(*end_r) + 1));
+ l = l.add(1);
}
}
width(v.as_mut_ptr(), l)
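Beyond the signed-to-unsigned cleanups, the partition loops keep their branchless shape: each offset is written unconditionally and the write cursor advances by the comparison result cast to 0 or 1. The idiom in isolation (an illustrative helper, not the sort's actual code):

    // Collect indices of elements >= pivot without a data-dependent branch,
    // mirroring `end_l = end_l.add(!is_less(&*elem, pivot) as usize)` above.
    fn offsets_ge(v: &[i32], pivot: i32) -> Vec<u8> {
        let mut offsets = [0u8; 256];
        let mut end = 0;
        for (i, elem) in v.iter().take(256).enumerate() {
            offsets[end] = i as u8;           // speculative write
            end += (*elem >= pivot) as usize; // keep the slot, or reuse it
        }
        offsets[..end].to_vec()
    }

    fn main() {
        assert_eq!(offsets_ge(&[3, 9, 1, 7], 5), vec![1, 3]);
    }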