Diffstat (limited to 'library')
312 files changed, 12053 insertions, 8870 deletions
diff --git a/library/alloc/benches/vec_deque.rs b/library/alloc/benches/vec_deque.rs index 7c78561eb..313a97ed1 100644 --- a/library/alloc/benches/vec_deque.rs +++ b/library/alloc/benches/vec_deque.rs @@ -1,4 +1,8 @@ -use std::collections::VecDeque; +use core::iter::Iterator; +use std::{ + collections::{vec_deque, VecDeque}, + mem, +}; use test::{black_box, Bencher}; #[bench] @@ -53,6 +57,146 @@ fn bench_try_fold(b: &mut Bencher) { b.iter(|| black_box(ring.iter().try_fold(0, |a, b| Some(a + b)))) } +/// does the memory bookkeeping to reuse the buffer of the Vec between iterations. +/// `setup` must not modify its argument's length or capacity. `g` must not move out of its argument. +fn into_iter_helper< + T: Copy, + F: FnOnce(&mut VecDeque<T>), + G: FnOnce(&mut vec_deque::IntoIter<T>), +>( + v: &mut Vec<T>, + setup: F, + g: G, +) { + let ptr = v.as_mut_ptr(); + let len = v.len(); + // ensure that the vec is full, to make sure that any wrapping from the deque doesn't + // access uninitialized memory. + assert_eq!(v.len(), v.capacity()); + + let mut deque = VecDeque::from(mem::take(v)); + setup(&mut deque); + + let mut it = deque.into_iter(); + g(&mut it); + + mem::forget(it); + + // SAFETY: the provided functions are not allowed to modify the allocation, so the buffer is still alive. + // len and capacity are accurate due to the above assertion. + // All the elements in the buffer are still valid, because of `T: Copy` which implies `T: !Drop`. + mem::forget(mem::replace(v, unsafe { Vec::from_raw_parts(ptr, len, len) })); +} + +#[bench] +fn bench_into_iter(b: &mut Bencher) { + let len = 1024; + // we reuse this allocation for every run + let mut vec: Vec<usize> = (0..len).collect(); + vec.shrink_to_fit(); + + b.iter(|| { + let mut sum = 0; + into_iter_helper( + &mut vec, + |_| {}, + |it| { + for i in it { + sum += i; + } + }, + ); + black_box(sum); + + let mut sum = 0; + // rotating a full deque doesn't move any memory. + into_iter_helper( + &mut vec, + |d| d.rotate_left(len / 2), + |it| { + for i in it { + sum += i; + } + }, + ); + black_box(sum); + }); +} + +#[bench] +fn bench_into_iter_fold(b: &mut Bencher) { + let len = 1024; + + // because `fold` takes ownership of the iterator, + // we can't prevent it from dropping the memory, + // so we have to bite the bullet and reallocate + // for every iteration. + b.iter(|| { + let deque: VecDeque<usize> = (0..len).collect(); + assert_eq!(deque.len(), deque.capacity()); + let sum = deque.into_iter().fold(0, |a, b| a + b); + black_box(sum); + + // rotating a full deque doesn't move any memory. 
+ let mut deque: VecDeque<usize> = (0..len).collect(); + assert_eq!(deque.len(), deque.capacity()); + deque.rotate_left(len / 2); + let sum = deque.into_iter().fold(0, |a, b| a + b); + black_box(sum); + }); +} + +#[bench] +fn bench_into_iter_try_fold(b: &mut Bencher) { + let len = 1024; + // we reuse this allocation for every run + let mut vec: Vec<usize> = (0..len).collect(); + vec.shrink_to_fit(); + + // Iterator::any uses Iterator::try_fold under the hood + b.iter(|| { + let mut b = false; + into_iter_helper(&mut vec, |_| {}, |it| b = it.any(|i| i == len - 1)); + black_box(b); + + into_iter_helper(&mut vec, |d| d.rotate_left(len / 2), |it| b = it.any(|i| i == len - 1)); + black_box(b); + }); +} + +#[bench] +fn bench_into_iter_next_chunk(b: &mut Bencher) { + let len = 1024; + // we reuse this allocation for every run + let mut vec: Vec<usize> = (0..len).collect(); + vec.shrink_to_fit(); + + b.iter(|| { + let mut buf = [0; 64]; + into_iter_helper( + &mut vec, + |_| {}, + |it| { + while let Ok(a) = it.next_chunk() { + buf = a; + } + }, + ); + black_box(buf); + + into_iter_helper( + &mut vec, + |d| d.rotate_left(len / 2), + |it| { + while let Ok(a) = it.next_chunk() { + buf = a; + } + }, + ); + black_box(buf); + }); +} + #[bench] fn bench_from_array_1000(b: &mut Bencher) { const N: usize = 1000; diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index a563b2587..44a378990 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -283,9 +283,7 @@ impl<T> Box<T> { #[must_use] #[inline(always)] pub fn pin(x: T) -> Pin<Box<T>> { - (#[rustc_box] - Box::new(x)) - .into() + Box::new(x).into() } /// Allocates memory on the heap then places `x` into it, @@ -1242,8 +1240,8 @@ unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> { #[stable(feature = "rust1", since = "1.0.0")] impl<T: Default> Default for Box<T> { /// Creates a `Box<T>`, with the `Default` value for T. + #[inline] fn default() -> Self { - #[rustc_box] Box::new(T::default()) } } @@ -1252,6 +1250,7 @@ impl<T: Default> Default for Box<T> { #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")] impl<T> const Default for Box<[T]> { + #[inline] fn default() -> Self { let ptr: Unique<[T]> = Unique::<[T; 0]>::dangling(); Box(ptr, Global) @@ -1262,6 +1261,7 @@ impl<T> const Default for Box<[T]> { #[stable(feature = "default_box_extra", since = "1.17.0")] #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")] impl const Default for Box<str> { + #[inline] fn default() -> Self { // SAFETY: This is the same as `Unique::cast<U>` but with an unsized `U = str`. let ptr: Unique<str> = unsafe { @@ -1616,7 +1616,6 @@ impl<T, const N: usize> From<[T; N]> for Box<[T]> { /// println!("{boxed:?}"); /// ``` fn from(array: [T; N]) -> Box<[T]> { - #[rustc_box] Box::new(array) } } diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs index c1a82e452..ad48315fd 100644 --- a/library/alloc/src/boxed/thin.rs +++ b/library/alloc/src/boxed/thin.rs @@ -48,7 +48,7 @@ unsafe impl<T: ?Sized + Sync> Sync for ThinBox<T> {} #[unstable(feature = "thin_box", issue = "92791")] impl<T> ThinBox<T> { - /// Moves a type to the heap with its `Metadata` stored in the heap allocation instead of on + /// Moves a type to the heap with its [`Metadata`] stored in the heap allocation instead of on /// the stack. 
/// /// # Examples @@ -59,6 +59,8 @@ impl<T> ThinBox<T> { /// /// let five = ThinBox::new(5); /// ``` + /// + /// [`Metadata`]: core::ptr::Pointee::Metadata #[cfg(not(no_global_oom_handling))] pub fn new(value: T) -> Self { let meta = ptr::metadata(&value); @@ -69,7 +71,7 @@ impl<T> ThinBox<T> { #[unstable(feature = "thin_box", issue = "92791")] impl<Dyn: ?Sized> ThinBox<Dyn> { - /// Moves a type to the heap with its `Metadata` stored in the heap allocation instead of on + /// Moves a type to the heap with its [`Metadata`] stored in the heap allocation instead of on /// the stack. /// /// # Examples @@ -80,6 +82,8 @@ impl<Dyn: ?Sized> ThinBox<Dyn> { /// /// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]); /// ``` + /// + /// [`Metadata`]: core::ptr::Pointee::Metadata #[cfg(not(no_global_oom_handling))] pub fn new_unsize<T>(value: T) -> Self where diff --git a/library/alloc/src/collections/binary_heap/mod.rs b/library/alloc/src/collections/binary_heap/mod.rs index 0b73b1af4..f1d0a305d 100644 --- a/library/alloc/src/collections/binary_heap/mod.rs +++ b/library/alloc/src/collections/binary_heap/mod.rs @@ -851,18 +851,30 @@ impl<T: Ord> BinaryHeap<T> { where F: FnMut(&T) -> bool, { - let mut first_removed = self.len(); + struct RebuildOnDrop<'a, T: Ord> { + heap: &'a mut BinaryHeap<T>, + first_removed: usize, + } + + let mut guard = RebuildOnDrop { first_removed: self.len(), heap: self }; + let mut i = 0; - self.data.retain(|e| { + guard.heap.data.retain(|e| { let keep = f(e); - if !keep && i < first_removed { - first_removed = i; + if !keep && i < guard.first_removed { + guard.first_removed = i; } i += 1; keep }); - // data[0..first_removed] is untouched, so we only need to rebuild the tail: - self.rebuild_tail(first_removed); + + impl<'a, T: Ord> Drop for RebuildOnDrop<'a, T> { + fn drop(&mut self) { + // data[..first_removed] is untouched, so we only need to + // rebuild the tail: + self.heap.rebuild_tail(self.first_removed); + } + } } } diff --git a/library/alloc/src/collections/binary_heap/tests.rs b/library/alloc/src/collections/binary_heap/tests.rs index ffbb6c80a..500caa356 100644 --- a/library/alloc/src/collections/binary_heap/tests.rs +++ b/library/alloc/src/collections/binary_heap/tests.rs @@ -474,6 +474,25 @@ fn test_retain() { assert!(a.is_empty()); } +#[test] +fn test_retain_catch_unwind() { + let mut heap = BinaryHeap::from(vec![3, 1, 2]); + + // Removes the 3, then unwinds out of retain. + let _ = catch_unwind(AssertUnwindSafe(|| { + heap.retain(|e| { + if *e == 1 { + panic!(); + } + false + }); + })); + + // Naively this would be [1, 2] (an invalid heap) if BinaryHeap delegates to + // Vec's retain impl and then does not rebuild the heap after that unwinds. + assert_eq!(heap.into_vec(), [2, 1]); +} + // old binaryheap failed this test // // Integrity means that all elements are present after a comparison panics, diff --git a/library/alloc/src/collections/btree/borrow.rs b/library/alloc/src/collections/btree/borrow.rs index 016f139a5..000b9bd0f 100644 --- a/library/alloc/src/collections/btree/borrow.rs +++ b/library/alloc/src/collections/btree/borrow.rs @@ -41,6 +41,28 @@ impl<'a, T> DormantMutRef<'a, T> { // SAFETY: our own safety conditions imply this reference is again unique. unsafe { &mut *self.ptr.as_ptr() } } + + /// Borrows a new mutable reference from the unique borrow initially captured. 
+ /// + /// # Safety + /// + /// The reborrow must have ended, i.e., the reference returned by `new` and + /// all pointers and references derived from it, must not be used anymore. + pub unsafe fn reborrow(&mut self) -> &'a mut T { + // SAFETY: our own safety conditions imply this reference is again unique. + unsafe { &mut *self.ptr.as_ptr() } + } + + /// Borrows a new shared reference from the unique borrow initially captured. + /// + /// # Safety + /// + /// The reborrow must have ended, i.e., the reference returned by `new` and + /// all pointers and references derived from it, must not be used anymore. + pub unsafe fn reborrow_shared(&self) -> &'a T { + // SAFETY: our own safety conditions imply this reference is again unique. + unsafe { &*self.ptr.as_ptr() } + } } #[cfg(test)] diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs index 1d9c4460e..386cd1a16 100644 --- a/library/alloc/src/collections/btree/map.rs +++ b/library/alloc/src/collections/btree/map.rs @@ -6,7 +6,7 @@ use core::hash::{Hash, Hasher}; use core::iter::{FromIterator, FusedIterator}; use core::marker::PhantomData; use core::mem::{self, ManuallyDrop}; -use core::ops::{Index, RangeBounds}; +use core::ops::{Bound, Index, RangeBounds}; use core::ptr; use crate::alloc::{Allocator, Global}; @@ -15,7 +15,7 @@ use super::borrow::DormantMutRef; use super::dedup_sorted_iter::DedupSortedIter; use super::navigate::{LazyLeafRange, LeafRange}; use super::node::{self, marker, ForceResult::*, Handle, NodeRef, Root}; -use super::search::SearchResult::*; +use super::search::{SearchBound, SearchResult::*}; use super::set_val::SetValZST; mod entry; @@ -2422,6 +2422,732 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { pub const fn is_empty(&self) -> bool { self.len() == 0 } + + /// Returns a [`Cursor`] pointing at the first element that is above the + /// given bound. + /// + /// If no such element exists then a cursor pointing at the "ghost" + /// non-element is returned. + /// + /// Passing [`Bound::Unbounded`] will return a cursor pointing at the first + /// element of the map. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(btree_cursors)] + /// + /// use std::collections::BTreeMap; + /// use std::ops::Bound; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// a.insert(4, "c"); + /// let cursor = a.lower_bound(Bound::Excluded(&2)); + /// assert_eq!(cursor.key(), Some(&3)); + /// ``` + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn lower_bound<Q>(&self, bound: Bound<&Q>) -> Cursor<'_, K, V> + where + K: Borrow<Q> + Ord, + Q: Ord, + { + let root_node = match self.root.as_ref() { + None => return Cursor { current: None, root: None }, + Some(root) => root.reborrow(), + }; + let edge = root_node.lower_bound(SearchBound::from_range(bound)); + Cursor { current: edge.next_kv().ok(), root: self.root.as_ref() } + } + + /// Returns a [`CursorMut`] pointing at the first element that is above the + /// given bound. + /// + /// If no such element exists then a cursor pointing at the "ghost" + /// non-element is returned. + /// + /// Passing [`Bound::Unbounded`] will return a cursor pointing at the first + /// element of the map. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(btree_cursors)] + /// + /// use std::collections::BTreeMap; + /// use std::ops::Bound; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// a.insert(4, "c"); + /// let cursor = a.lower_bound_mut(Bound::Excluded(&2)); + /// assert_eq!(cursor.key(), Some(&3)); + /// ``` + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn lower_bound_mut<Q>(&mut self, bound: Bound<&Q>) -> CursorMut<'_, K, V, A> + where + K: Borrow<Q> + Ord, + Q: Ord, + { + let (root, dormant_root) = DormantMutRef::new(&mut self.root); + let root_node = match root.as_mut() { + None => { + return CursorMut { + current: None, + root: dormant_root, + length: &mut self.length, + alloc: &mut *self.alloc, + }; + } + Some(root) => root.borrow_mut(), + }; + let edge = root_node.lower_bound(SearchBound::from_range(bound)); + CursorMut { + current: edge.next_kv().ok(), + root: dormant_root, + length: &mut self.length, + alloc: &mut *self.alloc, + } + } + + /// Returns a [`Cursor`] pointing at the last element that is below the + /// given bound. + /// + /// If no such element exists then a cursor pointing at the "ghost" + /// non-element is returned. + /// + /// Passing [`Bound::Unbounded`] will return a cursor pointing at the last + /// element of the map. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(btree_cursors)] + /// + /// use std::collections::BTreeMap; + /// use std::ops::Bound; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// a.insert(4, "c"); + /// let cursor = a.upper_bound(Bound::Excluded(&3)); + /// assert_eq!(cursor.key(), Some(&2)); + /// ``` + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn upper_bound<Q>(&self, bound: Bound<&Q>) -> Cursor<'_, K, V> + where + K: Borrow<Q> + Ord, + Q: Ord, + { + let root_node = match self.root.as_ref() { + None => return Cursor { current: None, root: None }, + Some(root) => root.reborrow(), + }; + let edge = root_node.upper_bound(SearchBound::from_range(bound)); + Cursor { current: edge.next_back_kv().ok(), root: self.root.as_ref() } + } + + /// Returns a [`CursorMut`] pointing at the last element that is below the + /// given bound. + /// + /// If no such element exists then a cursor pointing at the "ghost" + /// non-element is returned. + /// + /// Passing [`Bound::Unbounded`] will return a cursor pointing at the last + /// element of the map. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(btree_cursors)] + /// + /// use std::collections::BTreeMap; + /// use std::ops::Bound; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// a.insert(4, "c"); + /// let cursor = a.upper_bound_mut(Bound::Excluded(&3)); + /// assert_eq!(cursor.key(), Some(&2)); + /// ``` + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn upper_bound_mut<Q>(&mut self, bound: Bound<&Q>) -> CursorMut<'_, K, V, A> + where + K: Borrow<Q> + Ord, + Q: Ord, + { + let (root, dormant_root) = DormantMutRef::new(&mut self.root); + let root_node = match root.as_mut() { + None => { + return CursorMut { + current: None, + root: dormant_root, + length: &mut self.length, + alloc: &mut *self.alloc, + }; + } + Some(root) => root.borrow_mut(), + }; + let edge = root_node.upper_bound(SearchBound::from_range(bound)); + CursorMut { + current: edge.next_back_kv().ok(), + root: dormant_root, + length: &mut self.length, + alloc: &mut *self.alloc, + } + } +} + +/// A cursor over a `BTreeMap`. +/// +/// A `Cursor` is like an iterator, except that it can freely seek back-and-forth. +/// +/// Cursors always point to an element in the tree, and index in a logically circular way. +/// To accommodate this, there is a "ghost" non-element that yields `None` between the last and +/// first elements of the tree. +/// +/// A `Cursor` is created with the [`BTreeMap::lower_bound`] and [`BTreeMap::upper_bound`] methods. +#[unstable(feature = "btree_cursors", issue = "107540")] +pub struct Cursor<'a, K: 'a, V: 'a> { + current: Option<Handle<NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal>, marker::KV>>, + root: Option<&'a node::Root<K, V>>, +} + +#[unstable(feature = "btree_cursors", issue = "107540")] +impl<K, V> Clone for Cursor<'_, K, V> { + fn clone(&self) -> Self { + let Cursor { current, root } = *self; + Cursor { current, root } + } +} + +#[unstable(feature = "btree_cursors", issue = "107540")] +impl<K: Debug, V: Debug> Debug for Cursor<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("Cursor").field(&self.key_value()).finish() + } +} + +/// A cursor over a `BTreeMap` with editing operations. +/// +/// A `CursorMut` is like an iterator, except that it can freely seek back-and-forth, and can +/// safely mutate the tree during iteration. This is because the lifetime of its yielded +/// references is tied to its own lifetime, instead of just the underlying tree. This means +/// cursors cannot yield multiple elements at once. +/// +/// Cursors always point to an element in the tree, and index in a logically circular way. +/// To accommodate this, there is a "ghost" non-element that yields `None` between the last and +/// first elements of the tree. +/// +/// A `CursorMut` is created with the [`BTreeMap::lower_bound_mut`] and [`BTreeMap::upper_bound_mut`] +/// methods.
+#[unstable(feature = "btree_cursors", issue = "107540")] +pub struct CursorMut< + 'a, + K: 'a, + V: 'a, + #[unstable(feature = "allocator_api", issue = "32838")] A = Global, +> { + current: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV>>, + root: DormantMutRef<'a, Option<node::Root<K, V>>>, + length: &'a mut usize, + alloc: &'a mut A, +} + +#[unstable(feature = "btree_cursors", issue = "107540")] +impl<K: Debug, V: Debug, A> Debug for CursorMut<'_, K, V, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("CursorMut").field(&self.key_value()).finish() + } +} + +impl<'a, K, V> Cursor<'a, K, V> { + /// Moves the cursor to the next element of the `BTreeMap`. + /// + /// If the cursor is pointing to the "ghost" non-element then this will move it to + /// the first element of the `BTreeMap`. If it is pointing to the last + /// element of the `BTreeMap` then this will move it to the "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn move_next(&mut self) { + match self.current.take() { + None => { + self.current = self.root.and_then(|root| { + root.reborrow().first_leaf_edge().forget_node_type().right_kv().ok() + }); + } + Some(current) => { + self.current = current.next_leaf_edge().next_kv().ok(); + } + } + } + + /// Moves the cursor to the previous element of the `BTreeMap`. + /// + /// If the cursor is pointing to the "ghost" non-element then this will move it to + /// the last element of the `BTreeMap`. If it is pointing to the first + /// element of the `BTreeMap` then this will move it to the "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn move_prev(&mut self) { + match self.current.take() { + None => { + self.current = self.root.and_then(|root| { + root.reborrow().last_leaf_edge().forget_node_type().left_kv().ok() + }); + } + Some(current) => { + self.current = current.next_back_leaf_edge().next_back_kv().ok(); + } + } + } + + /// Returns a reference to the key of the element that the cursor is + /// currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn key(&self) -> Option<&'a K> { + self.current.as_ref().map(|current| current.into_kv().0) + } + + /// Returns a reference to the value of the element that the cursor is + /// currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn value(&self) -> Option<&'a V> { + self.current.as_ref().map(|current| current.into_kv().1) + } + + /// Returns a reference to the key and value of the element that the cursor + /// is currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn key_value(&self) -> Option<(&'a K, &'a V)> { + self.current.as_ref().map(|current| current.into_kv()) + } + + /// Returns a reference to the next element. + /// + /// If the cursor is pointing to the "ghost" non-element then this returns + /// the first element of the `BTreeMap`. If it is pointing to the last + /// element of the `BTreeMap` then this returns `None`. 
+ #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn peek_next(&self) -> Option<(&'a K, &'a V)> { + let mut next = self.clone(); + next.move_next(); + next.current.as_ref().map(|current| current.into_kv()) + } + + /// Returns a reference to the previous element. + /// + /// If the cursor is pointing to the "ghost" non-element then this returns + /// the last element of the `BTreeMap`. If it is pointing to the first + /// element of the `BTreeMap` then this returns `None`. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn peek_prev(&self) -> Option<(&'a K, &'a V)> { + let mut prev = self.clone(); + prev.move_prev(); + prev.current.as_ref().map(|current| current.into_kv()) + } +} + +impl<'a, K, V, A> CursorMut<'a, K, V, A> { + /// Moves the cursor to the next element of the `BTreeMap`. + /// + /// If the cursor is pointing to the "ghost" non-element then this will move it to + /// the first element of the `BTreeMap`. If it is pointing to the last + /// element of the `BTreeMap` then this will move it to the "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn move_next(&mut self) { + match self.current.take() { + None => { + // SAFETY: The previous borrow of root has ended. + self.current = unsafe { self.root.reborrow() }.as_mut().and_then(|root| { + root.borrow_mut().first_leaf_edge().forget_node_type().right_kv().ok() + }); + } + Some(current) => { + self.current = current.next_leaf_edge().next_kv().ok(); + } + } + } + + /// Moves the cursor to the previous element of the `BTreeMap`. + /// + /// If the cursor is pointing to the "ghost" non-element then this will move it to + /// the last element of the `BTreeMap`. If it is pointing to the first + /// element of the `BTreeMap` then this will move it to the "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn move_prev(&mut self) { + match self.current.take() { + None => { + // SAFETY: The previous borrow of root has ended. + self.current = unsafe { self.root.reborrow() }.as_mut().and_then(|root| { + root.borrow_mut().last_leaf_edge().forget_node_type().left_kv().ok() + }); + } + Some(current) => { + self.current = current.next_back_leaf_edge().next_back_kv().ok(); + } + } + } + + /// Returns a reference to the key of the element that the cursor is + /// currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn key(&self) -> Option<&K> { + self.current.as_ref().map(|current| current.reborrow().into_kv().0) + } + + /// Returns a reference to the value of the element that the cursor is + /// currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn value(&self) -> Option<&V> { + self.current.as_ref().map(|current| current.reborrow().into_kv().1) + } + + /// Returns a reference to the key and value of the element that the cursor + /// is currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn key_value(&self) -> Option<(&K, &V)> { + self.current.as_ref().map(|current| current.reborrow().into_kv()) + } + + /// Returns a mutable reference to the value of the element that the cursor + /// is currently pointing to. 
+ /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn value_mut(&mut self) -> Option<&mut V> { + self.current.as_mut().map(|current| current.kv_mut().1) + } + + /// Returns a reference to the key and mutable reference to the value of the + /// element that the cursor is currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn key_value_mut(&mut self) -> Option<(&K, &mut V)> { + self.current.as_mut().map(|current| { + let (k, v) = current.kv_mut(); + (&*k, v) + }) + } + + /// Returns a mutable reference to the key of the element that the cursor is + /// currently pointing to. + /// + /// This returns `None` if the cursor is currently pointing to the + /// "ghost" non-element. + /// + /// # Safety + /// + /// This can be used to modify the key, but you must ensure that the + /// `BTreeMap` invariants are maintained. Specifically: + /// + /// * The key must remain unique within the tree. + /// * The key must remain in sorted order with regards to other elements in + /// the tree. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub unsafe fn key_mut_unchecked(&mut self) -> Option<&mut K> { + self.current.as_mut().map(|current| current.kv_mut().0) + } + + /// Returns a reference to the key and value of the next element. + /// + /// If the cursor is pointing to the "ghost" non-element then this returns + /// the first element of the `BTreeMap`. If it is pointing to the last + /// element of the `BTreeMap` then this returns `None`. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn peek_next(&mut self) -> Option<(&K, &mut V)> { + let (k, v) = match self.current { + None => { + // SAFETY: The previous borrow of root has ended. + unsafe { self.root.reborrow() } + .as_mut()? + .borrow_mut() + .first_leaf_edge() + .next_kv() + .ok()? + .into_kv_valmut() + } + // SAFETY: We're not using this to mutate the tree. + Some(ref mut current) => { + unsafe { current.reborrow_mut() }.next_leaf_edge().next_kv().ok()?.into_kv_valmut() + } + }; + Some((k, v)) + } + + /// Returns a reference to the key and value of the previous element. + /// + /// If the cursor is pointing to the "ghost" non-element then this returns + /// the last element of the `BTreeMap`. If it is pointing to the first + /// element of the `BTreeMap` then this returns `None`. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn peek_prev(&mut self) -> Option<(&K, &mut V)> { + let (k, v) = match self.current.as_mut() { + None => { + // SAFETY: The previous borrow of root has ended. + unsafe { self.root.reborrow() } + .as_mut()? + .borrow_mut() + .last_leaf_edge() + .next_back_kv() + .ok()? + .into_kv_valmut() + } + Some(current) => { + // SAFETY: We're not using this to mutate the tree. + unsafe { current.reborrow_mut() } + .next_back_leaf_edge() + .next_back_kv() + .ok()? + .into_kv_valmut() + } + }; + Some((k, v)) + } + + /// Returns a read-only cursor pointing to the current element. + /// + /// The lifetime of the returned `Cursor` is bound to that of the + /// `CursorMut`, which means it cannot outlive the `CursorMut` and that the + /// `CursorMut` is frozen for the lifetime of the `Cursor`.
+ #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn as_cursor(&self) -> Cursor<'_, K, V> { + Cursor { + // SAFETY: The tree is immutable while the cursor exists. + root: unsafe { self.root.reborrow_shared().as_ref() }, + current: self.current.as_ref().map(|current| current.reborrow()), + } + } +} + +// Now the tree editing operations +impl<'a, K: Ord, V, A: Allocator + Clone> CursorMut<'a, K, V, A> { + /// Inserts a new element into the `BTreeMap` after the current one. + /// + /// If the cursor is pointing at the "ghost" non-element then the new element is + /// inserted at the front of the `BTreeMap`. + /// + /// # Safety + /// + /// You must ensure that the `BTreeMap` invariants are maintained. + /// Specifically: + /// + /// * The key of the newly inserted element must be unique in the tree. + /// * All keys in the tree must remain in sorted order. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub unsafe fn insert_after_unchecked(&mut self, key: K, value: V) { + let edge = match self.current.take() { + None => { + // SAFETY: We have no other reference to the tree. + match unsafe { self.root.reborrow() } { + root @ None => { + // Tree is empty, allocate a new root. + let mut node = NodeRef::new_leaf(self.alloc.clone()); + node.borrow_mut().push(key, value); + *root = Some(node.forget_type()); + *self.length += 1; + return; + } + Some(root) => root.borrow_mut().first_leaf_edge(), + } + } + Some(current) => current.next_leaf_edge(), + }; + + let handle = edge.insert_recursing(key, value, self.alloc.clone(), |ins| { + drop(ins.left); + // SAFETY: The handle to the newly inserted value is always on a + // leaf node, so adding a new root node doesn't invalidate it. + let root = unsafe { self.root.reborrow().as_mut().unwrap() }; + root.push_internal_level(self.alloc.clone()).push(ins.kv.0, ins.kv.1, ins.right) + }); + self.current = handle.left_edge().next_back_kv().ok(); + *self.length += 1; + } + + /// Inserts a new element into the `BTreeMap` before the current one. + /// + /// If the cursor is pointing at the "ghost" non-element then the new element is + /// inserted at the end of the `BTreeMap`. + /// + /// # Safety + /// + /// You must ensure that the `BTreeMap` invariants are maintained. + /// Specifically: + /// + /// * The key of the newly inserted element must be unique in the tree. + /// * All keys in the tree must remain in sorted order. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub unsafe fn insert_before_unchecked(&mut self, key: K, value: V) { + let edge = match self.current.take() { + None => { + // SAFETY: We have no other reference to the tree. + match unsafe { self.root.reborrow() } { + root @ None => { + // Tree is empty, allocate a new root. + let mut node = NodeRef::new_leaf(self.alloc.clone()); + node.borrow_mut().push(key, value); + *root = Some(node.forget_type()); + *self.length += 1; + return; + } + Some(root) => root.borrow_mut().last_leaf_edge(), + } + } + Some(current) => current.next_back_leaf_edge(), + }; + + let handle = edge.insert_recursing(key, value, self.alloc.clone(), |ins| { + drop(ins.left); + // SAFETY: The handle to the newly inserted value is always on a + // leaf node, so adding a new root node doesn't invalidate it. 
+ let root = unsafe { self.root.reborrow().as_mut().unwrap() }; + root.push_internal_level(self.alloc.clone()).push(ins.kv.0, ins.kv.1, ins.right) + }); + self.current = handle.right_edge().next_kv().ok(); + *self.length += 1; + } + + /// Inserts a new element into the `BTreeMap` after the current one. + /// + /// If the cursor is pointing at the "ghost" non-element then the new element is + /// inserted at the front of the `BTreeMap`. + /// + /// # Panics + /// + /// This function panics if: + /// - the given key compares less than or equal to the current element (if + /// any). + /// - the given key compares greater than or equal to the next element (if + /// any). + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn insert_after(&mut self, key: K, value: V) { + if let Some(current) = self.key() { + if &key <= current { + panic!("key must be ordered above the current element"); + } + } + if let Some((next, _)) = self.peek_next() { + if &key >= next { + panic!("key must be ordered below the next element"); + } + } + unsafe { + self.insert_after_unchecked(key, value); + } + } + + /// Inserts a new element into the `BTreeMap` before the current one. + /// + /// If the cursor is pointing at the "ghost" non-element then the new element is + /// inserted at the end of the `BTreeMap`. + /// + /// # Panics + /// + /// This function panics if: + /// - the given key compares greater than or equal to the current element + /// (if any). + /// - the given key compares less than or equal to the previous element (if + /// any). + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn insert_before(&mut self, key: K, value: V) { + if let Some(current) = self.key() { + if &key >= current { + panic!("key must be ordered below the current element"); + } + } + if let Some((prev, _)) = self.peek_prev() { + if &key <= prev { + panic!("key must be ordered above the previous element"); + } + } + unsafe { + self.insert_before_unchecked(key, value); + } + } + + /// Removes the current element from the `BTreeMap`. + /// + /// The element that was removed is returned, and the cursor is + /// moved to point to the next element in the `BTreeMap`. + /// + /// If the cursor is currently pointing to the "ghost" non-element then no element + /// is removed and `None` is returned. The cursor is not moved in this case. + #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn remove_current(&mut self) -> Option<(K, V)> { + let current = self.current.take()?; + let mut emptied_internal_root = false; + let (kv, pos) = + current.remove_kv_tracking(|| emptied_internal_root = true, self.alloc.clone()); + self.current = pos.next_kv().ok(); + *self.length -= 1; + if emptied_internal_root { + // SAFETY: This is safe since current does not point within the now + // empty root node. + let root = unsafe { self.root.reborrow().as_mut().unwrap() }; + root.pop_internal_level(self.alloc.clone()); + } + Some(kv) + } + + /// Removes the current element from the `BTreeMap`. + /// + /// The element that was removed is returned, and the cursor is + /// moved to point to the previous element in the `BTreeMap`. + /// + /// If the cursor is currently pointing to the "ghost" non-element then no element + /// is removed and `None` is returned. The cursor is not moved in this case.
+ #[unstable(feature = "btree_cursors", issue = "107540")] + pub fn remove_current_and_move_back(&mut self) -> Option<(K, V)> { + let current = self.current.take()?; + let mut emptied_internal_root = false; + let (kv, pos) = + current.remove_kv_tracking(|| emptied_internal_root = true, self.alloc.clone()); + self.current = pos.next_back_kv().ok(); + *self.length -= 1; + if emptied_internal_root { + // SAFETY: This is safe since current does not point within the now + // empty root node. + let root = unsafe { self.root.reborrow().as_mut().unwrap() }; + root.pop_internal_level(self.alloc.clone()); + } + Some(kv) + } } #[cfg(test)] diff --git a/library/alloc/src/collections/btree/map/entry.rs b/library/alloc/src/collections/btree/map/entry.rs index 370b58864..e9366eec9 100644 --- a/library/alloc/src/collections/btree/map/entry.rs +++ b/library/alloc/src/collections/btree/map/entry.rs @@ -347,7 +347,7 @@ impl<'a, K: Ord, V, A: Allocator + Clone> VacantEntry<'a, K, V, A> { /// assert_eq!(map["poneyland"], 37); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn insert(self, value: V) -> &'a mut V { + pub fn insert(mut self, value: V) -> &'a mut V { let out_ptr = match self.handle { None => { // SAFETY: There is no tree yet so no reference to it exists. @@ -358,25 +358,27 @@ impl<'a, K: Ord, V, A: Allocator + Clone> VacantEntry<'a, K, V, A> { map.length = 1; val_ptr } - Some(handle) => match handle.insert_recursing(self.key, value, self.alloc.clone()) { - (None, val_ptr) => { - // SAFETY: We have consumed self.handle. - let map = unsafe { self.dormant_map.awaken() }; - map.length += 1; - val_ptr - } - (Some(ins), val_ptr) => { - drop(ins.left); - // SAFETY: We have consumed self.handle and dropped the - // remaining reference to the tree, ins.left. - let map = unsafe { self.dormant_map.awaken() }; - let root = map.root.as_mut().unwrap(); // same as ins.left - root.push_internal_level(self.alloc).push(ins.kv.0, ins.kv.1, ins.right); - map.length += 1; - val_ptr - } - }, + Some(handle) => { + let new_handle = + handle.insert_recursing(self.key, value, self.alloc.clone(), |ins| { + drop(ins.left); + // SAFETY: Pushing a new root node doesn't invalidate + // handles to existing nodes. + let map = unsafe { self.dormant_map.reborrow() }; + let root = map.root.as_mut().unwrap(); // same as ins.left + root.push_internal_level(self.alloc).push(ins.kv.0, ins.kv.1, ins.right) + }); + + // Get the pointer to the value + let val_ptr = new_handle.into_val_mut(); + + // SAFETY: We have consumed self.handle. + let map = unsafe { self.dormant_map.awaken() }; + map.length += 1; + val_ptr + } }; + // Now that we have finished growing the tree using borrowed references, // dereference the pointer to a part of it, that we picked up along the way. 
unsafe { &mut *out_ptr } diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs index 700b1463b..76c2f27b4 100644 --- a/library/alloc/src/collections/btree/map/tests.rs +++ b/library/alloc/src/collections/btree/map/tests.rs @@ -2336,3 +2336,52 @@ fn from_array() { let unordered_duplicates = BTreeMap::from([(3, 4), (1, 2), (1, 2)]); assert_eq!(map, unordered_duplicates); } + +#[test] +fn test_cursor() { + let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); + + let mut cur = map.lower_bound(Bound::Unbounded); + assert_eq!(cur.key(), Some(&1)); + cur.move_next(); + assert_eq!(cur.key(), Some(&2)); + assert_eq!(cur.peek_next(), Some((&3, &'c'))); + cur.move_prev(); + assert_eq!(cur.key(), Some(&1)); + assert_eq!(cur.peek_prev(), None); + + let mut cur = map.upper_bound(Bound::Excluded(&1)); + assert_eq!(cur.key(), None); + cur.move_next(); + assert_eq!(cur.key(), Some(&1)); + cur.move_prev(); + assert_eq!(cur.key(), None); + assert_eq!(cur.peek_prev(), Some((&3, &'c'))); +} + +#[test] +fn test_cursor_mut() { + let mut map = BTreeMap::from([(1, 'a'), (3, 'c'), (5, 'e')]); + let mut cur = map.lower_bound_mut(Bound::Excluded(&3)); + assert_eq!(cur.key(), Some(&5)); + cur.insert_before(4, 'd'); + assert_eq!(cur.key(), Some(&5)); + assert_eq!(cur.peek_prev(), Some((&4, &mut 'd'))); + cur.move_next(); + assert_eq!(cur.key(), None); + cur.insert_before(6, 'f'); + assert_eq!(cur.key(), None); + assert_eq!(cur.remove_current(), None); + assert_eq!(cur.key(), None); + cur.insert_after(0, '?'); + assert_eq!(cur.key(), None); + assert_eq!(map, BTreeMap::from([(0, '?'), (1, 'a'), (3, 'c'), (4, 'd'), (5, 'e'), (6, 'f')])); + + let mut cur = map.upper_bound_mut(Bound::Included(&5)); + assert_eq!(cur.key(), Some(&5)); + assert_eq!(cur.remove_current(), Some((5, 'e'))); + assert_eq!(cur.key(), Some(&6)); + assert_eq!(cur.remove_current_and_move_back(), Some((6, 'f'))); + assert_eq!(cur.key(), Some(&4)); + assert_eq!(map, BTreeMap::from([(0, '?'), (1, 'a'), (3, 'c'), (4, 'd')])); +} diff --git a/library/alloc/src/collections/btree/navigate.rs b/library/alloc/src/collections/btree/navigate.rs index 1e33c1e64..b890717e5 100644 --- a/library/alloc/src/collections/btree/navigate.rs +++ b/library/alloc/src/collections/btree/navigate.rs @@ -4,6 +4,7 @@ use core::ops::RangeBounds; use core::ptr; use super::node::{marker, ForceResult::*, Handle, NodeRef}; +use super::search::SearchBound; use crate::alloc::Allocator; // `front` and `back` are always both `None` or both `Some`. @@ -386,7 +387,7 @@ impl<BorrowType: marker::BorrowType, K, V> /// Given a leaf edge handle, returns [`Result::Ok`] with a handle to the neighboring KV /// on the left side, which is either in the same leaf node or in an ancestor node. /// If the leaf edge is the first one in the tree, returns [`Result::Err`] with the root node. - fn next_back_kv( + pub fn next_back_kv( self, ) -> Result< Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>, @@ -707,7 +708,9 @@ impl<BorrowType: marker::BorrowType, K, V> } /// Returns the leaf edge closest to a KV for backward navigation. 
- fn next_back_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> { + pub fn next_back_leaf_edge( + self, + ) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> { match self.force() { Leaf(leaf_kv) => leaf_kv.left_edge(), Internal(internal_kv) => { @@ -717,3 +720,51 @@ impl<BorrowType: marker::BorrowType, K, V> } } } + +impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> { + /// Returns the leaf edge corresponding to the first point at which the + /// given bound is true. + pub fn lower_bound<Q: ?Sized>( + self, + mut bound: SearchBound<&Q>, + ) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> + where + Q: Ord, + K: Borrow<Q>, + { + let mut node = self; + loop { + let (edge, new_bound) = node.find_lower_bound_edge(bound); + match edge.force() { + Leaf(edge) => return edge, + Internal(edge) => { + node = edge.descend(); + bound = new_bound; + } + } + } + } + + /// Returns the leaf edge corresponding to the last point at which the + /// given bound is true. + pub fn upper_bound<Q: ?Sized>( + self, + mut bound: SearchBound<&Q>, + ) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> + where + Q: Ord, + K: Borrow<Q>, + { + let mut node = self; + loop { + let (edge, new_bound) = node.find_upper_bound_edge(bound); + match edge.force() { + Leaf(edge) => return edge, + Internal(edge) => { + node = edge.descend(); + bound = new_bound; + } + } + } + } +} diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs index 691246644..3233a575e 100644 --- a/library/alloc/src/collections/btree/node.rs +++ b/library/alloc/src/collections/btree/node.rs @@ -442,6 +442,24 @@ impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> { // SAFETY: we have exclusive access to the entire node. unsafe { &mut *ptr } } + + /// Returns a dormant copy of this node with its lifetime erased which can + /// be reawakened later. + pub fn dormant(&self) -> NodeRef<marker::DormantMut, K, V, Type> { + NodeRef { height: self.height, node: self.node, _marker: PhantomData } + } +} + +impl<K, V, Type> NodeRef<marker::DormantMut, K, V, Type> { + /// Revert to the unique borrow initially captured. + /// + /// # Safety + /// + /// The reborrow must have ended, i.e., the reference returned by `new` and + /// all pointers and references derived from it, must not be used anymore. + pub unsafe fn awaken<'a>(self) -> NodeRef<marker::Mut<'a>, K, V, Type> { + NodeRef { height: self.height, node: self.node, _marker: PhantomData } + } } impl<K, V, Type> NodeRef<marker::Dying, K, V, Type> { @@ -798,6 +816,25 @@ impl<'a, K, V, NodeType, HandleType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeT // We can't use Handle::new_kv or Handle::new_edge because we don't know our type Handle { node: unsafe { self.node.reborrow_mut() }, idx: self.idx, _marker: PhantomData } } + + /// Returns a dormant copy of this handle which can be reawakened later. + /// + /// See `DormantMutRef` for more details. + pub fn dormant(&self) -> Handle<NodeRef<marker::DormantMut, K, V, NodeType>, HandleType> { + Handle { node: self.node.dormant(), idx: self.idx, _marker: PhantomData } + } +} + +impl<K, V, NodeType, HandleType> Handle<NodeRef<marker::DormantMut, K, V, NodeType>, HandleType> { + /// Revert to the unique borrow initially captured. 
+ /// + /// # Safety + /// + /// The reborrow must have ended, i.e., the reference returned by `new` and + /// all pointers and references derived from it, must not be used anymore. + pub unsafe fn awaken<'a>(self) -> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, HandleType> { + Handle { node: unsafe { self.node.awaken() }, idx: self.idx, _marker: PhantomData } + } } impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> { @@ -851,9 +888,11 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark /// Inserts a new key-value pair between the key-value pairs to the right and left of /// this edge. This method assumes that there is enough space in the node for the new /// pair to fit. - /// - /// The returned pointer points to the inserted value. - fn insert_fit(&mut self, key: K, val: V) -> *mut V { + unsafe fn insert_fit( + mut self, + key: K, + val: V, + ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> { debug_assert!(self.node.len() < CAPACITY); let new_len = self.node.len() + 1; @@ -862,7 +901,7 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark slice_insert(self.node.val_area_mut(..new_len), self.idx, val); *self.node.len_mut() = new_len as u16; - self.node.val_area_mut(self.idx).assume_init_mut() + Handle::new_kv(self.node, self.idx) } } } @@ -871,21 +910,26 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark /// Inserts a new key-value pair between the key-value pairs to the right and left of /// this edge. This method splits the node if there isn't enough room. /// - /// The returned pointer points to the inserted value. + /// Returns a dormant handle to the inserted node which can be reawakened + /// once splitting is complete. fn insert<A: Allocator + Clone>( - mut self, + self, key: K, val: V, alloc: A, - ) -> (Option<SplitResult<'a, K, V, marker::Leaf>>, *mut V) { + ) -> ( + Option<SplitResult<'a, K, V, marker::Leaf>>, + Handle<NodeRef<marker::DormantMut, K, V, marker::Leaf>, marker::KV>, + ) { if self.node.len() < CAPACITY { - let val_ptr = self.insert_fit(key, val); - (None, val_ptr) + // SAFETY: There is enough space in the node for insertion. + let handle = unsafe { self.insert_fit(key, val) }; + (None, handle.dormant()) } else { let (middle_kv_idx, insertion) = splitpoint(self.idx); let middle = unsafe { Handle::new_kv(self.node, middle_kv_idx) }; let mut result = middle.split(alloc); - let mut insertion_edge = match insertion { + let insertion_edge = match insertion { LeftOrRight::Left(insert_idx) => unsafe { Handle::new_edge(result.left.reborrow_mut(), insert_idx) }, @@ -893,8 +937,10 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark Handle::new_edge(result.right.borrow_mut(), insert_idx) }, }; - let val_ptr = insertion_edge.insert_fit(key, val); - (Some(result), val_ptr) + // SAFETY: We just split the node, so there is enough space for + // insertion. 
+ let handle = unsafe { insertion_edge.insert_fit(key, val).dormant() }; + (Some(result), handle) } } } @@ -976,21 +1022,31 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark key: K, value: V, alloc: A, - ) -> (Option<SplitResult<'a, K, V, marker::LeafOrInternal>>, *mut V) { - let (mut split, val_ptr) = match self.insert(key, value, alloc.clone()) { - (None, val_ptr) => return (None, val_ptr), - (Some(split), val_ptr) => (split.forget_node_type(), val_ptr), + split_root: impl FnOnce(SplitResult<'a, K, V, marker::LeafOrInternal>), + ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> { + let (mut split, handle) = match self.insert(key, value, alloc.clone()) { + // SAFETY: we have finished splitting and can now re-awaken the + // handle to the inserted element. + (None, handle) => return unsafe { handle.awaken() }, + (Some(split), handle) => (split.forget_node_type(), handle), }; loop { split = match split.left.ascend() { Ok(parent) => { match parent.insert(split.kv.0, split.kv.1, split.right, alloc.clone()) { - None => return (None, val_ptr), + // SAFETY: we have finished splitting and can now re-awaken the + // handle to the inserted element. + None => return unsafe { handle.awaken() }, Some(split) => split.forget_node_type(), } } - Err(root) => return (Some(SplitResult { left: root, ..split }), val_ptr), + Err(root) => { + split_root(SplitResult { left: root, ..split }); + // SAFETY: we have finished splitting and can now re-awaken the + // handle to the inserted element. + return unsafe { handle.awaken() }; + } }; } } @@ -1043,6 +1099,14 @@ impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType> let leaf = self.node.into_leaf_mut(); unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() } } + + pub fn into_kv_valmut(self) -> (&'a K, &'a mut V) { + debug_assert!(self.idx < self.node.len()); + let leaf = self.node.into_leaf_mut(); + let k = unsafe { leaf.keys.get_unchecked(self.idx).assume_init_ref() }; + let v = unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() }; + (k, v) + } } impl<'a, K, V, NodeType> Handle<NodeRef<marker::ValMut<'a>, K, V, NodeType>, marker::KV> { @@ -1667,6 +1731,7 @@ pub mod marker { pub enum Owned {} pub enum Dying {} + pub enum DormantMut {} pub struct Immut<'a>(PhantomData<&'a ()>); pub struct Mut<'a>(PhantomData<&'a mut ()>); pub struct ValMut<'a>(PhantomData<&'a mut ()>); @@ -1688,6 +1753,7 @@ pub mod marker { impl<'a> BorrowType for Immut<'a> {} impl<'a> BorrowType for Mut<'a> {} impl<'a> BorrowType for ValMut<'a> {} + impl BorrowType for DormantMut {} pub enum KV {} pub enum Edge {} diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs index e54880e86..34bc0ce91 100644 --- a/library/alloc/src/collections/vec_deque/into_iter.rs +++ b/library/alloc/src/collections/vec_deque/into_iter.rs @@ -1,5 +1,5 @@ -use core::fmt; use core::iter::{FusedIterator, TrustedLen}; +use core::{array, fmt, mem::MaybeUninit, ops::Try, ptr}; use crate::alloc::{Allocator, Global}; @@ -52,6 +52,126 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> { let len = self.inner.len(); (len, Some(len)) } + + #[inline] + fn advance_by(&mut self, n: usize) -> Result<(), usize> { + if self.inner.len < n { + let len = self.inner.len; + self.inner.clear(); + Err(len) + } else { + self.inner.drain(..n); + Ok(()) + } + } + + #[inline] + fn count(self) -> usize { + self.inner.len + } + + fn try_fold<B, F, R>(&mut self, mut init: B, 
mut f: F) -> R + where + F: FnMut(B, Self::Item) -> R, + R: Try<Output = B>, + { + struct Guard<'a, T, A: Allocator> { + deque: &'a mut VecDeque<T, A>, + // `consumed <= deque.len` always holds. + consumed: usize, + } + + impl<'a, T, A: Allocator> Drop for Guard<'a, T, A> { + fn drop(&mut self) { + self.deque.len -= self.consumed; + self.deque.head = self.deque.to_physical_idx(self.consumed); + } + } + + let mut guard = Guard { deque: &mut self.inner, consumed: 0 }; + + let (head, tail) = guard.deque.as_slices(); + + init = head + .iter() + .map(|elem| { + guard.consumed += 1; + // SAFETY: Because we incremented `guard.consumed`, the + // deque effectively forgot the element, so we can take + // ownership + unsafe { ptr::read(elem) } + }) + .try_fold(init, &mut f)?; + + tail.iter() + .map(|elem| { + guard.consumed += 1; + // SAFETY: Same as above. + unsafe { ptr::read(elem) } + }) + .try_fold(init, &mut f) + } + + #[inline] + fn fold<B, F>(mut self, init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + match self.try_fold(init, |b, item| Ok::<B, !>(f(b, item))) { + Ok(b) => b, + Err(e) => match e {}, + } + } + + #[inline] + fn last(mut self) -> Option<Self::Item> { + self.inner.pop_back() + } + + fn next_chunk<const N: usize>( + &mut self, + ) -> Result<[Self::Item; N], array::IntoIter<Self::Item, N>> { + let mut raw_arr = MaybeUninit::uninit_array(); + let raw_arr_ptr = raw_arr.as_mut_ptr().cast(); + let (head, tail) = self.inner.as_slices(); + + if head.len() >= N { + // SAFETY: By manually adjusting the head and length of the deque, we effectively + // make it forget the first `N` elements, so taking ownership of them is safe. + unsafe { ptr::copy_nonoverlapping(head.as_ptr(), raw_arr_ptr, N) }; + self.inner.head = self.inner.to_physical_idx(N); + self.inner.len -= N; + // SAFETY: We initialized the entire array with items from `head` + return Ok(unsafe { raw_arr.transpose().assume_init() }); + } + + // SAFETY: Same argument as above. + unsafe { ptr::copy_nonoverlapping(head.as_ptr(), raw_arr_ptr, head.len()) }; + let remaining = N - head.len(); + + if tail.len() >= remaining { + // SAFETY: Same argument as above. + unsafe { + ptr::copy_nonoverlapping(tail.as_ptr(), raw_arr_ptr.add(head.len()), remaining) + }; + self.inner.head = self.inner.to_physical_idx(N); + self.inner.len -= N; + // SAFETY: We initialized the entire array with items from `head` and `tail` + Ok(unsafe { raw_arr.transpose().assume_init() }) + } else { + // SAFETY: Same argument as above. + unsafe { + ptr::copy_nonoverlapping(tail.as_ptr(), raw_arr_ptr.add(head.len()), tail.len()) + }; + let init = head.len() + tail.len(); + // We completely drained all the deque's elements. + self.inner.head = 0; + self.inner.len = 0; + // SAFETY: We copied all elements from both slices to the beginning of the array, so + // the given range is initialized.
+ Err(unsafe { array::IntoIter::new_unchecked(raw_arr, 0..init) }) + } + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -60,10 +180,73 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> { fn next_back(&mut self) -> Option<T> { self.inner.pop_back() } + + #[inline] + fn advance_back_by(&mut self, n: usize) -> Result<(), usize> { + let len = self.inner.len; + if len >= n { + self.inner.truncate(len - n); + Ok(()) + } else { + self.inner.clear(); + Err(len) + } + } + + fn try_rfold<B, F, R>(&mut self, mut init: B, mut f: F) -> R + where + F: FnMut(B, Self::Item) -> R, + R: Try<Output = B>, + { + struct Guard<'a, T, A: Allocator> { + deque: &'a mut VecDeque<T, A>, + // `consumed <= deque.len` always holds. + consumed: usize, + } + + impl<'a, T, A: Allocator> Drop for Guard<'a, T, A> { + fn drop(&mut self) { + self.deque.len -= self.consumed; + } + } + + let mut guard = Guard { deque: &mut self.inner, consumed: 0 }; + + let (head, tail) = guard.deque.as_slices(); + + init = tail + .iter() + .map(|elem| { + guard.consumed += 1; + // SAFETY: See `try_fold`'s safety comment. + unsafe { ptr::read(elem) } + }) + .try_rfold(init, &mut f)?; + + head.iter() + .map(|elem| { + guard.consumed += 1; + // SAFETY: Same as above. + unsafe { ptr::read(elem) } + }) + .try_rfold(init, &mut f) + } + + #[inline] + fn rfold<B, F>(mut self, init: B, mut f: F) -> B + where + F: FnMut(B, Self::Item) -> B, + { + match self.try_rfold(init, |b, item| Ok::<B, !>(f(b, item))) { + Ok(b) => b, + Err(e) => match e {}, + } + } } #[stable(feature = "rust1", since = "1.0.0")] impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> { + #[inline] fn is_empty(&self) -> bool { self.inner.is_empty() } diff --git a/library/alloc/src/ffi/c_str.rs b/library/alloc/src/ffi/c_str.rs index 11bd4c4dc..f99395c72 100644 --- a/library/alloc/src/ffi/c_str.rs +++ b/library/alloc/src/ffi/c_str.rs @@ -991,12 +991,6 @@ impl IntoStringError { pub fn utf8_error(&self) -> Utf8Error { self.error } - - #[doc(hidden)] - #[unstable(feature = "cstr_internals", issue = "none")] - pub fn __source(&self) -> &Utf8Error { - &self.error - } } impl IntoStringError { @@ -1141,6 +1135,6 @@ impl core::error::Error for IntoStringError { } fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - Some(self.__source()) + Some(&self.error) } } diff --git a/library/alloc/src/fmt.rs b/library/alloc/src/fmt.rs index eadb35cb9..1da86e1a4 100644 --- a/library/alloc/src/fmt.rs +++ b/library/alloc/src/fmt.rs @@ -558,7 +558,7 @@ pub use core::fmt::Alignment; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::Error; #[stable(feature = "rust1", since = "1.0.0")] -pub use core::fmt::{write, ArgumentV1, Arguments}; +pub use core::fmt::{write, Arguments}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::{Binary, Octal}; #[stable(feature = "rust1", since = "1.0.0")] diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index ca75c3895..e9cc3875f 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -87,6 +87,7 @@ #![warn(missing_debug_implementations)] #![warn(missing_docs)] #![allow(explicit_outlives_requirements)] +#![cfg_attr(not(bootstrap), warn(multiple_supertrait_upcastable))] // // Library features: #![feature(alloc_layout_extra)] @@ -115,7 +116,6 @@ #![feature(const_eval_select)] #![feature(const_pin)] #![feature(const_waker)] -#![feature(cstr_from_bytes_until_nul)] #![feature(dispatch_from_dyn)] #![feature(error_generic_member_access)] #![feature(error_in_core)] @@ -195,6 +195,7 @@ 
#![feature(c_unwind)] #![feature(with_negative_coherence)] #![cfg_attr(test, feature(panic_update_hook))] +#![cfg_attr(not(bootstrap), feature(multiple_supertrait_upcastable))] // // Rustdoc features: #![feature(doc_cfg)] diff --git a/library/alloc/src/macros.rs b/library/alloc/src/macros.rs index 5198bf297..4c6ae8f25 100644 --- a/library/alloc/src/macros.rs +++ b/library/alloc/src/macros.rs @@ -48,6 +48,8 @@ macro_rules! vec { ); ($($x:expr),+ $(,)?) => ( $crate::__rust_force_expr!(<[_]>::into_vec( + // This rustc_box is not required, but it produces a dramatic improvement in compile + // time when constructing arrays with many elements. #[rustc_box] $crate::boxed::Box::new([$($x),+]) )) diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs index 5a10121bb..3751f2a24 100644 --- a/library/alloc/src/raw_vec.rs +++ b/library/alloc/src/raw_vec.rs @@ -241,10 +241,15 @@ impl<T, A: Allocator> RawVec<T, A> { if T::IS_ZST || self.cap == 0 { None } else { - // We have an allocated chunk of memory, so we can bypass runtime - // checks to get our current layout. + // We could use Layout::array here which ensures the absence of isize and usize overflows + // and could hypothetically handle differences between stride and size, but this memory + // has already been allocated so we know it can't overflow and currently Rust does not + // support such types. So we can do better by skipping some checks and avoiding an unwrap. + let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) }; unsafe { - let layout = Layout::array::<T>(self.cap).unwrap_unchecked(); + let align = mem::align_of::<T>(); + let size = mem::size_of::<T>().unchecked_mul(self.cap); + let layout = Layout::from_size_align_unchecked(size, align); Some((self.ptr.cast().into(), layout)) } } @@ -426,11 +431,13 @@ impl<T, A: Allocator> RawVec<T, A> { assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity"); let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) }; - + // See current_memory() for why this assert is here + let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) }; let ptr = unsafe { // `Layout::array` cannot overflow here because it would have // overflowed earlier when capacity was larger. - let new_layout = Layout::array::<T>(cap).unwrap_unchecked(); + let new_size = mem::size_of::<T>().unchecked_mul(cap); + let new_layout = Layout::from_size_align_unchecked(new_size, layout.align()); self.alloc .shrink(ptr, layout, new_layout) .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })? diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs index c9aa23fc4..932a537c5 100644 --- a/library/alloc/src/rc.rs +++ b/library/alloc/src/rc.rs @@ -1092,7 +1092,7 @@ impl<T: ?Sized> Rc<T> { /// # Safety /// /// If any other `Rc` or [`Weak`] pointers to the same allocation exist, then - /// they must be must not be dereferenced or have active borrows for the duration + /// they must not be dereferenced or have active borrows for the duration /// of the returned borrow, and their inner type must be exactly the same as the /// inner type of this Rc (including lifetimes). This is trivially the case if no /// such pointers exist, for example immediately after `Rc::new`.
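The `current_memory` rewrite above relies on two observations: every type Rust currently supports has a size that is a multiple of its alignment (stride == size), and an allocation of this capacity already succeeded, so the size computation cannot overflow. A minimal standalone sketch of the same reasoning, using checked operations and a hypothetical `recomputed_layout` helper:

```rust
use std::alloc::Layout;
use std::mem;

// Hypothetical helper mirroring the layout reconstruction in `current_memory`,
// with checked operations in place of the `unchecked_*` ones.
fn recomputed_layout<T>(cap: usize) -> Layout {
    // Stride == size holds for every type Rust currently supports; the real
    // code asserts this at compile time with an inline `const` block.
    assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0);
    // In `current_memory` this is `unchecked_mul`: the multiplication cannot
    // overflow because an allocation of this capacity already succeeded.
    let size = mem::size_of::<T>() * cap;
    Layout::from_size_align(size, mem::align_of::<T>()).unwrap()
}

fn main() {
    // Agrees with the checked `Layout::array` construction it replaces.
    assert_eq!(recomputed_layout::<u64>(10), Layout::array::<u64>(10).unwrap());
}
```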
@@ -2145,7 +2145,7 @@ impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I { Rc::from_iter_exact(self, low) } } else { - // TrustedLen contract guarantees that `upper_bound == `None` implies an iterator + // TrustedLen contract guarantees that `upper_bound == None` implies an iterator // length exceeding `usize::MAX`. // The default implementation would collect into a vec which would panic. // Thus we panic here immediately without invoking `Vec` code. diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index fecacc2bb..093dcbbe8 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -782,6 +782,38 @@ impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> { } } +// Specializable trait for implementing ToOwned::clone_into. This is +// public in the crate and has the Allocator parameter so that +// vec::clone_from can use it too. +#[cfg(not(no_global_oom_handling))] +pub(crate) trait SpecCloneIntoVec<T, A: Allocator> { + fn clone_into(&self, target: &mut Vec<T, A>); +} + +#[cfg(not(no_global_oom_handling))] +impl<T: Clone, A: Allocator> SpecCloneIntoVec<T, A> for [T] { + default fn clone_into(&self, target: &mut Vec<T, A>) { + // drop anything in target that will not be overwritten + target.truncate(self.len()); + + // target.len <= self.len due to the truncate above, so the + // slices here are always in-bounds. + let (init, tail) = self.split_at(target.len()); + + // reuse the contained values' allocations/resources. + target.clone_from_slice(init); + target.extend_from_slice(tail); + } +} + +#[cfg(not(no_global_oom_handling))] +impl<T: Copy, A: Allocator> SpecCloneIntoVec<T, A> for [T] { + fn clone_into(&self, target: &mut Vec<T, A>) { + target.clear(); + target.extend_from_slice(self); + } +} + #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] impl<T: Clone> ToOwned for [T] { @@ -797,16 +829,7 @@ impl<T: Clone> ToOwned for [T] { } fn clone_into(&self, target: &mut Vec<T>) { - // drop anything in target that will not be overwritten - target.truncate(self.len()); - - // target.len <= self.len due to the truncate above, so the - // slices here are always in-bounds. - let (init, tail) = self.split_at(target.len()); - - // reuse the contained values' allocations/resources. - target.clone_from_slice(init); - target.extend_from_slice(tail); + SpecCloneIntoVec::clone_into(self, target); } } diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs index ca182c810..c7e7ed3e9 100644 --- a/library/alloc/src/string.rs +++ b/library/alloc/src/string.rs @@ -42,8 +42,6 @@ #![stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(no_global_oom_handling))] -use core::char::{decode_utf16, REPLACEMENT_CHARACTER}; use core::error::Error; use core::fmt; use core::hash; @@ -683,7 +681,7 @@ impl String { // This isn't done via collect::<Result<_, _>>() for performance reasons. // FIXME: the function can be simplified again when #48994 is closed. let mut ret = String::with_capacity(v.len()); - for c in decode_utf16(v.iter().cloned()) { + for c in char::decode_utf16(v.iter().cloned()) { if let Ok(c) = c { ret.push(c); } else { @@ -722,7 +720,9 @@ impl String { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf16_lossy(v: &[u16]) -> String { - decode_utf16(v.iter().cloned()).map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)).collect() + char::decode_utf16(v.iter().cloned()) + .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER)) + .collect() } /// Decomposes a `String` into its raw components.
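The specialized `clone_into` above reuses the target's existing elements and allocation wherever possible. The same pattern works as a plain function outside the standard library; a simplified sketch (the `clone_into_vec` name and the free-function shape are illustrative, not part of the patch):

```rust
// Simplified, non-specialized version of the `clone_into` logic above.
fn clone_into_vec<T: Clone>(src: &[T], target: &mut Vec<T>) {
    // Drop anything in `target` that will not be overwritten.
    target.truncate(src.len());

    // `target.len() <= src.len()` after the truncate, so the split is in-bounds.
    let (init, tail) = src.split_at(target.len());

    // Reuse the existing elements' allocations/resources in place...
    target.clone_from_slice(init);
    // ...and clone fresh elements only for the part that was missing.
    target.extend_from_slice(tail);
}

fn main() {
    let mut target = vec!["a".to_string(), "b".to_string(), "c".to_string()];
    // Shrinking: the extra element is dropped, the rest are overwritten in place.
    clone_into_vec(&["x".to_string(), "y".to_string()], &mut target);
    assert_eq!(target, ["x", "y"]);

    // Growing: existing elements are overwritten, one new element is cloned in.
    clone_into_vec(&["p".to_string(), "q".to_string(), "r".to_string()], &mut target);
    assert_eq!(target, ["p", "q", "r"]);
}
```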
@@ -928,12 +928,12 @@ impl String { /// Copies elements from `src` range to the end of the string. /// - /// ## Panics + /// # Panics /// /// Panics if the starting point or end point do not lie on a [`char`] /// boundary, or if they're out of bounds. /// - /// ## Examples + /// # Examples /// /// ``` /// #![feature(string_extend_from_within)] @@ -2213,10 +2213,6 @@ impl PartialEq for String { fn eq(&self, other: &String) -> bool { PartialEq::eq(&self[..], &other[..]) } - #[inline] - fn ne(&self, other: &String) -> bool { - PartialEq::ne(&self[..], &other[..]) - } } macro_rules! impl_eq { diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs index bab7f5f53..fdd341a06 100644 --- a/library/alloc/src/sync.rs +++ b/library/alloc/src/sync.rs @@ -654,6 +654,20 @@ impl<T> Arc<T> { /// /// This will succeed even if there are outstanding weak references. /// + // FIXME: when `Arc::into_inner` is stabilized, add this paragraph: + /* + /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't + /// want to keep the `Arc` in the [`Err`] case. + /// Immediately dropping the [`Err`] payload, like in the expression + /// `Arc::try_unwrap(this).ok()`, can still cause the strong count to + /// drop to zero and the inner value of the `Arc` to be dropped: + /// For instance if two threads execute this expression in parallel, then + /// there is a race condition. The threads could first both check whether they + /// have the last clone of their `Arc` via `Arc::try_unwrap`, and then + /// both drop their `Arc` in the call to [`ok`][`Result::ok`], + /// taking the strong count from two down to zero. + /// + */ /// # Examples /// /// ``` @@ -685,6 +699,137 @@ impl<T> Arc<T> { Ok(elem) } } + + /// Returns the inner value, if the `Arc` has exactly one strong reference. + /// + /// Otherwise, [`None`] is returned and the `Arc` is dropped. + /// + /// This will succeed even if there are outstanding weak references. + /// + /// If `Arc::into_inner` is called on every clone of this `Arc`, + /// it is guaranteed that exactly one of the calls returns the inner value. + /// This means in particular that the inner value is not dropped. + /// + /// The similar expression `Arc::try_unwrap(this).ok()` does not + /// offer such a guarantee. See the last example below. + // + // FIXME: when `Arc::into_inner` is stabilized, add this to end + // of the previous sentence: + /* + /// and the documentation of [`Arc::try_unwrap`]. + */ + /// + /// # Examples + /// + /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives. + /// ``` + /// #![feature(arc_into_inner)] + /// + /// use std::sync::Arc; + /// + /// let x = Arc::new(3); + /// let y = Arc::clone(&x); + /// + /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`: + /// let x_thread = std::thread::spawn(|| Arc::into_inner(x)); + /// let y_thread = std::thread::spawn(|| Arc::into_inner(y)); + /// + /// let x_inner_value = x_thread.join().unwrap(); + /// let y_inner_value = y_thread.join().unwrap(); + /// + /// // One of the threads is guaranteed to receive the inner value: + /// assert!(matches!( + /// (x_inner_value, y_inner_value), + /// (None, Some(3)) | (Some(3), None) + /// )); + /// // The result could also be `(None, None)` if the threads called + /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead. 
+ /// ``` + /// + /// A more practical example demonstrating the need for `Arc::into_inner`: + /// ``` + /// #![feature(arc_into_inner)] + /// + /// use std::sync::Arc; + /// + /// // Definition of a simple singly linked list using `Arc`: + /// #[derive(Clone)] + /// struct LinkedList<T>(Option<Arc<Node<T>>>); + /// struct Node<T>(T, Option<Arc<Node<T>>>); + /// + /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc` + /// // can cause a stack overflow. To prevent this, we can provide a + /// // manual `Drop` implementation that does the destruction in a loop: + /// impl<T> Drop for LinkedList<T> { + /// fn drop(&mut self) { + /// let mut link = self.0.take(); + /// while let Some(arc_node) = link.take() { + /// if let Some(Node(_value, next)) = Arc::into_inner(arc_node) { + /// link = next; + /// } + /// } + /// } + /// } + /// + /// // Implementation of `new` and `push` omitted + /// impl<T> LinkedList<T> { + /// /* ... */ + /// # fn new() -> Self { + /// # LinkedList(None) + /// # } + /// # fn push(&mut self, x: T) { + /// # self.0 = Some(Arc::new(Node(x, self.0.take()))); + /// # } + /// } + /// + /// // The following code could have still caused a stack overflow + /// // despite the manual `Drop` impl if that `Drop` impl had used + /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`. + /// + /// // Create a long list and clone it + /// let mut x = LinkedList::new(); + /// for i in 0..100000 { + /// x.push(i); // Adds i to the front of x + /// } + /// let y = x.clone(); + /// + /// // Drop the clones in parallel + /// let x_thread = std::thread::spawn(|| drop(x)); + /// let y_thread = std::thread::spawn(|| drop(y)); + /// x_thread.join().unwrap(); + /// y_thread.join().unwrap(); + /// ``` + + // FIXME: when `Arc::into_inner` is stabilized, adjust above documentation + // and the documentation of `Arc::try_unwrap` according to the `FIXME`s. Also + // open an issue on rust-lang/rust-clippy, asking for a lint against + // `Arc::try_unwrap(...).ok()`. + #[inline] + #[unstable(feature = "arc_into_inner", issue = "106894")] + pub fn into_inner(this: Self) -> Option<T> { + // Make sure that the ordinary `Drop` implementation isn’t called as well + let mut this = mem::ManuallyDrop::new(this); + + // Following the implementation of `drop` and `drop_slow` + if this.inner().strong.fetch_sub(1, Release) != 1 { + return None; + } + + acquire!(this.inner().strong); + + // SAFETY: This mirrors the line + // + // unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) }; + // + // in `drop_slow`. Instead of dropping the value behind the pointer, + // it is read and eventually returned; `ptr::read` has the same + // safety conditions as `ptr::drop_in_place`. + let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) }; + + drop(Weak { ptr: this.ptr }); + + Some(inner) + } } impl<T> Arc<[T]> { @@ -1588,7 +1733,7 @@ impl<T: ?Sized> Arc<T> { /// # Safety /// /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then - /// they must be must not be dereferenced or have active borrows for the duration + /// they must not be dereferenced or have active borrows for the duration /// of the returned borrow, and their inner type must be exactly the same as the /// inner type of this Rc (including lifetimes). This is trivially the case if no /// such pointers exist, for example immediately after `Arc::new`. 
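A sketch of the guarantee documented above: when every reference is handed to `Arc::into_inner`, exactly one call returns the value. This requires a nightly toolchain while the `arc_into_inner` feature is unstable:

```rust
#![feature(arc_into_inner)] // unstable feature gate at the time of this change

use std::sync::Arc;
use std::thread;

fn main() {
    let first = Arc::new(String::from("shared"));
    // Hand *every* reference to a thread that calls `Arc::into_inner`...
    let arcs = vec![Arc::clone(&first), Arc::clone(&first), first];
    let handles: Vec<_> = arcs
        .into_iter()
        .map(|arc| thread::spawn(move || Arc::into_inner(arc)))
        .collect();

    // ...so exactly one of the calls is guaranteed to return the value.
    let winners: Vec<String> =
        handles.into_iter().filter_map(|h| h.join().unwrap()).collect();
    assert_eq!(winners, [String::from("shared")]);
}
```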
@@ -2750,7 +2895,7 @@ impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I { Arc::from_iter_exact(self, low) } } else { - // TrustedLen contract guarantees that `upper_bound == `None` implies an iterator + // TrustedLen contract guarantees that `upper_bound == None` implies an iterator // length exceeding `usize::MAX`. // The default implementation would collect into a vec which would panic. // Thus we panic here immediately without invoking `Vec` code. diff --git a/library/alloc/src/sync/tests.rs b/library/alloc/src/sync/tests.rs index 0fae8953a..863d58bdf 100644 --- a/library/alloc/src/sync/tests.rs +++ b/library/alloc/src/sync/tests.rs @@ -102,6 +102,38 @@ fn try_unwrap() { } #[test] +fn into_inner() { + for _ in 0..100 + // ^ Increase chances of hitting potential race conditions + { + let x = Arc::new(3); + let y = Arc::clone(&x); + let r_thread = std::thread::spawn(|| Arc::into_inner(x)); + let s_thread = std::thread::spawn(|| Arc::into_inner(y)); + let r = r_thread.join().expect("r_thread panicked"); + let s = s_thread.join().expect("s_thread panicked"); + assert!( + matches!((r, s), (None, Some(3)) | (Some(3), None)), + "assertion failed: unexpected result `{:?}`\ + \n expected `(None, Some(3))` or `(Some(3), None)`", + (r, s), + ); + } + + let x = Arc::new(3); + assert_eq!(Arc::into_inner(x), Some(3)); + + let x = Arc::new(4); + let y = Arc::clone(&x); + assert_eq!(Arc::into_inner(x), None); + assert_eq!(Arc::into_inner(y), Some(4)); + + let x = Arc::new(5); + let _w = Arc::downgrade(&x); + assert_eq!(Arc::into_inner(x), Some(5)); +} + +#[test] fn into_from_raw() { let x = Arc::new(Box::new("hello")); let y = x.clone(); diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 36b0b3c9e..f2aa30f18 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -378,8 +378,8 @@ mod spec_extend; /// Currently, `Vec` does not guarantee the order in which elements are dropped. /// The order has changed in the past and may change again. /// -/// [`get`]: ../../std/vec/struct.Vec.html#method.get -/// [`get_mut`]: ../../std/vec/struct.Vec.html#method.get_mut +/// [`get`]: slice::get +/// [`get_mut`]: slice::get_mut /// [`String`]: crate::string::String /// [`&str`]: type@str /// [`shrink_to_fit`]: Vec::shrink_to_fit @@ -2647,35 +2647,6 @@ impl<T, A: Allocator> ops::DerefMut for Vec<T, A> { } #[cfg(not(no_global_oom_handling))] -trait SpecCloneFrom { - fn clone_from(this: &mut Self, other: &Self); -} - -#[cfg(not(no_global_oom_handling))] -impl<T: Clone, A: Allocator> SpecCloneFrom for Vec<T, A> { - default fn clone_from(this: &mut Self, other: &Self) { - // drop anything that will not be overwritten - this.truncate(other.len()); - - // self.len <= other.len due to the truncate above, so the - // slices here are always in-bounds. - let (init, tail) = other.split_at(this.len()); - - // reuse the contained values' allocations/resources. 
- this.clone_from_slice(init); - this.extend_from_slice(tail); - } -} - -#[cfg(not(no_global_oom_handling))] -impl<T: Copy, A: Allocator> SpecCloneFrom for Vec<T, A> { - fn clone_from(this: &mut Self, other: &Self) { - this.clear(); - this.extend_from_slice(other); - } -} - -#[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> { #[cfg(not(test))] @@ -2695,7 +2666,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> { } fn clone_from(&mut self, other: &Self) { - SpecCloneFrom::clone_from(self, other) + crate::slice::SpecCloneIntoVec::clone_into(other.as_slice(), self); } } @@ -3160,10 +3131,7 @@ impl<T, const N: usize> From<[T; N]> for Vec<T> { /// ``` #[cfg(not(test))] fn from(s: [T; N]) -> Vec<T> { - <[T]>::into_vec( - #[rustc_box] - Box::new(s), - ) + <[T]>::into_vec(Box::new(s)) } #[cfg(test)] diff --git a/library/core/benches/array.rs b/library/core/benches/array.rs new file mode 100644 index 000000000..d8cc44d05 --- /dev/null +++ b/library/core/benches/array.rs @@ -0,0 +1,19 @@ +use test::black_box; +use test::Bencher; + +macro_rules! map_array { + ($func_name:ident, $start_item: expr, $map_item: expr, $arr_size: expr) => { + #[bench] + fn $func_name(b: &mut Bencher) { + let arr = [$start_item; $arr_size]; + b.iter(|| black_box(arr).map(|_| black_box($map_item))); + } + }; +} + +map_array!(map_8byte_8byte_8, 0u64, 1u64, 80); +map_array!(map_8byte_8byte_64, 0u64, 1u64, 640); +map_array!(map_8byte_8byte_256, 0u64, 1u64, 2560); + +map_array!(map_8byte_256byte_256, 0u64, [0u64; 4], 2560); +map_array!(map_256byte_8byte_256, [0u64; 4], 0u64, 2560); diff --git a/library/core/benches/char/methods.rs b/library/core/benches/char/methods.rs index 9408f83c3..5d4df1ac8 100644 --- a/library/core/benches/char/methods.rs +++ b/library/core/benches/char/methods.rs @@ -1,26 +1,26 @@ -use test::Bencher; +use test::{black_box, Bencher}; const CHARS: [char; 9] = ['0', 'x', '2', '5', 'A', 'f', '7', '8', '9']; const RADIX: [u32; 5] = [2, 8, 10, 16, 32]; #[bench] fn bench_to_digit_radix_2(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(2)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(2)).min()) } #[bench] fn bench_to_digit_radix_10(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(10)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(10)).min()) } #[bench] fn bench_to_digit_radix_16(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(16)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(16)).min()) } #[bench] fn bench_to_digit_radix_36(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(36)).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_digit(36)).min()) } #[bench] @@ -31,47 +31,59 @@ fn bench_to_digit_radix_var(b: &mut Bencher) { .cycle() .zip(RADIX.iter().cycle()) .take(10_000) - .map(|(c, radix)| c.to_digit(*radix)) + .map(|(c, radix)| black_box(c).to_digit(*radix)) .min() }) } #[bench] fn bench_to_ascii_uppercase(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_ascii_uppercase()).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_ascii_uppercase()).min()) } #[bench] fn bench_to_ascii_lowercase(b: &mut Bencher) { - b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| 
c.to_ascii_lowercase()).min()) + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| black_box(c).to_ascii_lowercase()).min()) } #[bench] fn bench_ascii_mix_to_uppercase(b: &mut Bencher) { - b.iter(|| (0..=255).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count()) + b.iter(|| { + (0..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_uppercase()).count() + }) } #[bench] fn bench_ascii_mix_to_lowercase(b: &mut Bencher) { - b.iter(|| (0..=255).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count()) + b.iter(|| { + (0..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_lowercase()).count() + }) } #[bench] fn bench_ascii_char_to_uppercase(b: &mut Bencher) { - b.iter(|| (0..=127).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count()) + b.iter(|| { + (0..=127).cycle().take(10_000).map(|b| black_box(char::from(b)).to_uppercase()).count() + }) } #[bench] fn bench_ascii_char_to_lowercase(b: &mut Bencher) { - b.iter(|| (0..=127).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count()) + b.iter(|| { + (0..=127).cycle().take(10_000).map(|b| black_box(char::from(b)).to_lowercase()).count() + }) } #[bench] fn bench_non_ascii_char_to_uppercase(b: &mut Bencher) { - b.iter(|| (128..=255).cycle().take(10_000).map(|b| char::from(b).to_uppercase()).count()) + b.iter(|| { + (128..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_uppercase()).count() + }) } #[bench] fn bench_non_ascii_char_to_lowercase(b: &mut Bencher) { - b.iter(|| (128..=255).cycle().take(10_000).map(|b| char::from(b).to_lowercase()).count()) + b.iter(|| { + (128..=255).cycle().take(10_000).map(|b| black_box(char::from(b)).to_lowercase()).count() + }) } diff --git a/library/core/benches/lib.rs b/library/core/benches/lib.rs index f1244d932..e4100120d 100644 --- a/library/core/benches/lib.rs +++ b/library/core/benches/lib.rs @@ -9,6 +9,7 @@ extern crate test; mod any; +mod array; mod ascii; mod char; mod fmt; diff --git a/library/core/benches/num/flt2dec/strategy/dragon.rs b/library/core/benches/num/flt2dec/strategy/dragon.rs index 319b9773e..377c99eff 100644 --- a/library/core/benches/num/flt2dec/strategy/dragon.rs +++ b/library/core/benches/num/flt2dec/strategy/dragon.rs @@ -1,14 +1,14 @@ use super::super::*; use core::num::flt2dec::strategy::dragon::*; use std::mem::MaybeUninit; -use test::Bencher; +use test::{black_box, Bencher}; #[bench] fn bench_small_shortest(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -17,7 +17,7 @@ fn bench_big_shortest(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -26,7 +26,7 @@ fn bench_small_exact_3(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -35,7 +35,7 @@ fn bench_big_exact_3(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -44,7 +44,7 @@ fn bench_small_exact_12(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf 
= [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -53,7 +53,7 @@ fn bench_big_exact_12(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -62,7 +62,7 @@ fn bench_small_exact_inf(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -71,6 +71,6 @@ fn bench_big_exact_inf(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } diff --git a/library/core/benches/num/flt2dec/strategy/grisu.rs b/library/core/benches/num/flt2dec/strategy/grisu.rs index 8e47a046c..6bea5e55d 100644 --- a/library/core/benches/num/flt2dec/strategy/grisu.rs +++ b/library/core/benches/num/flt2dec/strategy/grisu.rs @@ -1,7 +1,7 @@ use super::super::*; use core::num::flt2dec::strategy::grisu::*; use std::mem::MaybeUninit; -use test::Bencher; +use test::{black_box, Bencher}; pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded { match decode(v).1 { @@ -15,7 +15,7 @@ fn bench_small_shortest(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -24,7 +24,7 @@ fn bench_big_shortest(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS]; b.iter(|| { - format_shortest(&decoded, &mut buf); + format_shortest(black_box(&decoded), &mut buf); }); } @@ -33,7 +33,7 @@ fn bench_small_exact_3(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -42,7 +42,7 @@ fn bench_big_exact_3(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 3]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -51,7 +51,7 @@ fn bench_small_exact_12(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -60,7 +60,7 @@ fn bench_big_exact_12(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 12]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -69,7 +69,7 @@ fn bench_small_exact_inf(b: &mut Bencher) { let decoded = decode_finite(3.141592f64); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } @@ -78,6 +78,6 @@ fn bench_big_exact_inf(b: &mut Bencher) { let decoded = decode_finite(f64::MAX); let mut buf = [MaybeUninit::new(0); 1024]; b.iter(|| { - format_exact(&decoded, &mut buf, i16::MIN); + format_exact(black_box(&decoded), &mut buf, i16::MIN); }); } diff --git 
a/library/core/src/alloc/global.rs b/library/core/src/alloc/global.rs index 1d80b8bf9..18da70451 100644 --- a/library/core/src/alloc/global.rs +++ b/library/core/src/alloc/global.rs @@ -203,7 +203,7 @@ pub unsafe trait GlobalAlloc { ptr } - /// Shrink or grow a block of memory to the given `new_size`. + /// Shrink or grow a block of memory to the given `new_size` in bytes. /// The block is described by the given `ptr` pointer and `layout`. /// /// If this returns a non-null pointer, then ownership of the memory block @@ -211,10 +211,11 @@ pub unsafe trait GlobalAlloc { /// Any access to the old `ptr` is Undefined Behavior, even if the /// allocation remained in-place. The newly returned pointer is the only valid pointer /// for accessing this memory now. + /// /// The new memory block is allocated with `layout`, - /// but with the `size` updated to `new_size`. This new layout must be - /// used when deallocating the new memory block with `dealloc`. The range - /// `0..min(layout.size(), new_size)` of the new memory block is + /// but with the `size` updated to `new_size` in bytes. + /// This new layout must be used when deallocating the new memory block with `dealloc`. + /// The range `0..min(layout.size(), new_size)` of the new memory block is /// guaranteed to have the same values as the original block. /// /// If this method returns null, then ownership of the memory diff --git a/library/core/src/any.rs b/library/core/src/any.rs index c0fb0d993..c27646b8f 100644 --- a/library/core/src/any.rs +++ b/library/core/src/any.rs @@ -56,7 +56,7 @@ //! let value_any = value as &dyn Any; //! //! // Try to convert our value to a `String`. If successful, we want to -//! // output the String`'s length as well as its value. If not, it's a +//! // output the `String`'s length as well as its value. If not, it's a //! // different type: just print it out unadorned. //! match value_any.downcast_ref::<String>() { //! Some(as_string) => { diff --git a/library/core/src/array/drain.rs b/library/core/src/array/drain.rs new file mode 100644 index 000000000..5fadf907b --- /dev/null +++ b/library/core/src/array/drain.rs @@ -0,0 +1,76 @@ +use crate::iter::{TrustedLen, UncheckedIterator}; +use crate::mem::ManuallyDrop; +use crate::ptr::drop_in_place; +use crate::slice; + +/// A situationally-optimized version of `array.into_iter().for_each(func)`. +/// +/// [`crate::array::IntoIter`]s are great when you need an owned iterator, but +/// storing the entire array *inside* the iterator like that can sometimes +/// pessimize code. Notably, it can be more bytes than you really want to move +/// around, and because the array accesses index into it, SRoA has a harder time +/// optimizing away the type than it does with iterators that just hold a couple of pointers. +/// +/// Thus this function exists, which gives a way to get *moved* access to the +/// elements of an array using a small iterator -- no bigger than a slice iterator. +/// +/// The function-taking-a-closure structure makes it safe, as it keeps callers +/// from looking at already-dropped elements. +pub(crate) fn drain_array_with<T, R, const N: usize>( + array: [T; N], + func: impl for<'a> FnOnce(Drain<'a, T>) -> R, +) -> R { + let mut array = ManuallyDrop::new(array); + // SAFETY: Now that the local won't drop it, it's ok to construct the `Drain` which will. + let drain = Drain(array.iter_mut()); + func(drain) +} + +/// See [`drain_array_with`] -- this is `pub(crate)` only so it's allowed to be +/// mentioned in the signature of that method.
(Otherwise it hits `E0446`.) +// INVARIANT: It's ok to drop the remainder of the inner iterator. +pub(crate) struct Drain<'a, T>(slice::IterMut<'a, T>); + +impl<T> Drop for Drain<'_, T> { + fn drop(&mut self) { + // SAFETY: By the type invariant, we're allowed to drop all these. + unsafe { drop_in_place(self.0.as_mut_slice()) } + } +} + +impl<T> Iterator for Drain<'_, T> { + type Item = T; + + #[inline] + fn next(&mut self) -> Option<T> { + let p: *const T = self.0.next()?; + // SAFETY: The iterator was already advanced, so we won't drop this later. + Some(unsafe { p.read() }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option<usize>) { + let n = self.len(); + (n, Some(n)) + } +} + +impl<T> ExactSizeIterator for Drain<'_, T> { + #[inline] + fn len(&self) -> usize { + self.0.len() + } +} + +// SAFETY: This is a 1:1 wrapper for a slice iterator, which is also `TrustedLen`. +unsafe impl<T> TrustedLen for Drain<'_, T> {} + +impl<T> UncheckedIterator for Drain<'_, T> { + unsafe fn next_unchecked(&mut self) -> T { + // SAFETY: `Drain` is 1:1 with the inner iterator, so if the caller promised + // that there's an element left, the inner iterator has one too. + let p: *const T = unsafe { self.0.next_unchecked() }; + // SAFETY: The iterator was already advanced, so we won't drop this later. + unsafe { p.read() } + } +} diff --git a/library/core/src/array/equality.rs b/library/core/src/array/equality.rs index b2c895f88..d749865f7 100644 --- a/library/core/src/array/equality.rs +++ b/library/core/src/array/equality.rs @@ -1,6 +1,5 @@ +use crate::cmp::BytewiseEq; use crate::convert::TryInto; -use crate::num::{NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize}; -use crate::num::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize}; #[stable(feature = "rust1", since = "1.0.0")] impl<A, B, const N: usize> PartialEq<[B; N]> for [A; N] @@ -144,74 +143,14 @@ impl<T: PartialEq<Other>, Other, const N: usize> SpecArrayEq<Other, N> for T { } } -impl<T: IsRawEqComparable<U>, U, const N: usize> SpecArrayEq<U, N> for T { +impl<T: BytewiseEq<U>, U, const N: usize> SpecArrayEq<U, N> for T { fn spec_eq(a: &[T; N], b: &[U; N]) -> bool { - // SAFETY: This is why `IsRawEqComparable` is an `unsafe trait`. - unsafe { - let b = &*b.as_ptr().cast::<[T; N]>(); - crate::intrinsics::raw_eq(a, b) - } + // SAFETY: Arrays are compared element-wise, and don't add any padding + // between elements, so when the elements are `BytewiseEq`, we can + // compare the entire array at once. + unsafe { crate::intrinsics::raw_eq(a, crate::mem::transmute(b)) } } fn spec_ne(a: &[T; N], b: &[U; N]) -> bool { !Self::spec_eq(a, b) } } - -/// `U` exists on here mostly because `min_specialization` didn't let me -/// repeat the `T` type parameter in the above specialization, so instead -/// the `T == U` constraint comes from the impls on this. -/// # Safety -/// - Neither `Self` nor `U` has any padding. -/// - `Self` and `U` have the same layout. -/// - `Self: PartialEq<U>` is byte-wise (this means no floats, among other things) -#[rustc_specialization_trait] -unsafe trait IsRawEqComparable<U>: PartialEq<U> {} - -macro_rules! is_raw_eq_comparable { - ($($t:ty),+ $(,)?) => {$( - unsafe impl IsRawEqComparable<$t> for $t {} - )+}; -} - -// SAFETY: All the ordinary integer types have no padding, and are not pointers. 
-is_raw_eq_comparable!(u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize); - -// SAFETY: bool and char have *niches*, but no *padding* (and these are not pointer types), so this -// is sound -is_raw_eq_comparable!(bool, char); - -// SAFETY: Similarly, the non-zero types have a niche, but no undef and no pointers, -// and they compare like their underlying numeric type. -is_raw_eq_comparable!( - NonZeroU8, - NonZeroU16, - NonZeroU32, - NonZeroU64, - NonZeroU128, - NonZeroUsize, - NonZeroI8, - NonZeroI16, - NonZeroI32, - NonZeroI64, - NonZeroI128, - NonZeroIsize, -); - -// SAFETY: The NonZero types have the "null" optimization guaranteed, and thus -// are also safe to equality-compare bitwise inside an `Option`. -// The way `PartialOrd` is defined for `Option` means that this wouldn't work -// for `<` or `>` on the signed types, but since we only do `==` it's fine. -is_raw_eq_comparable!( - Option<NonZeroU8>, - Option<NonZeroU16>, - Option<NonZeroU32>, - Option<NonZeroU64>, - Option<NonZeroU128>, - Option<NonZeroUsize>, - Option<NonZeroI8>, - Option<NonZeroI16>, - Option<NonZeroI32>, - Option<NonZeroI64>, - Option<NonZeroI128>, - Option<NonZeroIsize>, -); diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs index 2825e0bbb..1643842d6 100644 --- a/library/core/src/array/mod.rs +++ b/library/core/src/array/mod.rs @@ -10,16 +10,19 @@ use crate::convert::{Infallible, TryFrom}; use crate::error::Error; use crate::fmt; use crate::hash::{self, Hash}; -use crate::iter::TrustedLen; +use crate::iter::UncheckedIterator; use crate::mem::{self, MaybeUninit}; use crate::ops::{ ChangeOutputType, ControlFlow, FromResidual, Index, IndexMut, NeverShortCircuit, Residual, Try, }; use crate::slice::{Iter, IterMut}; +mod drain; mod equality; mod iter; +pub(crate) use drain::drain_array_with; + #[stable(feature = "array_value_iter", since = "1.51.0")] pub use iter::IntoIter; @@ -52,16 +55,11 @@ pub use iter::IntoIter; /// ``` #[inline] #[stable(feature = "array_from_fn", since = "1.63.0")] -pub fn from_fn<T, const N: usize, F>(mut cb: F) -> [T; N] +pub fn from_fn<T, const N: usize, F>(cb: F) -> [T; N] where F: FnMut(usize) -> T, { - let mut idx = 0; - [(); N].map(|_| { - let res = cb(idx); - idx += 1; - res - }) + try_from_fn(NeverShortCircuit::wrap_mut_1(cb)).0 } /// Creates an array `[T; N]` where each fallible array element `T` is returned by the `cb` call. @@ -101,9 +99,14 @@ where R: Try, R::Residual: Residual<[R::Output; N]>, { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { try_collect_into_array_unchecked(&mut (0..N).map(cb)) } + let mut array = MaybeUninit::uninit_array::<N>(); + match try_from_fn_erased(&mut array, cb) { + ControlFlow::Break(r) => FromResidual::from_residual(r), + ControlFlow::Continue(()) => { + // SAFETY: All elements of the array were populated. + try { unsafe { MaybeUninit::array_assume_init(array) } } + } + } } /// Converts a reference to `T` into a reference to an array of length 1 (without copying). 
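The rework above funnels `from_fn`, `map`, `zip`, `each_ref`, and `each_mut` through a single fallible core, but the public behavior is unchanged. For reference, typical usage of the stable entry point:

```rust
use std::array;

fn main() {
    // The callback receives each index in order, 0..N.
    let squares: [usize; 5] = array::from_fn(|i| i * i);
    assert_eq!(squares, [0, 1, 4, 9, 16]);

    // The unstable `array::try_from_fn` is the fallible counterpart: the
    // first `Err` (or `None`) short-circuits and drops the prefix built so far.
}
```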
@@ -131,7 +134,8 @@ pub struct TryFromSliceError(()); impl fmt::Display for TryFromSliceError { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self.__description(), f) + #[allow(deprecated)] + self.description().fmt(f) } } @@ -139,20 +143,6 @@ impl fmt::Display for TryFromSliceError { impl Error for TryFromSliceError { #[allow(deprecated)] fn description(&self) -> &str { - self.__description() - } -} - -impl TryFromSliceError { - #[unstable( - feature = "array_error_internals", - reason = "available through Error trait and this method should not \ - be exposed publicly", - issue = "none" - )] - #[inline] - #[doc(hidden)] - pub fn __description(&self) -> &str { "could not convert slice to array" } } @@ -427,9 +417,7 @@ trait SpecArrayClone: Clone { impl<T: Clone> SpecArrayClone for T { #[inline] default fn clone<const N: usize>(array: &[T; N]) -> [T; N] { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut array.iter().cloned()) } + from_trusted_iterator(array.iter().cloned()) } } @@ -513,9 +501,7 @@ impl<T, const N: usize> [T; N] { where F: FnMut(T) -> U, { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut IntoIterator::into_iter(self).map(f)) } + self.try_map(NeverShortCircuit::wrap_mut_1(f)).0 } /// A fallible function `f` applied to each element on array `self` in order to @@ -552,9 +538,7 @@ impl<T, const N: usize> [T; N] { R: Try, R::Residual: Residual<[R::Output; N]>, { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { try_collect_into_array_unchecked(&mut IntoIterator::into_iter(self).map(f)) } + drain_array_with(self, |iter| try_from_trusted_iterator(iter.map(f))) } /// 'Zips up' two arrays into a single array of pairs. @@ -575,11 +559,9 @@ impl<T, const N: usize> [T; N] { /// ``` #[unstable(feature = "array_zip", issue = "80094")] pub fn zip<U>(self, rhs: [U; N]) -> [(T, U); N] { - let mut iter = IntoIterator::into_iter(self).zip(rhs); - - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut iter) } + drain_array_with(self, |lhs| { + drain_array_with(rhs, |rhs| from_trusted_iterator(crate::iter::zip(lhs, rhs))) + }) } /// Returns a slice containing the entire array. Equivalent to `&s[..]`. @@ -626,9 +608,7 @@ impl<T, const N: usize> [T; N] { /// ``` #[unstable(feature = "array_methods", issue = "76118")] pub fn each_ref(&self) -> [&T; N] { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut self.iter()) } + from_trusted_iterator(self.iter()) } /// Borrows each element mutably and returns an array of mutable references @@ -648,9 +628,7 @@ impl<T, const N: usize> [T; N] { /// ``` #[unstable(feature = "array_methods", issue = "76118")] pub fn each_mut(&mut self) -> [&mut T; N] { - // SAFETY: we know for certain that this iterator will yield exactly `N` - // items. - unsafe { collect_into_array_unchecked(&mut self.iter_mut()) } + from_trusted_iterator(self.iter_mut()) } /// Divides one array reference into two at an index. @@ -810,105 +788,71 @@ impl<T, const N: usize> [T; N] { } } -/// Pulls `N` items from `iter` and returns them as an array. If the iterator -/// yields fewer than `N` items, this function exhibits undefined behavior. 
+/// Populate an array from the first `N` elements of `iter` /// -/// See [`try_collect_into_array`] for more information. +/// # Panics /// +/// If the iterator doesn't actually have enough items. /// -/// # Safety -/// -/// It is up to the caller to guarantee that `iter` yields at least `N` items. -/// Violating this condition causes undefined behavior. -unsafe fn try_collect_into_array_unchecked<I, T, R, const N: usize>(iter: &mut I) -> R::TryType -where - // Note: `TrustedLen` here is somewhat of an experiment. This is just an - // internal function, so feel free to remove if this bound turns out to be a - // bad idea. In that case, remember to also remove the lower bound - // `debug_assert!` below! - I: Iterator + TrustedLen, - I::Item: Try<Output = T, Residual = R>, - R: Residual<[T; N]>, -{ - debug_assert!(N <= iter.size_hint().1.unwrap_or(usize::MAX)); - debug_assert!(N <= iter.size_hint().0); - - // SAFETY: covered by the function contract. - unsafe { try_collect_into_array(iter).unwrap_unchecked() } +/// By depending on `TrustedLen`, however, we can do that check up-front (where +/// it easily optimizes away) so it doesn't impact the loop that fills the array. +#[inline] +fn from_trusted_iterator<T, const N: usize>(iter: impl UncheckedIterator<Item = T>) -> [T; N] { + try_from_trusted_iterator(iter.map(NeverShortCircuit)).0 } -// Infallible version of `try_collect_into_array_unchecked`. -unsafe fn collect_into_array_unchecked<I, const N: usize>(iter: &mut I) -> [I::Item; N] +#[inline] +fn try_from_trusted_iterator<T, R, const N: usize>( + iter: impl UncheckedIterator<Item = R>, +) -> ChangeOutputType<R, [T; N]> where - I: Iterator + TrustedLen, + R: Try<Output = T>, + R::Residual: Residual<[T; N]>, { - let mut map = iter.map(NeverShortCircuit); - - // SAFETY: The same safety considerations w.r.t. the iterator length - // apply for `try_collect_into_array_unchecked` as for - // `collect_into_array_unchecked` - match unsafe { try_collect_into_array_unchecked(&mut map) } { - NeverShortCircuit(array) => array, + assert!(iter.size_hint().0 >= N); + fn next<T>(mut iter: impl UncheckedIterator<Item = T>) -> impl FnMut(usize) -> T { + move |_| { + // SAFETY: We know that `from_fn` will call this at most N times, + // and we checked to ensure that we have at least that many items. + unsafe { iter.next_unchecked() } + } } + + try_from_fn(next(iter)) } -/// Pulls `N` items from `iter` and returns them as an array. If the iterator -/// yields fewer than `N` items, `Err` is returned containing an iterator over -/// the already yielded items. +/// Version of [`try_from_fn`] using a passed-in slice in order to avoid +/// needing to monomorphize for every array length. /// -/// Since the iterator is passed as a mutable reference and this function calls -/// `next` at most `N` times, the iterator can still be used afterwards to -/// retrieve the remaining items. +/// This takes a generator rather than an iterator so that *at the type level* +/// it never needs to worry about running out of items. When combined with +/// an infallible `Try` type, that means the loop canonicalizes easily, allowing +/// it to optimize well. /// -/// If `iter.next()` panicks, all items already yielded by the iterator are -/// dropped. +/// It would be *possible* to unify this and [`iter_next_chunk_erased`] into one +/// function that does the union of both things, but last time it was that way +/// it resulted in poor codegen from the "are there enough source items?" checks +/// not optimizing away. 
So if you give it a shot, make sure to watch what +/// happens in the codegen tests. #[inline] -fn try_collect_into_array<I, T, R, const N: usize>( - iter: &mut I, -) -> Result<R::TryType, IntoIter<T, N>> +fn try_from_fn_erased<T, R>( + buffer: &mut [MaybeUninit<T>], + mut generator: impl FnMut(usize) -> R, +) -> ControlFlow<R::Residual> where - I: Iterator, - I::Item: Try<Output = T, Residual = R>, - R: Residual<[T; N]>, + R: Try<Output = T>, { - if N == 0 { - // SAFETY: An empty array is always inhabited and has no validity invariants. - return Ok(Try::from_output(unsafe { mem::zeroed() })); - } + let mut guard = Guard { array_mut: buffer, initialized: 0 }; - let mut array = MaybeUninit::uninit_array::<N>(); - let mut guard = Guard { array_mut: &mut array, initialized: 0 }; - - for _ in 0..N { - match iter.next() { - Some(item_rslt) => { - let item = match item_rslt.branch() { - ControlFlow::Break(r) => { - return Ok(FromResidual::from_residual(r)); - } - ControlFlow::Continue(elem) => elem, - }; - - // SAFETY: `guard.initialized` starts at 0, which means push can be called - // at most N times, which this loop does. - unsafe { - guard.push_unchecked(item); - } - } - None => { - let alive = 0..guard.initialized; - mem::forget(guard); - // SAFETY: `array` was initialized with exactly `initialized` - // number of elements. - return Err(unsafe { IntoIter::new_unchecked(array, alive) }); - } - } + while guard.initialized < guard.array_mut.len() { + let item = generator(guard.initialized).branch()?; + + // SAFETY: The loop condition ensures we have space to push the item + unsafe { guard.push_unchecked(item) }; } mem::forget(guard); - // SAFETY: All elements of the array were populated in the loop above. - let output = unsafe { array.transpose().assume_init() }; - Ok(Try::from_output(output)) + ControlFlow::Continue(()) } /// Panic guard for incremental initialization of arrays. @@ -922,14 +866,14 @@ where /// /// To minimize indirection fields are still pub but callers should at least use /// `push_unchecked` to signal that something unsafe is going on. -pub(crate) struct Guard<'a, T, const N: usize> { +struct Guard<'a, T> { /// The array to be initialized. - pub array_mut: &'a mut [MaybeUninit<T>; N], + pub array_mut: &'a mut [MaybeUninit<T>], /// The number of items that have been initialized so far. pub initialized: usize, } -impl<T, const N: usize> Guard<'_, T, N> { +impl<T> Guard<'_, T> { /// Adds an item to the array and updates the initialized item counter. /// /// # Safety @@ -947,28 +891,73 @@ impl<T, const N: usize> Guard<'_, T, N> { } } -impl<T, const N: usize> Drop for Guard<'_, T, N> { +impl<T> Drop for Guard<'_, T> { fn drop(&mut self) { - debug_assert!(self.initialized <= N); + debug_assert!(self.initialized <= self.array_mut.len()); // SAFETY: this slice will contain only initialized objects. unsafe { crate::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut( - &mut self.array_mut.get_unchecked_mut(..self.initialized), + self.array_mut.get_unchecked_mut(..self.initialized), )); } } } -/// Returns the next chunk of `N` items from the iterator or errors with an -/// iterator over the remainder. Used for `Iterator::next_chunk`. +/// Pulls `N` items from `iter` and returns them as an array. If the iterator +/// yields fewer than `N` items, `Err` is returned containing an iterator over +/// the already yielded items. 
+/// +/// Since the iterator is passed as a mutable reference and this function calls +/// `next` at most `N` times, the iterator can still be used afterwards to +/// retrieve the remaining items. +/// +/// If `iter.next()` panics, all items already yielded by the iterator are +/// dropped. +/// +/// Used for [`Iterator::next_chunk`]. +#[inline] +pub(crate) fn iter_next_chunk<T, const N: usize>( + iter: &mut impl Iterator<Item = T>, +) -> Result<[T; N], IntoIter<T, N>> { + let mut array = MaybeUninit::uninit_array::<N>(); + let r = iter_next_chunk_erased(&mut array, iter); + match r { + Ok(()) => { + // SAFETY: All elements of `array` were populated. + Ok(unsafe { MaybeUninit::array_assume_init(array) }) + } + Err(initialized) => { + // SAFETY: Only the first `initialized` elements were populated + Err(unsafe { IntoIter::new_unchecked(array, 0..initialized) }) + } + } +} + +/// Version of [`iter_next_chunk`] using a passed-in slice in order to avoid +/// needing to monomorphize for every array length. +/// +/// Unfortunately this loop has two exit conditions, the buffer filling up +/// or the iterator running out of items, making it tend to optimize poorly. #[inline] -pub(crate) fn iter_next_chunk<I, const N: usize>( - iter: &mut I, -) -> Result<[I::Item; N], IntoIter<I::Item, N>> -where - I: Iterator, -{ - let mut map = iter.map(NeverShortCircuit); - try_collect_into_array(&mut map).map(|NeverShortCircuit(arr)| arr) +fn iter_next_chunk_erased<T>( + buffer: &mut [MaybeUninit<T>], + iter: &mut impl Iterator<Item = T>, +) -> Result<(), usize> { + let mut guard = Guard { array_mut: buffer, initialized: 0 }; + while guard.initialized < guard.array_mut.len() { + let Some(item) = iter.next() else { + // Unlike `try_from_fn_erased`, we want to keep the partial results, + // so we need to defuse the guard instead of using `?`. + let initialized = guard.initialized; + mem::forget(guard); + return Err(initialized) + }; + + // SAFETY: The loop condition ensures we have space to push the item + unsafe { guard.push_unchecked(item) }; + } + + mem::forget(guard); + Ok(()) } diff --git a/library/core/src/cell.rs b/library/core/src/cell.rs index 129213fde..897d03595 100644 --- a/library/core/src/cell.rs +++ b/library/core/src/cell.rs @@ -196,7 +196,7 @@ use crate::cmp::Ordering; use crate::fmt::{self, Debug, Display}; use crate::marker::{PhantomData, Unsize}; use crate::mem; -use crate::ops::{CoerceUnsized, Deref, DerefMut}; +use crate::ops::{CoerceUnsized, Deref, DerefMut, DispatchFromDyn}; use crate::ptr::{self, NonNull}; mod lazy; @@ -571,6 +571,16 @@ impl<T: Default> Cell<T> { #[unstable(feature = "coerce_unsized", issue = "18598")] impl<T: CoerceUnsized<U>, U> CoerceUnsized<Cell<U>> for Cell<T> {} +// Allow types that wrap `Cell` to also implement `DispatchFromDyn` +// and become object safe method receivers. +// Note that currently `Cell` itself cannot be a method receiver +// because it does not implement Deref. +// In other words: +// `self: Cell<&Self>` won't work +// `self: CellWrapper<Self>` becomes possible +#[unstable(feature = "dispatch_from_dyn", issue = "none")] +impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Cell<U>> for Cell<T> {} + impl<T> Cell<[T]> { /// Returns a `&[Cell<T>]` from a `&Cell<[T]>` /// @@ -622,7 +632,7 @@ pub struct RefCell<T: ?Sized> { // Stores the location of the earliest currently active borrow. // This gets updated whenever we go from having zero borrows // to having a single borrow.
When a borrow occurs, this gets included - // in the generated `BorrowError/`BorrowMutError` + // in the generated `BorrowError`/`BorrowMutError` #[cfg(feature = "debug_refcell")] borrowed_at: Cell<Option<&'static crate::panic::Location<'static>>>, value: UnsafeCell<T>, @@ -2078,6 +2088,16 @@ impl<T> const From<T> for UnsafeCell<T> { #[unstable(feature = "coerce_unsized", issue = "18598")] impl<T: CoerceUnsized<U>, U> CoerceUnsized<UnsafeCell<U>> for UnsafeCell<T> {} +// Allow types that wrap `UnsafeCell` to also implement `DispatchFromDyn` +// and become object safe method receivers. +// Note that currently `UnsafeCell` itself cannot be a method receiver +// because it does not implement Deref. +// In other words: +// `self: UnsafeCell<&Self>` won't work +// `self: UnsafeCellWrapper<Self>` becomes possible +#[unstable(feature = "dispatch_from_dyn", issue = "none")] +impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<UnsafeCell<U>> for UnsafeCell<T> {} + /// [`UnsafeCell`], but [`Sync`]. /// /// This is just an `UnsafeCell`, except it implements `Sync` @@ -2169,6 +2189,17 @@ impl<T> const From<T> for SyncUnsafeCell<T> { //#[unstable(feature = "sync_unsafe_cell", issue = "95439")] impl<T: CoerceUnsized<U>, U> CoerceUnsized<SyncUnsafeCell<U>> for SyncUnsafeCell<T> {} +// Allow types that wrap `SyncUnsafeCell` to also implement `DispatchFromDyn` +// and become object safe method receivers. +// Note that currently `SyncUnsafeCell` itself cannot be a method receiver +// because it does not implement Deref. +// In other words: +// `self: SyncUnsafeCell<&Self>` won't work +// `self: SyncUnsafeCellWrapper<Self>` becomes possible +#[unstable(feature = "dispatch_from_dyn", issue = "none")] +//#[unstable(feature = "sync_unsafe_cell", issue = "95439")] +impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<SyncUnsafeCell<U>> for SyncUnsafeCell<T> {} + #[allow(unused)] fn assert_coerce_unsized( a: UnsafeCell<&i32>, diff --git a/library/core/src/cell/once.rs b/library/core/src/cell/once.rs index 7757068a4..f74e563f1 100644 --- a/library/core/src/cell/once.rs +++ b/library/core/src/cell/once.rs @@ -298,3 +298,7 @@ impl<T> const From<T> for OnceCell<T> { OnceCell { inner: UnsafeCell::new(Some(value)) } } } + +// Just like for `Cell<T>` this isn't needed, but results in nicer error messages. 
+#[unstable(feature = "once_cell", issue = "74465")] +impl<T> !Sync for OnceCell<T> {} diff --git a/library/core/src/char/convert.rs b/library/core/src/char/convert.rs index f1a51a550..136bbcb8b 100644 --- a/library/core/src/char/convert.rs +++ b/library/core/src/char/convert.rs @@ -2,6 +2,7 @@ use crate::char::TryFromCharError; use crate::convert::TryFrom; +use crate::error::Error; use crate::fmt; use crate::mem::transmute; use crate::str::FromStr; @@ -150,14 +151,16 @@ pub struct ParseCharError { kind: CharErrorKind, } -impl ParseCharError { - #[unstable( - feature = "char_error_internals", - reason = "this method should not be available publicly", - issue = "none" - )] - #[doc(hidden)] - pub fn __description(&self) -> &str { +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +enum CharErrorKind { + EmptyString, + TooManyChars, +} + +#[stable(feature = "char_from_str", since = "1.20.0")] +impl Error for ParseCharError { + #[allow(deprecated)] + fn description(&self) -> &str { match self.kind { CharErrorKind::EmptyString => "cannot parse char from empty string", CharErrorKind::TooManyChars => "too many characters in string", @@ -165,16 +168,11 @@ impl ParseCharError { } } -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -enum CharErrorKind { - EmptyString, - TooManyChars, -} - #[stable(feature = "char_from_str", since = "1.20.0")] impl fmt::Display for ParseCharError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.__description().fmt(f) + #[allow(deprecated)] + self.description().fmt(f) } } diff --git a/library/core/src/char/decode.rs b/library/core/src/char/decode.rs index eeb088030..dbfe251f2 100644 --- a/library/core/src/char/decode.rs +++ b/library/core/src/char/decode.rs @@ -3,8 +3,6 @@ use crate::error::Error; use crate::fmt; -use super::from_u32_unchecked; - /// An iterator that decodes UTF-16 encoded code points from an iterator of `u16`s. /// /// This `struct` is created by the [`decode_utf16`] method on [`char`]. See its @@ -49,7 +47,7 @@ impl<I: Iterator<Item = u16>> Iterator for DecodeUtf16<I> { if !u.is_utf16_surrogate() { // SAFETY: not a surrogate - Some(Ok(unsafe { from_u32_unchecked(u as u32) })) + Some(Ok(unsafe { char::from_u32_unchecked(u as u32) })) } else if u >= 0xDC00 { // a trailing surrogate Some(Err(DecodeUtf16Error { code: u })) @@ -69,7 +67,7 @@ impl<I: Iterator<Item = u16>> Iterator for DecodeUtf16<I> { // all ok, so lets decode it. 
let c = (((u & 0x3ff) as u32) << 10 | (u2 & 0x3ff) as u32) + 0x1_0000; // SAFETY: we checked that it's a legal unicode value - Some(Ok(unsafe { from_u32_unchecked(c) })) + Some(Ok(unsafe { char::from_u32_unchecked(c) })) } } diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs index 3e7383b4c..9bc97ea0b 100644 --- a/library/core/src/char/methods.rs +++ b/library/core/src/char/methods.rs @@ -53,15 +53,13 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char::decode_utf16; - /// /// // 𝄞mus<invalid>ic<invalid> /// let v = [ /// 0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834, /// ]; /// /// assert_eq!( - /// decode_utf16(v) + /// char::decode_utf16(v) /// .map(|r| r.map_err(|e| e.unpaired_surrogate())) /// .collect::<Vec<_>>(), /// vec![ @@ -77,16 +75,14 @@ impl char { /// A lossy decoder can be obtained by replacing `Err` results with the replacement character: /// /// ``` - /// use std::char::{decode_utf16, REPLACEMENT_CHARACTER}; - /// /// // 𝄞mus<invalid>ic<invalid> /// let v = [ /// 0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834, /// ]; /// /// assert_eq!( - /// decode_utf16(v) - /// .map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)) + /// char::decode_utf16(v) + /// .map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER)) /// .collect::<String>(), /// "𝄞mus�ic�" /// ); @@ -123,8 +119,6 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char; - /// /// let c = char::from_u32(0x2764); /// /// assert_eq!(Some('❤'), c); @@ -133,8 +127,6 @@ impl char { /// Returning `None` when the input is not a valid `char`: /// /// ``` - /// use std::char; - /// /// let c = char::from_u32(0x110000); /// /// assert_eq!(None, c); @@ -176,8 +168,6 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char; - /// /// let c = unsafe { char::from_u32_unchecked(0x2764) }; /// /// assert_eq!('❤', c); @@ -210,8 +200,6 @@ impl char { /// Basic usage: /// /// ``` - /// use std::char; - /// /// let c = char::from_digit(4, 10); /// /// assert_eq!(Some('4'), c); @@ -225,8 +213,6 @@ impl char { /// Returning `None` when the input is not a digit: /// /// ``` - /// use std::char; - /// /// let c = char::from_digit(20, 10); /// /// assert_eq!(None, c); @@ -235,8 +221,6 @@ impl char { /// Passing a large radix, causing a panic: /// /// ```should_panic - /// use std::char; - /// /// // this panics /// let _c = char::from_digit(1, 37); /// ``` @@ -1786,7 +1770,7 @@ pub fn encode_utf16_raw(mut code: u32, dst: &mut [u16]) -> &mut [u16] { } else { panic!( "encode_utf16: need {} units to encode U+{:X}, but the buffer has {}", - from_u32_unchecked(code).len_utf16(), + char::from_u32_unchecked(code).len_utf16(), code, dst.len(), ) diff --git a/library/core/src/char/mod.rs b/library/core/src/char/mod.rs index af98059cf..8ec78e887 100644 --- a/library/core/src/char/mod.rs +++ b/library/core/src/char/mod.rs @@ -189,7 +189,7 @@ impl Iterator for EscapeUnicode { } EscapeUnicodeState::Value => { let hex_digit = ((self.c as u32) >> (self.hex_digit_idx * 4)) & 0xf; - let c = from_digit(hex_digit, 16).unwrap(); + let c = char::from_digit(hex_digit, 16).unwrap(); if self.hex_digit_idx == 0 { self.state = EscapeUnicodeState::RightBrace; } else { diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs index a7d6fec7d..068637d1a 100644 --- a/library/core/src/cmp.rs +++ b/library/core/src/cmp.rs @@ -22,7 +22,9 @@ #![stable(feature = "rust1", since = "1.0.0")] -use crate::const_closure::ConstFnMutClosure; +mod bytewise; +pub(crate) use 
bytewise::BytewiseEq; + use crate::marker::Destruct; use self::Ordering::*; @@ -798,12 +800,6 @@ pub trait Ord: Eq + PartialOrd<Self> { Self: Sized, Self: ~const Destruct, { - #[cfg(not(bootstrap))] - { - max_by(self, other, Ord::cmp) - } - - #[cfg(bootstrap)] match self.cmp(&other) { Ordering::Less | Ordering::Equal => other, Ordering::Greater => self, @@ -828,12 +824,6 @@ pub trait Ord: Eq + PartialOrd<Self> { Self: Sized, Self: ~const Destruct, { - #[cfg(not(bootstrap))] - { - min_by(self, other, Ord::cmp) - } - - #[cfg(bootstrap)] match self.cmp(&other) { Ordering::Less | Ordering::Equal => self, Ordering::Greater => other, @@ -1200,12 +1190,7 @@ pub const fn min<T: ~const Ord + ~const Destruct>(v1: T, v2: T) -> T { #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] -pub const fn min_by<T, F: ~const FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T -where - T: ~const Destruct, - F: ~const Destruct, -{ +pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T { match compare(&v1, &v2) { Ordering::Less | Ordering::Equal => v1, Ordering::Greater => v2, @@ -1227,30 +1212,8 @@ where #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] -pub const fn min_by_key<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(v1: T, v2: T, mut f: F) -> T -where - T: ~const Destruct, - F: ~const Destruct, - K: ~const Destruct, -{ - cfg_if! { - if #[cfg(bootstrap)] { - const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>( - f: &mut F, - (v1, v2): (&T, &T), - ) -> Ordering - where - T: ~const Destruct, - K: ~const Destruct, - { - f(v1).cmp(&f(v2)) - } - min_by(v1, v2, ConstFnMutClosure::new(&mut f, imp)) - } else { - min_by(v1, v2, const |v1, v2| f(v1).cmp(&f(v2))) - } - } +pub fn min_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T { + min_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2))) } /// Compares and returns the maximum of two values. 
@@ -1291,12 +1254,7 @@ pub const fn max<T: ~const Ord + ~const Destruct>(v1: T, v2: T) -> T { #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] -pub const fn max_by<T, F: ~const FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T -where - T: ~const Destruct, - F: ~const Destruct, -{ +pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T { match compare(&v1, &v2) { Ordering::Less | Ordering::Equal => v2, Ordering::Greater => v1, @@ -1318,24 +1276,8 @@ where #[inline] #[must_use] #[stable(feature = "cmp_min_max_by", since = "1.53.0")] -#[rustc_const_unstable(feature = "const_cmp", issue = "92391")] -pub const fn max_by_key<T, F: ~const FnMut(&T) -> K, K: ~const Ord>(v1: T, v2: T, mut f: F) -> T -where - T: ~const Destruct, - F: ~const Destruct, - K: ~const Destruct, -{ - const fn imp<T, F: ~const FnMut(&T) -> K, K: ~const Ord>( - f: &mut F, - (v1, v2): (&T, &T), - ) -> Ordering - where - T: ~const Destruct, - K: ~const Destruct, - { - f(v1).cmp(&f(v2)) - } - max_by(v1, v2, ConstFnMutClosure::new(&mut f, imp)) +pub fn max_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T { + max_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2))) } // Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types @@ -1536,9 +1478,10 @@ mod impls { } } #[stable(feature = "rust1", since = "1.0.0")] - impl<A: ?Sized, B: ?Sized> PartialOrd<&B> for &A + #[rustc_const_unstable(feature = "const_cmp", issue = "92391")] + impl<A: ?Sized, B: ?Sized> const PartialOrd<&B> for &A where - A: PartialOrd<B>, + A: ~const PartialOrd<B>, { #[inline] fn partial_cmp(&self, other: &&B) -> Option<Ordering> { diff --git a/library/core/src/cmp/bytewise.rs b/library/core/src/cmp/bytewise.rs new file mode 100644 index 000000000..2548d9e24 --- /dev/null +++ b/library/core/src/cmp/bytewise.rs @@ -0,0 +1,83 @@ +use crate::num::{NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize}; +use crate::num::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize}; + +/// Types where `==` & `!=` are equivalent to comparing their underlying bytes. +/// +/// Importantly, this means no floating-point types, as those have different +/// byte representations (like `-0` and `+0`) which compare as the same. +/// Since byte arrays are `Eq`, that implies that these types are probably also +/// `Eq`, but that's not technically required to use this trait. +/// +/// `Rhs` is *de facto* always `Self`, but the separate parameter is important +/// to avoid the `specializing impl repeats parameter` error when consuming this. +/// +/// # Safety +/// +/// - `Self` and `Rhs` have no padding. +/// - `Self` and `Rhs` have the same layout (size and alignment). +/// - Neither `Self` nor `Rhs` have provenance, so integer comparisons are correct. +/// - `<Self as PartialEq<Rhs>>::{eq,ne}` are equivalent to comparing the bytes. +#[rustc_specialization_trait] +pub(crate) unsafe trait BytewiseEq<Rhs = Self>: PartialEq<Rhs> + Sized {} + +macro_rules! is_bytewise_comparable { + ($($t:ty),+ $(,)?) => {$( + unsafe impl BytewiseEq for $t {} + )+}; +} + +// SAFETY: All the ordinary integer types have no padding, and are not pointers. +is_bytewise_comparable!(u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize); + +// SAFETY: These have *niches*, but no *padding* and no *provenance*, +// so we can compare them directly. 
+is_bytewise_comparable!(bool, char, super::Ordering); + +// SAFETY: Similarly, the non-zero types have a niche, but no undef and no pointers, +// and they compare like their underlying numeric type. +is_bytewise_comparable!( + NonZeroU8, + NonZeroU16, + NonZeroU32, + NonZeroU64, + NonZeroU128, + NonZeroUsize, + NonZeroI8, + NonZeroI16, + NonZeroI32, + NonZeroI64, + NonZeroI128, + NonZeroIsize, +); + +// SAFETY: The NonZero types have the "null" optimization guaranteed, and thus +// are also safe to equality-compare bitwise inside an `Option`. +// The way `PartialOrd` is defined for `Option` means that this wouldn't work +// for `<` or `>` on the signed types, but since we only do `==` it's fine. +is_bytewise_comparable!( + Option<NonZeroU8>, + Option<NonZeroU16>, + Option<NonZeroU32>, + Option<NonZeroU64>, + Option<NonZeroU128>, + Option<NonZeroUsize>, + Option<NonZeroI8>, + Option<NonZeroI16>, + Option<NonZeroI32>, + Option<NonZeroI64>, + Option<NonZeroI128>, + Option<NonZeroIsize>, +); + +macro_rules! is_bytewise_comparable_array_length { + ($($n:literal),+ $(,)?) => {$( + // SAFETY: Arrays have no padding between elements, so if the elements are + // `BytewiseEq`, then the whole array can be too. + unsafe impl<T: BytewiseEq<U>, U> BytewiseEq<[U; $n]> for [T; $n] {} + )+}; +} + +// Frustratingly, this can't be made const-generic as it gets +// error: specializing impl repeats parameter `N` +// so just do it for a couple of plausibly-common ones. +is_bytewise_comparable_array_length!(0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64); diff --git a/library/core/src/const_closure.rs b/library/core/src/const_closure.rs deleted file mode 100644 index 97900a486..000000000 --- a/library/core/src/const_closure.rs +++ /dev/null @@ -1,78 +0,0 @@ -use crate::marker::Destruct; -use crate::marker::Tuple; - -/// Struct representing a closure with mutably borrowed data. -/// -/// Example: -/// ```no_build -/// #![feature(const_mut_refs)] -/// use crate::const_closure::ConstFnMutClosure; -/// const fn imp(state: &mut i32, (arg,): (i32,)) -> i32 { -/// *state += arg; -/// *state -/// } -/// let mut i = 5; -/// let mut cl = ConstFnMutClosure::new(&mut i, imp); -/// -/// assert!(7 == cl(2)); -/// assert!(8 == cl(1)); -/// ``` -pub(crate) struct ConstFnMutClosure<CapturedData, Function> { - /// The Data captured by the Closure. - /// Must be either a (mutable) reference or a tuple of (mutable) references. - pub data: CapturedData, - /// The Function of the Closure, must be: Fn(CapturedData, ClosureArgs) -> ClosureReturn - pub func: Function, -} -impl<'a, CapturedData: ?Sized, Function> ConstFnMutClosure<&'a mut CapturedData, Function> { - /// Function for creating a new closure. - /// - /// `data` is the a mutable borrow of data that is captured from the environment. - /// If you want Data to be a tuple of mutable Borrows, the struct must be constructed manually. - /// - /// `func` is the function of the closure, it gets the data and a tuple of the arguments closure - /// and return the return value of the closure. - pub(crate) const fn new<ClosureArguments, ClosureReturnValue>( - data: &'a mut CapturedData, - func: Function, - ) -> Self - where - Function: ~const Fn(&mut CapturedData, ClosureArguments) -> ClosureReturnValue, - { - Self { data, func } - } -} - -macro_rules! 
impl_fn_mut_tuple { - ($($var:ident)*) => { - #[allow(unused_parens)] - impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const - FnOnce<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function> - where - Function: ~const Fn(($(&mut $var),*), ClosureArguments) -> ClosureReturnValue+ ~const Destruct, - { - type Output = ClosureReturnValue; - - extern "rust-call" fn call_once(mut self, args: ClosureArguments) -> Self::Output { - self.call_mut(args) - } - } - #[allow(unused_parens)] - impl<'a, $($var,)* ClosureArguments: Tuple, Function, ClosureReturnValue> const - FnMut<ClosureArguments> for ConstFnMutClosure<($(&'a mut $var),*), Function> - where - Function: ~const Fn(($(&mut $var),*), ClosureArguments)-> ClosureReturnValue, - { - extern "rust-call" fn call_mut(&mut self, args: ClosureArguments) -> Self::Output { - #[allow(non_snake_case)] - let ($($var),*) = &mut self.data; - (self.func)(($($var),*), args) - } - } - }; -} -impl_fn_mut_tuple!(A); -impl_fn_mut_tuple!(A B); -impl_fn_mut_tuple!(A B C); -impl_fn_mut_tuple!(A B C D); -impl_fn_mut_tuple!(A B C D E); diff --git a/library/core/src/convert/mod.rs b/library/core/src/convert/mod.rs index f95b880df..805354be0 100644 --- a/library/core/src/convert/mod.rs +++ b/library/core/src/convert/mod.rs @@ -542,7 +542,7 @@ pub trait Into<T>: Sized { #[const_trait] pub trait From<T>: Sized { /// Converts to this type from the input type. - #[lang = "from"] + #[rustc_diagnostic_item = "from_fn"] #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn from(value: T) -> Self; diff --git a/library/core/src/error.rs b/library/core/src/error.rs index 7152300ab..d4103183c 100644 --- a/library/core/src/error.rs +++ b/library/core/src/error.rs @@ -28,6 +28,7 @@ use crate::fmt::{Debug, Display}; #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "Error")] #[rustc_has_incoherent_inherent_impls] +#[cfg_attr(not(bootstrap), allow(multiple_supertrait_upcastable))] pub trait Error: Debug + Display { /// The lower-level source of this error, if any. 
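The `source` method whose doc comment appears here is what lets callers walk a chain of causes. A minimal, self-contained illustration of that chain (the `High`/`Low` types are hypothetical, purely for demonstration):

```
use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct Low;

#[derive(Debug)]
struct High(Low);

impl fmt::Display for Low {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "low-level cause")
    }
}

impl fmt::Display for High {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "high-level failure")
    }
}

impl Error for Low {}

impl Error for High {
    // Expose the wrapped error as this error's lower-level source.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(&self.0)
    }
}

fn main() {
    let err = High(Low);
    // Walks the chain, printing "caused by: low-level cause".
    let mut cause = err.source();
    while let Some(e) = cause {
        println!("caused by: {e}");
        cause = e.source();
    }
}
```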
/// @@ -485,26 +486,10 @@ impl Error for crate::char::CharTryFromError { } } -#[stable(feature = "char_from_str", since = "1.20.0")] -impl Error for crate::char::ParseCharError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - #[stable(feature = "duration_checked_float", since = "1.66.0")] impl Error for crate::time::TryFromFloatSecsError {} -#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")] -impl Error for crate::ffi::FromBytesWithNulError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - -#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] +#[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")] impl Error for crate::ffi::FromBytesUntilNulError {} #[unstable(feature = "get_many_mut", issue = "104642")] diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs index 15dd9ea7e..fe8abdf7f 100644 --- a/library/core/src/ffi/c_str.rs +++ b/library/core/src/ffi/c_str.rs @@ -1,4 +1,5 @@ use crate::cmp::Ordering; +use crate::error::Error; use crate::ffi::c_char; use crate::fmt; use crate::intrinsics; @@ -129,10 +130,12 @@ impl FromBytesWithNulError { const fn not_nul_terminated() -> FromBytesWithNulError { FromBytesWithNulError { kind: FromBytesWithNulErrorKind::NotNulTerminated } } +} - #[doc(hidden)] - #[unstable(feature = "cstr_internals", issue = "none")] - pub fn __description(&self) -> &str { +#[stable(feature = "frombyteswithnulerror_impls", since = "1.17.0")] +impl Error for FromBytesWithNulError { + #[allow(deprecated)] + fn description(&self) -> &str { match self.kind { FromBytesWithNulErrorKind::InteriorNul(..) => { "data provided contains an interior nul byte" @@ -150,10 +153,10 @@ impl FromBytesWithNulError { /// This error is created by the [`CStr::from_bytes_until_nul`] method. 
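With `from_bytes_until_nul` and its error type now stable, and the `Error` impls moved next to their types as shown above, `FromBytesUntilNulError` composes with `?` and `Box<dyn Error>` like any other std error. A small sketch of what that enables (`first_cstr` is an illustrative helper, not std API):

```
use std::error::Error;
use std::ffi::CStr;

// `?` converts the concrete error into `Box<dyn Error>` via the blanket impl.
fn first_cstr(buf: &[u8]) -> Result<&CStr, Box<dyn Error>> {
    Ok(CStr::from_bytes_until_nul(buf)?)
}

fn main() {
    assert!(first_cstr(b"hi\0 trailing garbage").is_ok());
    let err = first_cstr(b"no nul here").unwrap_err();
    assert_eq!(err.to_string(), "data provided does not contain a nul");
}
```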
/// #[derive(Clone, PartialEq, Eq, Debug)] -#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] +#[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")] pub struct FromBytesUntilNulError(()); -#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] +#[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")] impl fmt::Display for FromBytesUntilNulError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "data provided does not contain a nul") @@ -180,7 +183,7 @@ impl Default for &CStr { impl fmt::Display for FromBytesWithNulError { #[allow(deprecated, deprecated_in_future)] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.__description())?; + f.write_str(self.description())?; if let FromBytesWithNulErrorKind::InteriorNul(pos) = self.kind { write!(f, " at byte pos {pos}")?; } @@ -306,8 +309,6 @@ impl CStr { /// /// # Examples /// ``` - /// #![feature(cstr_from_bytes_until_nul)] - /// /// use std::ffi::CStr; /// /// let mut buffer = [0u8; 16]; @@ -322,8 +323,9 @@ impl CStr { /// assert_eq!(c_str.to_str().unwrap(), "AAAAAAAA"); /// ``` /// - #[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] - #[rustc_const_unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")] + #[rustc_allow_const_fn_unstable(const_slice_index)] + #[stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")] + #[rustc_const_stable(feature = "cstr_from_bytes_until_nul", since = "1.69.0")] pub const fn from_bytes_until_nul(bytes: &[u8]) -> Result<&CStr, FromBytesUntilNulError> { let nul_pos = memchr::memchr(0, bytes); match nul_pos { @@ -455,6 +457,10 @@ impl CStr { /// to a contiguous region of memory terminated with a 0 byte to represent /// the end of the string. /// + /// The type of the returned pointer is + /// [`*const c_char`][crate::ffi::c_char], and whether it's + /// an alias for `*const i8` or `*const u8` is platform-specific. + /// /// **WARNING** /// /// The returned pointer is read-only; writing to it (including passing it @@ -468,6 +474,7 @@ impl CStr { /// # #![allow(unused_must_use)] #![allow(temporary_cstring_as_ptr)] /// use std::ffi::CString; /// + /// // Do not do this: /// let ptr = CString::new("Hello").expect("CString::new failed").as_ptr(); /// unsafe { /// // `ptr` is dangling diff --git a/library/core/src/ffi/mod.rs b/library/core/src/ffi/mod.rs index 76daceecd..27f665904 100644 --- a/library/core/src/ffi/mod.rs +++ b/library/core/src/ffi/mod.rs @@ -144,6 +144,7 @@ mod c_char_definition { ) ), all(target_os = "fuchsia", target_arch = "aarch64"), + all(target_os = "nto", target_arch = "aarch64"), target_os = "horizon" ))] { pub type c_char = u8; diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs index fa5073e33..c9821bf81 100644 --- a/library/core/src/fmt/mod.rs +++ b/library/core/src/fmt/mod.rs @@ -267,6 +267,7 @@ extern "C" { /// family of functions. It contains a function to format the given value. At /// compile time it is ensured that the function and the value have the correct /// types, and then this struct is used to canonicalize arguments to one type. +#[cfg_attr(not(bootstrap), lang = "format_argument")] #[derive(Copy, Clone)] #[allow(missing_debug_implementations)] #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")] @@ -279,6 +280,7 @@ pub struct ArgumentV1<'a> { /// This struct represents the unsafety of constructing an `Arguments`. 
/// It exists, rather than an unsafe function, in order to simplify the expansion /// of `format_args!(..)` and reduce the scope of the `unsafe` block. +#[cfg_attr(not(bootstrap), lang = "format_unsafe_arg")] #[allow(missing_debug_implementations)] #[doc(hidden)] #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")] @@ -473,8 +475,8 @@ impl<'a> Arguments<'a> { /// ``` /// /// [`format()`]: ../../std/fmt/fn.format.html +#[cfg_attr(not(bootstrap), lang = "format_arguments")] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(test), rustc_diagnostic_item = "Arguments")] #[derive(Copy, Clone)] pub struct Arguments<'a> { // Format string pieces to print. @@ -489,9 +491,26 @@ pub struct Arguments<'a> { } impl<'a> Arguments<'a> { - /// Get the formatted string, if it has no arguments to be formatted. + /// Get the formatted string, if it has no arguments to be formatted at runtime. /// - /// This can be used to avoid allocations in the most trivial case. + /// This can be used to avoid allocations in some cases. + /// + /// # Guarantees + /// + /// For `format_args!("just a literal")`, this function is guaranteed to + /// return `Some("just a literal")`. + /// + /// For most cases with placeholders, this function will return `None`. + /// + /// However, the compiler may perform optimizations that can cause this + /// function to return `Some(_)` even if the format string contains + /// placeholders. For example, `format_args!("Hello, {}!", "world")` may be + /// optimized to `format_args!("Hello, world!")`, such that `as_str()` + /// returns `Some("Hello, world!")`. + /// + /// The behavior for anything but the trivial case (without placeholders) + /// is not guaranteed, and should not be relied upon for anything other + /// than optimization. 
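One concrete way to exploit the guarantee spelled out above: fall back to real formatting only when `as_str` returns `None`. A minimal sketch using `Cow` (`render` is an illustrative name, not std API):

```
use std::borrow::Cow;
use std::fmt::Arguments;

fn render(args: Arguments<'_>) -> Cow<'static, str> {
    match args.as_str() {
        // Guaranteed for literal-only format strings: borrow, don't allocate.
        Some(s) => Cow::Borrowed(s),
        // Placeholders that survive to runtime need actual formatting.
        None => Cow::Owned(args.to_string()),
    }
}

fn main() {
    assert_eq!(render(format_args!("just a literal")), "just a literal");
    // A runtime value can't be folded away, so this arm allocates.
    let dir = std::env::current_dir();
    assert!(matches!(render(format_args!("{dir:?}")), Cow::Owned(_)));
}
```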
/// /// # Examples /// @@ -512,7 +531,7 @@ impl<'a> Arguments<'a> { /// ```rust /// assert_eq!(format_args!("hello").as_str(), Some("hello")); /// assert_eq!(format_args!("").as_str(), Some("")); - /// assert_eq!(format_args!("{}", 1).as_str(), None); + /// assert_eq!(format_args!("{:?}", std::env::current_dir()).as_str(), None); /// ``` #[stable(feature = "fmt_as_str", since = "1.52.0")] #[rustc_const_unstable(feature = "const_arguments_as_str", issue = "103900")] @@ -1355,11 +1374,11 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{}", Foo::new(2)), "2"); - /// assert_eq!(&format!("{}", Foo::new(-1)), "-1"); - /// assert_eq!(&format!("{}", Foo::new(0)), "0"); - /// assert_eq!(&format!("{:#}", Foo::new(-1)), "-Foo 1"); - /// assert_eq!(&format!("{:0>#8}", Foo::new(-1)), "00-Foo 1"); + /// assert_eq!(format!("{}", Foo::new(2)), "2"); + /// assert_eq!(format!("{}", Foo::new(-1)), "-1"); + /// assert_eq!(format!("{}", Foo::new(0)), "0"); + /// assert_eq!(format!("{:#}", Foo::new(-1)), "-Foo 1"); + /// assert_eq!(format!("{:0>#8}", Foo::new(-1)), "00-Foo 1"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn pad_integral(&mut self, is_nonnegative: bool, prefix: &str, buf: &str) -> Result { @@ -1452,8 +1471,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{Foo:<4}"), "Foo "); - /// assert_eq!(&format!("{Foo:0>4}"), "0Foo"); + /// assert_eq!(format!("{Foo:<4}"), "Foo "); + /// assert_eq!(format!("{Foo:0>4}"), "0Foo"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn pad(&mut self, s: &str) -> Result { @@ -1636,8 +1655,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{Foo}"), "Foo"); - /// assert_eq!(&format!("{Foo:0>8}"), "Foo"); + /// assert_eq!(format!("{Foo}"), "Foo"); + /// assert_eq!(format!("{Foo:0>8}"), "Foo"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn write_str(&mut self, data: &str) -> Result { @@ -1659,8 +1678,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{}", Foo(-1)), "Foo -1"); - /// assert_eq!(&format!("{:0>8}", Foo(2)), "Foo 2"); + /// assert_eq!(format!("{}", Foo(-1)), "Foo -1"); + /// assert_eq!(format!("{:0>8}", Foo(2)), "Foo 2"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn write_fmt(&mut self, fmt: Arguments<'_>) -> Result { @@ -1703,8 +1722,8 @@ impl<'a> Formatter<'a> { /// } /// /// // We set alignment to the right with ">". 
- /// assert_eq!(&format!("{Foo:G>3}"), "GGG"); - /// assert_eq!(&format!("{Foo:t>6}"), "tttttt"); + /// assert_eq!(format!("{Foo:G>3}"), "GGG"); + /// assert_eq!(format!("{Foo:t>6}"), "tttttt"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1738,10 +1757,10 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{Foo:<}"), "left"); - /// assert_eq!(&format!("{Foo:>}"), "right"); - /// assert_eq!(&format!("{Foo:^}"), "center"); - /// assert_eq!(&format!("{Foo}"), "into the void"); + /// assert_eq!(format!("{Foo:<}"), "left"); + /// assert_eq!(format!("{Foo:>}"), "right"); + /// assert_eq!(format!("{Foo:^}"), "center"); + /// assert_eq!(format!("{Foo}"), "into the void"); /// ``` #[must_use] #[stable(feature = "fmt_flags_align", since = "1.28.0")] @@ -1767,7 +1786,7 @@ impl<'a> Formatter<'a> { /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { /// if let Some(width) = formatter.width() { /// // If we received a width, we use it - /// write!(formatter, "{:width$}", &format!("Foo({})", self.0), width = width) + /// write!(formatter, "{:width$}", format!("Foo({})", self.0), width = width) /// } else { /// // Otherwise we do nothing special /// write!(formatter, "Foo({})", self.0) @@ -1775,8 +1794,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:10}", Foo(23)), "Foo(23) "); - /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)"); + /// assert_eq!(format!("{:10}", Foo(23)), "Foo(23) "); + /// assert_eq!(format!("{}", Foo(23)), "Foo(23)"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1806,8 +1825,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:.4}", Foo(23.2)), "Foo(23.2000)"); - /// assert_eq!(&format!("{}", Foo(23.2)), "Foo(23.20)"); + /// assert_eq!(format!("{:.4}", Foo(23.2)), "Foo(23.2000)"); + /// assert_eq!(format!("{}", Foo(23.2)), "Foo(23.20)"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1837,9 +1856,9 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:+}", Foo(23)), "Foo(+23)"); - /// assert_eq!(&format!("{:+}", Foo(-23)), "Foo(-23)"); - /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)"); + /// assert_eq!(format!("{:+}", Foo(23)), "Foo(+23)"); + /// assert_eq!(format!("{:+}", Foo(-23)), "Foo(-23)"); + /// assert_eq!(format!("{}", Foo(23)), "Foo(23)"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1867,8 +1886,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:-}", Foo(23)), "-Foo(23)"); - /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)"); + /// assert_eq!(format!("{:-}", Foo(23)), "-Foo(23)"); + /// assert_eq!(format!("{}", Foo(23)), "Foo(23)"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1895,8 +1914,8 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:#}", Foo(23)), "Foo(23)"); - /// assert_eq!(&format!("{}", Foo(23)), "23"); + /// assert_eq!(format!("{:#}", Foo(23)), "Foo(23)"); + /// assert_eq!(format!("{}", Foo(23)), "23"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1922,7 +1941,7 @@ impl<'a> Formatter<'a> { /// } /// } /// - /// assert_eq!(&format!("{:04}", Foo(23)), "23"); + /// assert_eq!(format!("{:04}", Foo(23)), "23"); /// ``` #[must_use] #[stable(feature = "fmt_flags", since = "1.5.0")] diff --git a/library/core/src/fmt/rt/v1.rs b/library/core/src/fmt/rt/v1.rs index 37202b277..11a50951a 100644 --- a/library/core/src/fmt/rt/v1.rs +++ 
b/library/core/src/fmt/rt/v1.rs @@ -5,7 +5,9 @@ //! these can be statically allocated and are slightly optimized for the runtime #![allow(missing_debug_implementations)] +#[cfg_attr(not(bootstrap), lang = "format_placeholder")] #[derive(Copy, Clone)] +// FIXME: Rename this to Placeholder pub struct Argument { pub position: usize, pub format: FormatSpec, @@ -20,7 +22,22 @@ pub struct FormatSpec { pub width: Count, } +impl Argument { + #[inline(always)] + pub const fn new( + position: usize, + fill: char, + align: Alignment, + flags: u32, + precision: Count, + width: Count, + ) -> Self { + Self { position, format: FormatSpec { fill, align, flags, precision, width } } + } +} + /// Possible alignments that can be requested as part of a formatting directive. +#[cfg_attr(not(bootstrap), lang = "format_alignment")] #[derive(Copy, Clone, PartialEq, Eq)] pub enum Alignment { /// Indication that contents should be left-aligned. @@ -34,6 +51,7 @@ pub enum Alignment { } /// Used by [width](https://doc.rust-lang.org/std/fmt/#width) and [precision](https://doc.rust-lang.org/std/fmt/#precision) specifiers. +#[cfg_attr(not(bootstrap), lang = "format_count")] #[derive(Copy, Clone)] pub enum Count { /// Specified with a literal number, stores the value diff --git a/library/core/src/future/mod.rs b/library/core/src/future/mod.rs index c4fb36209..46cbcd435 100644 --- a/library/core/src/future/mod.rs +++ b/library/core/src/future/mod.rs @@ -56,51 +56,6 @@ unsafe impl Send for ResumeTy {} #[unstable(feature = "gen_future", issue = "50547")] unsafe impl Sync for ResumeTy {} -/// Wrap a generator in a future. -/// -/// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give -/// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`). -// This is `const` to avoid extra errors after we recover from `const async fn` -#[doc(hidden)] -#[unstable(feature = "gen_future", issue = "50547")] -#[rustc_const_unstable(feature = "gen_future", issue = "50547")] -#[inline] -pub const fn from_generator<T>(gen: T) -> impl Future<Output = T::Return> -where - T: crate::ops::Generator<ResumeTy, Yield = ()>, -{ - use crate::{ - ops::{Generator, GeneratorState}, - pin::Pin, - task::Poll, - }; - - #[rustc_diagnostic_item = "gen_future"] - struct GenFuture<T: Generator<ResumeTy, Yield = ()>>(T); - - // We rely on the fact that async/await futures are immovable in order to create - // self-referential borrows in the underlying generator. - impl<T: Generator<ResumeTy, Yield = ()>> !Unpin for GenFuture<T> {} - - impl<T: Generator<ResumeTy, Yield = ()>> Future for GenFuture<T> { - type Output = T::Return; - #[track_caller] - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { - // SAFETY: Safe because we're !Unpin + !Drop, and this is just a field projection. - let gen = unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) }; - - // Resume the generator, turning the `&mut Context` into a `NonNull` raw pointer. The - // `.await` lowering will safely cast that back to a `&mut Context`. 
- match gen.resume(ResumeTy(NonNull::from(cx).cast::<Context<'static>>())) { - GeneratorState::Yielded(()) => Poll::Pending, - GeneratorState::Complete(x) => Poll::Ready(x), - } - } - } - - GenFuture(gen) -} - #[lang = "get_context"] #[doc(hidden)] #[unstable(feature = "gen_future", issue = "50547")] diff --git a/library/core/src/hint.rs b/library/core/src/hint.rs index 5a76e8669..ee13dae60 100644 --- a/library/core/src/hint.rs +++ b/library/core/src/hint.rs @@ -216,7 +216,8 @@ pub fn spin_loop() { /// /// Note however, that `black_box` is only (and can only be) provided on a "best-effort" basis. The /// extent to which it can block optimisations may vary depending upon the platform and code-gen -/// backend used. Programs cannot rely on `black_box` for *correctness* in any way. +/// backend used. Programs cannot rely on `black_box` for *correctness*, beyond it behaving as the +/// identity function. /// /// [`std::convert::identity`]: crate::convert::identity /// diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs index a315a28fb..18a90599c 100644 --- a/library/core/src/intrinsics.rs +++ b/library/core/src/intrinsics.rs @@ -58,7 +58,6 @@ use crate::marker::DiscriminantKind; use crate::marker::Tuple; use crate::mem; -#[cfg(not(bootstrap))] pub mod mir; // These imports are used for simplifying intra-doc links @@ -963,7 +962,6 @@ extern "rust-intrinsic" { /// This intrinsic does not have a stable counterpart. #[rustc_const_unstable(feature = "const_assert_type2", issue = "none")] #[rustc_safe_intrinsic] - #[cfg(not(bootstrap))] pub fn assert_mem_uninitialized_valid<T>(); /// Gets a reference to a static `Location` indicating where it was called. @@ -2095,6 +2093,10 @@ extern "rust-intrinsic" { /// Above some backend-decided threshold this will emit calls to `memcmp`, /// like slice equality does, instead of causing massive code size. /// + /// Since this works by comparing the underlying bytes, the actual `T` is + /// not particularly important. It will be used for its size and alignment, + /// but any validity restrictions will be ignored, not enforced. + /// /// # Safety /// /// It's UB to call this if any of the *bytes* in `*a` or `*b` are uninitialized or carry a diff --git a/library/core/src/intrinsics/mir.rs b/library/core/src/intrinsics/mir.rs index e3157b669..72db1d87c 100644 --- a/library/core/src/intrinsics/mir.rs +++ b/library/core/src/intrinsics/mir.rs @@ -42,7 +42,7 @@ //! another function. The `dialect` and `phase` parameters indicate which [version of MIR][dialect //! docs] you are inserting here. Generally you'll want to use `#![custom_mir(dialect = "built")]` //! if you want your MIR to be modified by the full MIR pipeline, or `#![custom_mir(dialect = -//! "runtime", phase = "optimized")] if you don't. +//! "runtime", phase = "optimized")]` if you don't. //! //! [dialect docs]: //! https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/mir/enum.MirPhase.html @@ -60,8 +60,7 @@ //! //! # Examples //! -#![cfg_attr(bootstrap, doc = "```rust,compile_fail")] -#![cfg_attr(not(bootstrap), doc = "```rust")] +//! ```rust //! #![feature(core_intrinsics, custom_mir)] //! //! extern crate core; @@ -211,13 +210,16 @@ //! //! #### Statements //! - Assign statements work via normal Rust assignment. -//! - [`Retag`] statements have an associated function. +//! - [`Retag`], [`StorageLive`], [`StorageDead`], [`Deinit`] statements have an associated function. //! //! #### Rvalues //! //! - Operands implicitly convert to `Use` rvalues. //! 
- `&`, `&mut`, `addr_of!`, and `addr_of_mut!` all work to create their associated rvalue. -//! - [`Discriminant`] has an associated function. +//! - [`Discriminant`] and [`Len`] have associated functions. +//! - Unary and binary operations use their normal Rust syntax - `a * b`, `!c`, etc. +//! - Checked binary operations are represented by wrapping the associated binop in [`Checked`]. +//! - Array repetition syntax (`[foo; 10]`) creates the associated rvalue. //! //! #### Terminators //! @@ -261,6 +263,9 @@ define!("mir_drop_and_replace", fn DropAndReplace<T>(place: T, value: T, goto: B define!("mir_call", fn Call<T>(place: T, goto: BasicBlock, call: T)); define!("mir_storage_live", fn StorageLive<T>(local: T)); define!("mir_storage_dead", fn StorageDead<T>(local: T)); +define!("mir_deinit", fn Deinit<T>(place: T)); +define!("mir_checked", fn Checked<T>(binop: T) -> (T, bool)); +define!("mir_len", fn Len<T>(place: T) -> usize); define!("mir_retag", fn Retag<T>(place: T)); define!("mir_move", fn Move<T>(place: T) -> T); define!("mir_static", fn Static<T>(s: T) -> &'static T); @@ -294,8 +299,7 @@ define!( /// /// # Examples /// - #[cfg_attr(bootstrap, doc = "```rust,compile_fail")] - #[cfg_attr(not(bootstrap), doc = "```rust")] + /// ```rust /// #![feature(custom_mir, core_intrinsics)] /// /// extern crate core; diff --git a/library/core/src/iter/adapters/array_chunks.rs b/library/core/src/iter/adapters/array_chunks.rs index 5e4211058..13719c727 100644 --- a/library/core/src/iter/adapters/array_chunks.rs +++ b/library/core/src/iter/adapters/array_chunks.rs @@ -1,7 +1,5 @@ use crate::array; -use crate::const_closure::ConstFnMutClosure; use crate::iter::{ByRefSized, FusedIterator, Iterator, TrustedRandomAccessNoCoerce}; -use crate::mem::{self, MaybeUninit}; use crate::ops::{ControlFlow, NeverShortCircuit, Try}; /// An iterator over `N` elements of the iterator at a time. @@ -189,13 +187,12 @@ where I: Iterator, { #[inline] - default fn fold<B, F>(mut self, init: B, mut f: F) -> B + default fn fold<B, F>(mut self, init: B, f: F) -> B where Self: Sized, F: FnMut(B, Self::Item) -> B, { - let fold = ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp); - self.try_fold(init, fold).0 + self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0 } } @@ -214,19 +211,14 @@ where let mut i = 0; // Use a while loop because (0..len).step_by(N) doesn't optimize well. while inner_len - i >= N { - let mut chunk = MaybeUninit::uninit_array(); - let mut guard = array::Guard { array_mut: &mut chunk, initialized: 0 }; - while guard.initialized < N { + let chunk = crate::array::from_fn(|local| { // SAFETY: The method consumes the iterator and the loop condition ensures that // all accesses are in bounds and only happen once. 
unsafe { - let idx = i + guard.initialized; - guard.push_unchecked(self.iter.__iterator_get_unchecked(idx)); + let idx = i + local; + self.iter.__iterator_get_unchecked(idx) } - } - mem::forget(guard); - // SAFETY: The loop above initialized all elements - let chunk = unsafe { MaybeUninit::array_assume_init(chunk) }; + }); accum = f(accum, chunk); i += N; } diff --git a/library/core/src/iter/adapters/by_ref_sized.rs b/library/core/src/iter/adapters/by_ref_sized.rs index 1945e402f..477e7117c 100644 --- a/library/core/src/iter/adapters/by_ref_sized.rs +++ b/library/core/src/iter/adapters/by_ref_sized.rs @@ -1,7 +1,4 @@ -use crate::{ - const_closure::ConstFnMutClosure, - ops::{NeverShortCircuit, Try}, -}; +use crate::ops::{NeverShortCircuit, Try}; /// Like `Iterator::by_ref`, but requiring `Sized` so it can forward generics. /// @@ -39,13 +36,12 @@ impl<I: Iterator> Iterator for ByRefSized<'_, I> { } #[inline] - fn fold<B, F>(self, init: B, mut f: F) -> B + fn fold<B, F>(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B, { // `fold` needs ownership, so this can't forward directly. - I::try_fold(self.0, init, ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp)) - .0 + I::try_fold(self.0, init, NeverShortCircuit::wrap_mut_2(f)).0 } #[inline] @@ -76,17 +72,12 @@ impl<I: DoubleEndedIterator> DoubleEndedIterator for ByRefSized<'_, I> { } #[inline] - fn rfold<B, F>(self, init: B, mut f: F) -> B + fn rfold<B, F>(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B, { // `rfold` needs ownership, so this can't forward directly. - I::try_rfold( - self.0, - init, - ConstFnMutClosure::new(&mut f, NeverShortCircuit::wrap_mut_2_imp), - ) - .0 + I::try_rfold(self.0, init, NeverShortCircuit::wrap_mut_2(f)).0 } #[inline] diff --git a/library/core/src/iter/adapters/cloned.rs b/library/core/src/iter/adapters/cloned.rs index aba24a79d..914ff86c1 100644 --- a/library/core/src/iter/adapters/cloned.rs +++ b/library/core/src/iter/adapters/cloned.rs @@ -1,7 +1,7 @@ use crate::iter::adapters::{ zip::try_get_unchecked, TrustedRandomAccess, TrustedRandomAccessNoCoerce, }; -use crate::iter::{FusedIterator, TrustedLen}; +use crate::iter::{FusedIterator, TrustedLen, UncheckedIterator}; use crate::ops::Try; /// An iterator that clones the elements of an underlying iterator. @@ -140,3 +140,16 @@ where T: Clone, { } + +impl<'a, I, T: 'a> UncheckedIterator for Cloned<I> +where + I: UncheckedIterator<Item = &'a T>, + T: Clone, +{ + unsafe fn next_unchecked(&mut self) -> T { + // SAFETY: `Cloned` is 1:1 with the inner iterator, so if the caller promised + // that there's an element left, the inner iterator has one too. 
+ let item = unsafe { self.it.next_unchecked() }; + item.clone() + } +} diff --git a/library/core/src/iter/adapters/filter_map.rs b/library/core/src/iter/adapters/filter_map.rs index e0d665c9e..6bdf53f7f 100644 --- a/library/core/src/iter/adapters/filter_map.rs +++ b/library/core/src/iter/adapters/filter_map.rs @@ -99,7 +99,7 @@ where ) -> impl FnMut((), T) -> ControlFlow<B> + '_ { move |(), x| match f(x) { Some(x) => ControlFlow::Break(x), - None => ControlFlow::CONTINUE, + None => ControlFlow::Continue(()), } } diff --git a/library/core/src/iter/adapters/flatten.rs b/library/core/src/iter/adapters/flatten.rs index 307016c26..b040a0ea9 100644 --- a/library/core/src/iter/adapters/flatten.rs +++ b/library/core/src/iter/adapters/flatten.rs @@ -539,7 +539,7 @@ where #[rustc_inherit_overflow_checks] fn advance<U: Iterator>(n: usize, iter: &mut U) -> ControlFlow<(), usize> { match iter.advance_by(n) { - Ok(()) => ControlFlow::BREAK, + Ok(()) => ControlFlow::Break(()), Err(advanced) => ControlFlow::Continue(n - advanced), } } @@ -629,7 +629,7 @@ where #[rustc_inherit_overflow_checks] fn advance<U: DoubleEndedIterator>(n: usize, iter: &mut U) -> ControlFlow<(), usize> { match iter.advance_back_by(n) { - Ok(()) => ControlFlow::BREAK, + Ok(()) => ControlFlow::Break(()), Err(advanced) => ControlFlow::Continue(n - advanced), } } diff --git a/library/core/src/iter/adapters/map.rs b/library/core/src/iter/adapters/map.rs index 9e25dbe46..31d02a4da 100644 --- a/library/core/src/iter/adapters/map.rs +++ b/library/core/src/iter/adapters/map.rs @@ -2,7 +2,7 @@ use crate::fmt; use crate::iter::adapters::{ zip::try_get_unchecked, SourceIter, TrustedRandomAccess, TrustedRandomAccessNoCoerce, }; -use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen}; +use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen, UncheckedIterator}; use crate::ops::Try; /// An iterator that maps the values of `iter` with `f`. @@ -187,6 +187,19 @@ where { } +impl<B, I, F> UncheckedIterator for Map<I, F> +where + I: UncheckedIterator, + F: FnMut(I::Item) -> B, +{ + unsafe fn next_unchecked(&mut self) -> B { + // SAFETY: `Map` is 1:1 with the inner iterator, so if the caller promised + // that there's an element left, the inner iterator has one too. + let item = unsafe { self.iter.next_unchecked() }; + (self.f)(item) + } +} + #[doc(hidden)] #[unstable(feature = "trusted_random_access", issue = "none")] unsafe impl<I, F> TrustedRandomAccess for Map<I, F> where I: TrustedRandomAccess {} diff --git a/library/core/src/iter/adapters/zip.rs b/library/core/src/iter/adapters/zip.rs index 8153c8cfe..b6b0c90cb 100644 --- a/library/core/src/iter/adapters/zip.rs +++ b/library/core/src/iter/adapters/zip.rs @@ -1,7 +1,7 @@ use crate::cmp; use crate::fmt::{self, Debug}; use crate::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator}; -use crate::iter::{InPlaceIterable, SourceIter, TrustedLen}; +use crate::iter::{InPlaceIterable, SourceIter, TrustedLen, UncheckedIterator}; /// An iterator that iterates two other iterators simultaneously. 
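The `UncheckedIterator` impls added for `Cloned` and `Map` here (and `Zip` just below) all follow one pattern: forward `next_unchecked` to the inner iterator under the caller's promise that an element remains, then apply the adapter's own transformation. The trait is `pub(crate)`, so it can't be named outside `core`; this standalone sketch only mirrors the shape visible in the diff (names are illustrative):

```
trait UncheckedIterator: Iterator {
    /// # Safety
    /// The caller must guarantee that another element is available.
    unsafe fn next_unchecked(&mut self) -> Self::Item {
        // Trust the caller's promise and skip the `None` branch entirely.
        unsafe { self.next().unwrap_unchecked() }
    }
}

struct One(Option<u32>);

impl Iterator for One {
    type Item = u32;
    fn next(&mut self) -> Option<u32> {
        self.0.take()
    }
}

impl UncheckedIterator for One {}

fn main() {
    let mut it = One(Some(7));
    // SAFETY: `it` was just built with exactly one element remaining.
    assert_eq!(unsafe { it.next_unchecked() }, 7);
}
```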
/// @@ -417,6 +417,13 @@ where { } +impl<A, B> UncheckedIterator for Zip<A, B> +where + A: UncheckedIterator, + B: UncheckedIterator, +{ +} + // Arbitrarily selects the left side of the zip iteration as extractable "source" // it would require negative trait bounds to be able to try both #[unstable(issue = "none", feature = "inplace_iteration")] diff --git a/library/core/src/iter/mod.rs b/library/core/src/iter/mod.rs index bb35d50b4..ae00232c1 100644 --- a/library/core/src/iter/mod.rs +++ b/library/core/src/iter/mod.rs @@ -278,6 +278,7 @@ //! //! ``` //! # #![allow(unused_must_use)] +//! # #![cfg_attr(not(bootstrap), allow(map_unit_fn))] //! let v = vec![1, 2, 3, 4, 5]; //! v.iter().map(|x| println!("{x}")); //! ``` @@ -362,15 +363,13 @@ macro_rules! impl_fold_via_try_fold { }; (@internal $fold:ident -> $try_fold:ident) => { #[inline] - fn $fold<AAA, FFF>(mut self, init: AAA, mut fold: FFF) -> AAA + fn $fold<AAA, FFF>(mut self, init: AAA, fold: FFF) -> AAA where FFF: FnMut(AAA, Self::Item) -> AAA, { - use crate::const_closure::ConstFnMutClosure; use crate::ops::NeverShortCircuit; - let fold = ConstFnMutClosure::new(&mut fold, NeverShortCircuit::wrap_mut_2_imp); - self.$try_fold(init, fold).0 + self.$try_fold(init, NeverShortCircuit::wrap_mut_2(fold)).0 } }; } @@ -452,6 +451,7 @@ pub use self::adapters::{ pub use self::adapters::{Intersperse, IntersperseWith}; pub(crate) use self::adapters::try_process; +pub(crate) use self::traits::UncheckedIterator; mod adapters; mod range; diff --git a/library/core/src/iter/range.rs b/library/core/src/iter/range.rs index b5739f2f3..78e27d730 100644 --- a/library/core/src/iter/range.rs +++ b/library/core/src/iter/range.rs @@ -1,4 +1,3 @@ -use crate::char; use crate::convert::TryFrom; use crate::mem; use crate::ops::{self, Try}; diff --git a/library/core/src/iter/traits/double_ended.rs b/library/core/src/iter/traits/double_ended.rs index bdf94c792..ed23873cd 100644 --- a/library/core/src/iter/traits/double_ended.rs +++ b/library/core/src/iter/traits/double_ended.rs @@ -352,7 +352,7 @@ pub trait DoubleEndedIterator: Iterator { #[inline] fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> { move |(), x| { - if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::CONTINUE } + if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) } } } diff --git a/library/core/src/iter/traits/exact_size.rs b/library/core/src/iter/traits/exact_size.rs index 1757e37ec..908830d8a 100644 --- a/library/core/src/iter/traits/exact_size.rs +++ b/library/core/src/iter/traits/exact_size.rs @@ -21,6 +21,16 @@ /// /// [`len`]: ExactSizeIterator::len /// +/// # When *shouldn't* an adapter be `ExactSizeIterator`? +/// +/// If an adapter makes an iterator *longer*, then it's usually incorrect for +/// that adapter to implement `ExactSizeIterator`. The inner exact-sized +/// iterator might already be `usize::MAX`-long, and thus the length of the +/// longer adapted iterator would no longer be exactly representable in `usize`. +/// +/// This is why [`Chain<A, B>`](crate::iter::Chain) isn't `ExactSizeIterator`, +/// even when `A` and `B` are both `ExactSizeIterator`. 
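The overflow argument in the new `ExactSizeIterator` docs can be observed directly through `size_hint`; nothing below is iterated, so this runs instantly:

```
fn main() {
    let a = 0..usize::MAX; // ExactSizeIterator: a.len() == usize::MAX
    let b = 0..1usize;
    let chained = a.chain(b); // usize::MAX + 1 elements in total
    // `chained.len()` would not compile: `Chain` is never `ExactSizeIterator`.
    // Its lower bound saturates and its upper bound overflows to `None`:
    assert_eq!(chained.size_hint(), (usize::MAX, None));
}
```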
+/// /// # Examples /// /// Basic usage: diff --git a/library/core/src/iter/traits/iterator.rs b/library/core/src/iter/traits/iterator.rs index a4a665d48..b8e7d0a68 100644 --- a/library/core/src/iter/traits/iterator.rs +++ b/library/core/src/iter/traits/iterator.rs @@ -69,6 +69,7 @@ fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {} #[doc(notable_trait)] #[rustc_diagnostic_item = "Iterator"] #[must_use = "iterators are lazy and do nothing unless consumed"] +#[cfg_attr(not(bootstrap), const_trait)] pub trait Iterator { /// The type of the elements being iterated over. #[rustc_diagnostic_item = "IteratorItem"] @@ -141,6 +142,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iter_next_chunk", reason = "recently added", issue = "98326")] + #[rustc_do_not_const_check] fn next_chunk<const N: usize>( &mut self, ) -> Result<[Self::Item; N], array::IntoIter<Self::Item, N>> @@ -218,6 +220,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn size_hint(&self) -> (usize, Option<usize>) { (0, None) } @@ -255,6 +258,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn count(self) -> usize where Self: Sized, @@ -285,6 +289,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn last(self) -> Option<Self::Item> where Self: Sized, @@ -331,6 +336,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iter_advance_by", reason = "recently added", issue = "77404")] + #[rustc_do_not_const_check] fn advance_by(&mut self, n: usize) -> Result<(), usize> { for i in 0..n { self.next().ok_or(i)?; @@ -379,6 +385,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn nth(&mut self, n: usize) -> Option<Self::Item> { self.advance_by(n).ok()?; self.next() @@ -431,6 +438,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_step_by", since = "1.28.0")] + #[rustc_do_not_const_check] fn step_by(self, step: usize) -> StepBy<Self> where Self: Sized, @@ -502,6 +510,7 @@ pub trait Iterator { /// [`OsStr`]: ../../std/ffi/struct.OsStr.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn chain<U>(self, other: U) -> Chain<Self, U::IntoIter> where Self: Sized, @@ -620,6 +629,7 @@ pub trait Iterator { /// [`zip`]: crate::iter::zip #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn zip<U>(self, other: U) -> Zip<Self, U::IntoIter> where Self: Sized, @@ -662,6 +672,7 @@ pub trait Iterator { /// [`intersperse_with`]: Iterator::intersperse_with #[inline] #[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")] + #[rustc_do_not_const_check] fn intersperse(self, separator: Self::Item) -> Intersperse<Self> where Self: Sized, @@ -720,6 +731,7 @@ pub trait Iterator { /// [`intersperse`]: Iterator::intersperse #[inline] #[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")] + #[rustc_do_not_const_check] fn intersperse_with<G>(self, separator: G) -> IntersperseWith<Self, G> where Self: Sized, @@ -777,8 +789,10 @@ pub trait Iterator { /// println!("{x}"); /// } /// ``` + #[rustc_diagnostic_item = "IteratorMap"] #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn map<B, F>(self, f: F) -> Map<Self, F> where Self: Sized, @@ -824,6 +838,7 @@ pub trait 
Iterator { /// ``` #[inline] #[stable(feature = "iterator_for_each", since = "1.21.0")] + #[rustc_do_not_const_check] fn for_each<F>(self, f: F) where Self: Sized, @@ -899,6 +914,7 @@ pub trait Iterator { /// Note that `iter.filter(f).next()` is equivalent to `iter.find(f)`. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn filter<P>(self, predicate: P) -> Filter<Self, P> where Self: Sized, @@ -944,6 +960,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn filter_map<B, F>(self, f: F) -> FilterMap<Self, F> where Self: Sized, @@ -990,6 +1007,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn enumerate(self) -> Enumerate<Self> where Self: Sized, @@ -1061,6 +1079,7 @@ pub trait Iterator { /// [`next`]: Iterator::next #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn peekable(self) -> Peekable<Self> where Self: Sized, @@ -1126,6 +1145,7 @@ pub trait Iterator { #[inline] #[doc(alias = "drop_while")] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn skip_while<P>(self, predicate: P) -> SkipWhile<Self, P> where Self: Sized, @@ -1207,6 +1227,7 @@ pub trait Iterator { /// the iteration should stop, but wasn't placed back into the iterator. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn take_while<P>(self, predicate: P) -> TakeWhile<Self, P> where Self: Sized, @@ -1295,6 +1316,7 @@ pub trait Iterator { /// [`fuse`]: Iterator::fuse #[inline] #[stable(feature = "iter_map_while", since = "1.57.0")] + #[rustc_do_not_const_check] fn map_while<B, P>(self, predicate: P) -> MapWhile<Self, P> where Self: Sized, @@ -1326,6 +1348,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn skip(self, n: usize) -> Skip<Self> where Self: Sized, @@ -1379,6 +1402,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn take(self, n: usize) -> Take<Self> where Self: Sized, @@ -1428,6 +1452,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn scan<St, B, F>(self, initial_state: St, f: F) -> Scan<Self, St, F> where Self: Sized, @@ -1468,6 +1493,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn flat_map<U, F>(self, f: F) -> FlatMap<Self, U, F> where Self: Sized, @@ -1552,6 +1578,7 @@ pub trait Iterator { /// [`flat_map()`]: Iterator::flat_map #[inline] #[stable(feature = "iterator_flatten", since = "1.29.0")] + #[rustc_do_not_const_check] fn flatten(self) -> Flatten<Self> where Self: Sized, @@ -1620,6 +1647,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn fuse(self) -> Fuse<Self> where Self: Sized, @@ -1704,6 +1732,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn inspect<F>(self, f: F) -> Inspect<Self, F> where Self: Sized, @@ -1734,6 +1763,7 @@ pub trait Iterator { /// assert_eq!(of_rust, vec!["of", "Rust"]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn by_ref(&mut self) -> &mut Self where Self: Sized, @@ -1853,6 +1883,7 @@ pub trait Iterator { #[stable(feature = "rust1", since 
= "1.0.0")] #[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"] #[cfg_attr(not(test), rustc_diagnostic_item = "iterator_collect_fn")] + #[rustc_do_not_const_check] fn collect<B: FromIterator<Self::Item>>(self) -> B where Self: Sized, @@ -1931,6 +1962,7 @@ pub trait Iterator { /// [`collect`]: Iterator::collect #[inline] #[unstable(feature = "iterator_try_collect", issue = "94047")] + #[rustc_do_not_const_check] fn try_collect<B>(&mut self) -> ChangeOutputType<Self::Item, B> where Self: Sized, @@ -2004,6 +2036,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iter_collect_into", reason = "new API", issue = "94780")] + #[rustc_do_not_const_check] fn collect_into<E: Extend<Self::Item>>(self, collection: &mut E) -> &mut E where Self: Sized, @@ -2038,6 +2071,7 @@ pub trait Iterator { /// assert_eq!(odd, vec![1, 3]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn partition<B, F>(self, f: F) -> (B, B) where Self: Sized, @@ -2100,6 +2134,7 @@ pub trait Iterator { /// assert!(a[i..].iter().all(|&n| n % 2 == 1)); // odds /// ``` #[unstable(feature = "iter_partition_in_place", reason = "new API", issue = "62543")] + #[rustc_do_not_const_check] fn partition_in_place<'a, T: 'a, P>(mut self, ref mut predicate: P) -> usize where Self: Sized + DoubleEndedIterator<Item = &'a mut T>, @@ -2157,6 +2192,7 @@ pub trait Iterator { /// assert!(!"IntoIterator".chars().is_partitioned(char::is_uppercase)); /// ``` #[unstable(feature = "iter_is_partitioned", reason = "new API", issue = "62544")] + #[rustc_do_not_const_check] fn is_partitioned<P>(mut self, mut predicate: P) -> bool where Self: Sized, @@ -2251,6 +2287,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_try_fold", since = "1.27.0")] + #[rustc_do_not_const_check] fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R where Self: Sized, @@ -2309,6 +2346,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_try_fold", since = "1.27.0")] + #[rustc_do_not_const_check] fn try_for_each<F, R>(&mut self, f: F) -> R where Self: Sized, @@ -2428,6 +2466,7 @@ pub trait Iterator { #[doc(alias = "inject", alias = "foldl")] #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn fold<B, F>(mut self, init: B, mut f: F) -> B where Self: Sized, @@ -2465,6 +2504,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_fold_self", since = "1.51.0")] + #[rustc_do_not_const_check] fn reduce<F>(mut self, f: F) -> Option<Self::Item> where Self: Sized, @@ -2536,6 +2576,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "iterator_try_reduce", reason = "new API", issue = "87053")] + #[rustc_do_not_const_check] fn try_reduce<F, R>(&mut self, f: F) -> ChangeOutputType<R, Option<R::Output>> where Self: Sized, @@ -2593,6 +2634,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn all<F>(&mut self, f: F) -> bool where Self: Sized, @@ -2601,10 +2643,10 @@ pub trait Iterator { #[inline] fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> { move |(), x| { - if f(x) { ControlFlow::CONTINUE } else { ControlFlow::BREAK } + if f(x) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) } } } - self.try_fold((), check(f)) == ControlFlow::CONTINUE + self.try_fold((), check(f)) == ControlFlow::Continue(()) } /// Tests if any element of the iterator matches a predicate. 
@@ -2646,6 +2688,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn any<F>(&mut self, f: F) -> bool where Self: Sized, @@ -2654,11 +2697,11 @@ pub trait Iterator { #[inline] fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> { move |(), x| { - if f(x) { ControlFlow::BREAK } else { ControlFlow::CONTINUE } + if f(x) { ControlFlow::Break(()) } else { ControlFlow::Continue(()) } } } - self.try_fold((), check(f)) == ControlFlow::BREAK + self.try_fold((), check(f)) == ControlFlow::Break(()) } /// Searches for an element of an iterator that satisfies a predicate. @@ -2709,6 +2752,7 @@ pub trait Iterator { /// Note that `iter.find(f)` is equivalent to `iter.filter(f).next()`. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn find<P>(&mut self, predicate: P) -> Option<Self::Item> where Self: Sized, @@ -2717,7 +2761,7 @@ pub trait Iterator { #[inline] fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> { move |(), x| { - if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::CONTINUE } + if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) } } } @@ -2740,6 +2784,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iterator_find_map", since = "1.30.0")] + #[rustc_do_not_const_check] fn find_map<B, F>(&mut self, f: F) -> Option<B> where Self: Sized, @@ -2749,7 +2794,7 @@ pub trait Iterator { fn check<T, B>(mut f: impl FnMut(T) -> Option<B>) -> impl FnMut((), T) -> ControlFlow<B> { move |(), x| match f(x) { Some(x) => ControlFlow::Break(x), - None => ControlFlow::CONTINUE, + None => ControlFlow::Continue(()), } } @@ -2796,6 +2841,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "try_find", reason = "new API", issue = "63178")] + #[rustc_do_not_const_check] fn try_find<F, R>(&mut self, f: F) -> ChangeOutputType<R, Option<Self::Item>> where Self: Sized, @@ -2812,7 +2858,7 @@ pub trait Iterator { R: Residual<Option<I>>, { move |(), x| match f(&x).branch() { - ControlFlow::Continue(false) => ControlFlow::CONTINUE, + ControlFlow::Continue(false) => ControlFlow::Continue(()), ControlFlow::Continue(true) => ControlFlow::Break(Try::from_output(Some(x))), ControlFlow::Break(r) => ControlFlow::Break(FromResidual::from_residual(r)), } @@ -2878,6 +2924,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn position<P>(&mut self, predicate: P) -> Option<usize> where Self: Sized, @@ -2935,6 +2982,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn rposition<P>(&mut self, predicate: P) -> Option<usize> where P: FnMut(Self::Item) -> bool, @@ -2986,6 +3034,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn max(self) -> Option<Self::Item> where Self: Sized, @@ -3024,6 +3073,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn min(self) -> Option<Self::Item> where Self: Sized, @@ -3046,6 +3096,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] + #[rustc_do_not_const_check] fn max_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item> where Self: Sized, @@ -3079,6 +3130,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_max_by", since = "1.15.0")] + 
#[rustc_do_not_const_check] fn max_by<F>(self, compare: F) -> Option<Self::Item> where Self: Sized, @@ -3106,6 +3158,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] + #[rustc_do_not_const_check] fn min_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item> where Self: Sized, @@ -3139,6 +3192,7 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_min_by", since = "1.15.0")] + #[rustc_do_not_const_check] fn min_by<F>(self, compare: F) -> Option<Self::Item> where Self: Sized, @@ -3176,6 +3230,7 @@ pub trait Iterator { #[inline] #[doc(alias = "reverse")] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn rev(self) -> Rev<Self> where Self: Sized + DoubleEndedIterator, @@ -3214,6 +3269,7 @@ pub trait Iterator { /// assert_eq!(z, [3, 6]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) where FromA: Default + Extend<A>, @@ -3246,6 +3302,7 @@ pub trait Iterator { /// assert_eq!(v_map, vec![1, 2, 3]); /// ``` #[stable(feature = "iter_copied", since = "1.36.0")] + #[rustc_do_not_const_check] fn copied<'a, T: 'a>(self) -> Copied<Self> where Self: Sized + Iterator<Item = &'a T>, @@ -3293,6 +3350,7 @@ pub trait Iterator { /// assert_eq!(&[vec![23]], &faster[..]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_do_not_const_check] fn cloned<'a, T: 'a>(self) -> Cloned<Self> where Self: Sized + Iterator<Item = &'a T>, @@ -3327,6 +3385,7 @@ pub trait Iterator { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[rustc_do_not_const_check] fn cycle(self) -> Cycle<Self> where Self: Sized + Clone, @@ -3370,6 +3429,7 @@ pub trait Iterator { /// ``` #[track_caller] #[unstable(feature = "iter_array_chunks", reason = "recently added", issue = "100450")] + #[rustc_do_not_const_check] fn array_chunks<const N: usize>(self) -> ArrayChunks<Self, N> where Self: Sized, @@ -3400,6 +3460,7 @@ pub trait Iterator { /// assert_eq!(sum, 6); /// ``` #[stable(feature = "iter_arith", since = "1.11.0")] + #[rustc_do_not_const_check] fn sum<S>(self) -> S where Self: Sized, @@ -3429,6 +3490,7 @@ pub trait Iterator { /// assert_eq!(factorial(5), 120); /// ``` #[stable(feature = "iter_arith", since = "1.11.0")] + #[rustc_do_not_const_check] fn product<P>(self) -> P where Self: Sized, @@ -3450,6 +3512,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().cmp([1].iter()), Ordering::Greater); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn cmp<I>(self, other: I) -> Ordering where I: IntoIterator<Item = Self::Item>, @@ -3479,6 +3542,7 @@ pub trait Iterator { /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| (2 * x).cmp(&y)), Ordering::Greater); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] + #[rustc_do_not_const_check] fn cmp_by<I, F>(self, other: I, cmp: F) -> Ordering where Self: Sized, @@ -3491,7 +3555,7 @@ pub trait Iterator { F: FnMut(X, Y) -> Ordering, { move |x, y| match cmp(x, y) { - Ordering::Equal => ControlFlow::CONTINUE, + Ordering::Equal => ControlFlow::Continue(()), non_eq => ControlFlow::Break(non_eq), } } @@ -3502,8 +3566,10 @@ pub trait Iterator { } } - /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those - /// of another. + /// [Lexicographically](Ord#lexicographical-comparison) compares the [`PartialOrd`] elements of + /// this [`Iterator`] with those of another. 
The comparison works like short-circuit + /// evaluation, returning a result without comparing the remaining elements. + /// As soon as an order can be determined, the evaluation stops and a result is returned. /// /// # Examples /// @@ -3513,10 +3579,27 @@ pub trait Iterator { /// assert_eq!([1.].iter().partial_cmp([1.].iter()), Some(Ordering::Equal)); /// assert_eq!([1.].iter().partial_cmp([1., 2.].iter()), Some(Ordering::Less)); /// assert_eq!([1., 2.].iter().partial_cmp([1.].iter()), Some(Ordering::Greater)); + /// ``` /// + /// For floating-point numbers, NaN does not have a total order and will result + /// in `None` when compared: + /// + /// ``` /// assert_eq!([f64::NAN].iter().partial_cmp([1.].iter()), None); /// ``` + /// + /// The results are determined by the order of evaluation. + /// + /// ``` + /// use std::cmp::Ordering; + /// + /// assert_eq!([1.0, f64::NAN].iter().partial_cmp([2.0, f64::NAN].iter()), Some(Ordering::Less)); + /// assert_eq!([2.0, f64::NAN].iter().partial_cmp([1.0, f64::NAN].iter()), Some(Ordering::Greater)); + /// assert_eq!([f64::NAN, 1.0].iter().partial_cmp([f64::NAN, 2.0].iter()), None); + /// ``` + /// #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn partial_cmp<I>(self, other: I) -> Option<Ordering> where I: IntoIterator, @@ -3555,6 +3638,7 @@ pub trait Iterator { /// ); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] + #[rustc_do_not_const_check] fn partial_cmp_by<I, F>(self, other: I, partial_cmp: F) -> Option<Ordering> where Self: Sized, @@ -3567,7 +3651,7 @@ pub trait Iterator { F: FnMut(X, Y) -> Option<Ordering>, { move |x, y| match partial_cmp(x, y) { - Some(Ordering::Equal) => ControlFlow::CONTINUE, + Some(Ordering::Equal) => ControlFlow::Continue(()), non_eq => ControlFlow::Break(non_eq), } } @@ -3588,6 +3672,7 @@ pub trait Iterator { /// assert_eq!([1].iter().eq([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn eq<I>(self, other: I) -> bool where I: IntoIterator, @@ -3613,6 +3698,7 @@ pub trait Iterator { /// assert!(xs.iter().eq_by(&ys, |&x, &y| x * x == y)); /// ``` #[unstable(feature = "iter_order_by", issue = "64295")] + #[rustc_do_not_const_check] fn eq_by<I, F>(self, other: I, eq: F) -> bool where Self: Sized, @@ -3625,7 +3711,7 @@ pub trait Iterator { F: FnMut(X, Y) -> bool, { move |x, y| { - if eq(x, y) { ControlFlow::CONTINUE } else { ControlFlow::BREAK } + if eq(x, y) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) } } } @@ -3645,6 +3731,7 @@ pub trait Iterator { /// assert_eq!([1].iter().ne([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn ne<I>(self, other: I) -> bool where I: IntoIterator, @@ -3666,6 +3753,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().lt([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn lt<I>(self, other: I) -> bool where I: IntoIterator, @@ -3687,6 +3775,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().le([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn le<I>(self, other: I) -> bool where I: IntoIterator, @@ -3708,6 +3797,7 @@ pub trait Iterator { /// assert_eq!([1, 2].iter().gt([1, 2].iter()), false); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn gt<I>(self, other: I) -> bool where I: IntoIterator, @@ -3729,6 +3819,7 @@ pub trait 
Iterator { /// assert_eq!([1, 2].iter().ge([1, 2].iter()), true); /// ``` #[stable(feature = "iter_order", since = "1.5.0")] + #[rustc_do_not_const_check] fn ge<I>(self, other: I) -> bool where I: IntoIterator, @@ -3760,6 +3851,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + #[rustc_do_not_const_check] fn is_sorted(self) -> bool where Self: Sized, @@ -3788,6 +3880,7 @@ pub trait Iterator { /// /// [`is_sorted`]: Iterator::is_sorted #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + #[rustc_do_not_const_check] fn is_sorted_by<F>(mut self, compare: F) -> bool where Self: Sized, @@ -3834,6 +3927,7 @@ pub trait Iterator { /// ``` #[inline] #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + #[rustc_do_not_const_check] fn is_sorted_by_key<F, K>(self, f: F) -> bool where Self: Sized, @@ -3849,6 +3943,7 @@ pub trait Iterator { #[inline] #[doc(hidden)] #[unstable(feature = "trusted_random_access", issue = "none")] + #[rustc_do_not_const_check] unsafe fn __iterator_get_unchecked(&mut self, _idx: usize) -> Self::Item where Self: TrustedRandomAccessNoCoerce, @@ -3859,7 +3954,7 @@ pub trait Iterator { /// Compares two iterators element-wise using the given function. /// -/// If `ControlFlow::CONTINUE` is returned from the function, the comparison moves on to the next +/// If `ControlFlow::Continue(())` is returned from the function, the comparison moves on to the next /// elements of both iterators. Returning `ControlFlow::Break(x)` short-circuits the iteration and /// returns `ControlFlow::Break(x)`. If one of the iterators runs out of elements, /// `ControlFlow::Continue(ord)` is returned where `ord` is the result of comparing the lengths of diff --git a/library/core/src/iter/traits/marker.rs b/library/core/src/iter/traits/marker.rs index da7537457..af0284823 100644 --- a/library/core/src/iter/traits/marker.rs +++ b/library/core/src/iter/traits/marker.rs @@ -31,6 +31,17 @@ impl<I: FusedIterator + ?Sized> FusedIterator for &mut I {} /// The iterator must produce exactly the number of elements it reported /// or diverge before reaching the end. /// +/// # When *shouldn't* an adapter be `TrustedLen`? +/// +/// If an adapter makes an iterator *shorter* by a given amount, then it's +/// usually incorrect for that adapter to implement `TrustedLen`. The inner +/// iterator might return more than `usize::MAX` items, but there's no way to +/// know what `k` elements less than that will be, since the `size_hint` from +/// the inner iterator has already saturated and lost that information. +/// +/// This is why [`Skip<I>`](crate::iter::Skip) isn't `TrustedLen`, even when +/// `I` implements `TrustedLen`. +/// /// # Safety /// /// This trait must only be implemented when the contract is upheld. 
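The new `TrustedLen` documentation above explains why a shortening adapter such as `Skip` cannot be `TrustedLen`: once the inner `size_hint` has saturated at `(usize::MAX, None)`, subtracting `k` from it no longer describes the true remaining length. A sketch of that saturation, using a hypothetical `Unbounded` iterator that conceptually yields more than `usize::MAX` items:

struct Unbounded(u128);

impl Iterator for Unbounded {
    type Item = u128;
    fn next(&mut self) -> Option<u128> {
        let n = self.0;
        self.0 += 1;
        Some(n)
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // More items remain than `usize` can count, so the hint saturates.
        (usize::MAX, None)
    }
}

fn main() {
    // After `skip(10)` there are still more than `usize::MAX` items left,
    // but the only hint derivable from the saturated inner hint is no longer
    // exact, which is why a `TrustedLen` impl for `Skip` would have to lie.
    println!("{:?}", Unbounded(0).skip(10).size_hint());
}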
Consumers diff --git a/library/core/src/iter/traits/mod.rs b/library/core/src/iter/traits/mod.rs index ed0fb634d..41ea29e6a 100644 --- a/library/core/src/iter/traits/mod.rs +++ b/library/core/src/iter/traits/mod.rs @@ -4,6 +4,7 @@ mod double_ended; mod exact_size; mod iterator; mod marker; +mod unchecked_iterator; #[stable(feature = "rust1", since = "1.0.0")] pub use self::{ @@ -19,3 +20,5 @@ pub use self::{ pub use self::marker::InPlaceIterable; #[unstable(feature = "trusted_step", issue = "85731")] pub use self::marker::TrustedStep; + +pub(crate) use self::unchecked_iterator::UncheckedIterator; diff --git a/library/core/src/iter/traits/unchecked_iterator.rs b/library/core/src/iter/traits/unchecked_iterator.rs new file mode 100644 index 000000000..ae4bfcad4 --- /dev/null +++ b/library/core/src/iter/traits/unchecked_iterator.rs @@ -0,0 +1,36 @@ +use crate::iter::TrustedLen; + +/// [`TrustedLen`] cannot have methods, so this allows augmenting it. +/// +/// It currently requires `TrustedLen` because it's unclear whether it's +/// reasonably possible to depend on the `size_hint` of anything else. +pub(crate) trait UncheckedIterator: TrustedLen { + /// Gets the next item from a non-empty iterator. + /// + /// Because there's always a value to return, that means it can return + /// the `Item` type directly, without wrapping it in an `Option`. + /// + /// # Safety + /// + /// This can only be called if `size_hint().0 != 0`, guaranteeing that + /// there's at least one item available. + /// + /// Otherwise (aka when `size_hint().1 == Some(0)`), this is UB. + /// + /// # Note to Implementers + /// + /// This has a default implementation using [`Option::unwrap_unchecked`]. + /// That's probably sufficient if your `next` *always* returns `Some`, + /// such as for infinite iterators. In more complicated situations, however, + /// sometimes there can still be `insertvalue`/`assume`/`extractvalue` + /// instructions remaining in the IR from the `Option` handling, at which + /// point you might want to implement this manually instead. + #[unstable(feature = "trusted_len_next_unchecked", issue = "37572")] + #[inline] + unsafe fn next_unchecked(&mut self) -> Self::Item { + let opt = self.next(); + // SAFETY: The caller promised that we're not empty, and + // `Self: TrustedLen` so we can actually trust the `size_hint`. 
+ unsafe { opt.unwrap_unchecked() } + } +} diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs index 8790649ab..24bad799f 100644 --- a/library/core/src/lib.rs +++ b/library/core/src/lib.rs @@ -95,6 +95,7 @@ #![warn(missing_docs)] #![allow(explicit_outlives_requirements)] #![allow(incomplete_features)] +#![cfg_attr(not(bootstrap), warn(multiple_supertrait_upcastable))] // // Library features: #![feature(const_align_offset)] @@ -123,6 +124,8 @@ #![feature(const_inherent_unchecked_arith)] #![feature(const_int_unchecked_arith)] #![feature(const_intrinsic_forget)] +#![feature(const_ipv4)] +#![feature(const_ipv6)] #![feature(const_likely)] #![feature(const_maybe_uninit_uninit_array)] #![feature(const_maybe_uninit_as_mut_ptr)] @@ -133,6 +136,7 @@ #![feature(const_option)] #![feature(const_option_ext)] #![feature(const_pin)] +#![feature(const_pointer_byte_offsets)] #![feature(const_pointer_is_aligned)] #![feature(const_ptr_sub_ptr)] #![feature(const_replace)] @@ -178,6 +182,7 @@ #![feature(const_slice_index)] #![feature(const_is_char_boundary)] #![feature(const_cstr_methods)] +#![feature(ip)] #![feature(is_ascii_octdigit)] // // Language features: @@ -191,8 +196,9 @@ #![feature(cfg_sanitize)] #![feature(cfg_target_has_atomic)] #![feature(cfg_target_has_atomic_equal_alignment)] -#![cfg_attr(not(bootstrap), feature(const_closures))] +#![feature(const_closures)] #![feature(const_fn_floating_point_arithmetic)] +#![feature(const_for)] #![feature(const_mut_refs)] #![feature(const_precise_live_drops)] #![feature(const_refs_to_cell)] @@ -235,11 +241,11 @@ #![feature(unsized_fn_params)] #![feature(asm_const)] #![feature(const_transmute_copy)] +#![cfg_attr(not(bootstrap), feature(multiple_supertrait_upcastable))] // // Target features: #![feature(arm_target_feature)] #![feature(avx512_target_feature)] -#![feature(cmpxchg16b_target_feature)] #![feature(hexagon_target_feature)] #![feature(mips_target_feature)] #![feature(powerpc_target_feature)] @@ -248,7 +254,7 @@ #![feature(sse4a_target_feature)] #![feature(tbm_target_feature)] #![feature(wasm_target_feature)] -#![cfg_attr(bootstrap, feature(f16c_target_feature))] +#![cfg_attr(bootstrap, feature(cmpxchg16b_target_feature))] // allow using `core::` in intra-doc links #[allow(unused_extern_crates)] @@ -347,6 +353,7 @@ pub mod cell; pub mod char; pub mod ffi; pub mod iter; +pub mod net; pub mod option; pub mod panic; pub mod panicking; @@ -375,8 +382,6 @@ mod bool; mod tuple; mod unit; -mod const_closure; - #[stable(feature = "core_primitive", since = "1.43.0")] pub mod primitive; diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs index 1326fc9ab..520ae0edb 100644 --- a/library/core/src/marker.rs +++ b/library/core/src/marker.rs @@ -97,6 +97,7 @@ unsafe impl<T: Sync + ?Sized> Send for &T {} #[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable #[rustc_specialization_trait] #[rustc_deny_explicit_impl] +#[cfg_attr(not(bootstrap), rustc_coinductive)] pub trait Sized { // Empty. 
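Returning to the `unchecked_iterator.rs` addition above: the real `UncheckedIterator` is crate-private, so the following is a self-contained stand-in (hypothetical trait and method names) showing the `unwrap_unchecked` pattern its default `next_unchecked` relies on:

trait NextUnchecked: Iterator {
    /// # Safety
    /// The caller must guarantee the iterator still has at least one item.
    unsafe fn next_unchecked_demo(&mut self) -> Self::Item {
        // SAFETY: forwarded from the caller's guarantee of non-emptiness.
        unsafe { self.next().unwrap_unchecked() }
    }
}

impl<I: Iterator> NextUnchecked for I {}

fn main() {
    let mut it = [1, 2, 3].into_iter();
    // SAFETY: the array iterator has three items remaining.
    let first = unsafe { it.next_unchecked_demo() };
    assert_eq!(first, 1);
}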
} @@ -469,6 +470,62 @@ pub macro Copy($item:item) { #[cfg_attr(not(test), rustc_diagnostic_item = "Sync")] #[lang = "sync"] #[rustc_on_unimplemented( + on( + _Self = "std::cell::OnceCell<T>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::OnceLock` instead" + ), + on( + _Self = "std::cell::Cell<u8>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU8` instead", + ), + on( + _Self = "std::cell::Cell<u16>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU16` instead", + ), + on( + _Self = "std::cell::Cell<u32>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU32` instead", + ), + on( + _Self = "std::cell::Cell<u64>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicU64` instead", + ), + on( + _Self = "std::cell::Cell<usize>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicUsize` instead", + ), + on( + _Self = "std::cell::Cell<i8>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI8` instead", + ), + on( + _Self = "std::cell::Cell<i16>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI16` instead", + ), + on( + _Self = "std::cell::Cell<i32>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI32` instead", + ), + on( + _Self = "std::cell::Cell<i64>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicI64` instead", + ), + on( + _Self = "std::cell::Cell<isize>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicIsize` instead", + ), + on( + _Self = "std::cell::Cell<bool>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` or `std::sync::atomic::AtomicBool` instead", + ), + on( + _Self = "std::cell::Cell<T>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock`", + ), + on( + _Self = "std::cell::RefCell<T>", + note = "if you want to do aliasing and mutation between multiple threads, use `std::sync::RwLock` instead", + ), message = "`{Self}` cannot be shared between threads safely", label = "`{Self}` cannot be shared between threads safely" )] @@ -815,14 +872,18 @@ pub trait Destruct {} #[rustc_deny_explicit_impl] pub trait Tuple {} -/// A marker for things -#[unstable(feature = "pointer_sized_trait", issue = "none")] -#[lang = "pointer_sized"] +/// A marker for pointer-like types. +/// +/// All types that have the same size and alignment as a `usize` or +/// `*const ()` automatically implement this trait. 
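The batch of `rustc_on_unimplemented` notes above fires when a `Cell`-family type is shared across a thread boundary, and each note points at a `Sync` alternative. A small example of the suggested fix, swapping a non-`Sync` `Cell<u32>` counter for the `AtomicU32` the diagnostic recommends:

use std::sync::atomic::{AtomicU32, Ordering};
use std::thread;

// A `static` of `Cell<u32>` would be rejected with the note above;
// the suggested atomic replacement compiles and is safe to share.
static COUNTER: AtomicU32 = AtomicU32::new(0);

fn main() {
    let handles: Vec<_> =
        (0..4).map(|_| thread::spawn(|| COUNTER.fetch_add(1, Ordering::Relaxed))).collect();
    for h in handles {
        h.join().unwrap();
    }
    assert_eq!(COUNTER.load(Ordering::Relaxed), 4);
}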
+#[unstable(feature = "pointer_like_trait", issue = "none")] +#[cfg_attr(bootstrap, lang = "pointer_sized")] +#[cfg_attr(not(bootstrap), lang = "pointer_like")] #[rustc_on_unimplemented( - message = "`{Self}` needs to be a pointer-sized type", - label = "`{Self}` needs to be a pointer-sized type" + message = "`{Self}` needs to have the same alignment and size as a pointer", + label = "`{Self}` needs to be a pointer-like type" )] -pub trait PointerSized {} +pub trait PointerLike {} /// Implementations of `Copy` for primitive types. /// diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs index 5e01ccc07..a67df7ed5 100644 --- a/library/core/src/mem/mod.rs +++ b/library/core/src/mem/mod.rs @@ -682,7 +682,6 @@ pub unsafe fn zeroed<T>() -> T { pub unsafe fn uninitialized<T>() -> T { // SAFETY: the caller must guarantee that an uninitialized value is valid for `T`. unsafe { - #[cfg(not(bootstrap))] // If the compiler hits this itself then it deserves the UB. intrinsics::assert_mem_uninitialized_valid::<T>(); let mut val = MaybeUninit::<T>::uninit(); diff --git a/library/std/src/net/display_buffer.rs b/library/core/src/net/display_buffer.rs index 7aadf06e9..7aadf06e9 100644 --- a/library/std/src/net/display_buffer.rs +++ b/library/core/src/net/display_buffer.rs diff --git a/library/core/src/net/ip_addr.rs b/library/core/src/net/ip_addr.rs new file mode 100644 index 000000000..954d88d54 --- /dev/null +++ b/library/core/src/net/ip_addr.rs @@ -0,0 +1,2070 @@ +use crate::cmp::Ordering; +use crate::fmt::{self, Write}; +use crate::mem::transmute; + +use super::display_buffer::DisplayBuffer; + +/// An IP address, either IPv4 or IPv6. +/// +/// This enum can contain either an [`Ipv4Addr`] or an [`Ipv6Addr`], see their +/// respective documentation for more details. +/// +/// # Examples +/// +/// ``` +/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +/// +/// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); +/// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); +/// +/// assert_eq!("127.0.0.1".parse(), Ok(localhost_v4)); +/// assert_eq!("::1".parse(), Ok(localhost_v6)); +/// +/// assert_eq!(localhost_v4.is_ipv6(), false); +/// assert_eq!(localhost_v4.is_ipv4(), true); +/// ``` +#[cfg_attr(not(test), rustc_diagnostic_item = "IpAddr")] +#[stable(feature = "ip_addr", since = "1.7.0")] +#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] +pub enum IpAddr { + /// An IPv4 address. + #[stable(feature = "ip_addr", since = "1.7.0")] + V4(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv4Addr), + /// An IPv6 address. + #[stable(feature = "ip_addr", since = "1.7.0")] + V6(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv6Addr), +} + +/// An IPv4 address. +/// +/// IPv4 addresses are defined as 32-bit integers in [IETF RFC 791]. +/// They are usually represented as four octets. +/// +/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses. +/// +/// [IETF RFC 791]: https://tools.ietf.org/html/rfc791 +/// +/// # Textual representation +/// +/// `Ipv4Addr` provides a [`FromStr`] implementation. The four octets are in decimal +/// notation, divided by `.` (this is called "dot-decimal notation"). +/// Notably, octal numbers (which are indicated with a leading `0`) and hexadecimal numbers (which +/// are indicated with a leading `0x`) are not allowed per [IETF RFC 6943]. 
+/// +/// [IETF RFC 6943]: https://tools.ietf.org/html/rfc6943#section-3.1.1 +/// [`FromStr`]: crate::str::FromStr +/// +/// # Examples +/// +/// ``` +/// use std::net::Ipv4Addr; +/// +/// let localhost = Ipv4Addr::new(127, 0, 0, 1); +/// assert_eq!("127.0.0.1".parse(), Ok(localhost)); +/// assert_eq!(localhost.is_loopback(), true); +/// assert!("012.004.002.000".parse::<Ipv4Addr>().is_err()); // all octets are in octal +/// assert!("0000000.0.0.0".parse::<Ipv4Addr>().is_err()); // first octet is a zero in octal +/// assert!("0xcb.0x0.0x71.0x00".parse::<Ipv4Addr>().is_err()); // all octets are in hex +/// ``` +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Ipv4Addr { + octets: [u8; 4], +} + +/// An IPv6 address. +/// +/// IPv6 addresses are defined as 128-bit integers in [IETF RFC 4291]. +/// They are usually represented as eight 16-bit segments. +/// +/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291 +/// +/// # Embedding IPv4 Addresses +/// +/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses. +/// +/// To assist in the transition from IPv4 to IPv6 two types of IPv6 addresses that embed an IPv4 address were defined: +/// IPv4-compatible and IPv4-mapped addresses. Of these IPv4-compatible addresses have been officially deprecated. +/// +/// Both types of addresses are not assigned any special meaning by this implementation, +/// other than what the relevant standards prescribe. This means that an address like `::ffff:127.0.0.1`, +/// while representing an IPv4 loopback address, is not itself an IPv6 loopback address; only `::1` is. +/// To handle these so called "IPv4-in-IPv6" addresses, they have to first be converted to their canonical IPv4 address. +/// +/// ### IPv4-Compatible IPv6 Addresses +/// +/// IPv4-compatible IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.1], and have been officially deprecated. +/// The RFC describes the format of an "IPv4-Compatible IPv6 address" as follows: +/// +/// ```text +/// | 80 bits | 16 | 32 bits | +/// +--------------------------------------+--------------------------+ +/// |0000..............................0000|0000| IPv4 address | +/// +--------------------------------------+----+---------------------+ +/// ``` +/// So `::a.b.c.d` would be an IPv4-compatible IPv6 address representing the IPv4 address `a.b.c.d`. +/// +/// To convert from an IPv4 address to an IPv4-compatible IPv6 address, use [`Ipv4Addr::to_ipv6_compatible`]. +/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-compatible IPv6 address to the canonical IPv4 address. +/// +/// [IETF RFC 4291 Section 2.5.5.1]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.1 +/// +/// ### IPv4-Mapped IPv6 Addresses +/// +/// IPv4-mapped IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.2]. +/// The RFC describes the format of an "IPv4-Mapped IPv6 address" as follows: +/// +/// ```text +/// | 80 bits | 16 | 32 bits | +/// +--------------------------------------+--------------------------+ +/// |0000..............................0000|FFFF| IPv4 address | +/// +--------------------------------------+----+---------------------+ +/// ``` +/// So `::ffff:a.b.c.d` would be an IPv4-mapped IPv6 address representing the IPv4 address `a.b.c.d`. +/// +/// To convert from an IPv4 address to an IPv4-mapped IPv6 address, use [`Ipv4Addr::to_ipv6_mapped`]. +/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-mapped IPv6 address to the canonical IPv4 address. 
+/// Note that this will also convert the IPv6 loopback address `::1` to `0.0.0.1`. Use +/// [`Ipv6Addr::to_ipv4_mapped`] to avoid this. +/// +/// [IETF RFC 4291 Section 2.5.5.2]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.2 +/// +/// # Textual representation +/// +/// `Ipv6Addr` provides a [`FromStr`] implementation. There are many ways to represent +/// an IPv6 address in text, but in general, each segment is written in hexadecimal +/// notation, and segments are separated by `:`. For more information, see +/// [IETF RFC 5952]. +/// +/// [`FromStr`]: crate::str::FromStr +/// [IETF RFC 5952]: https://tools.ietf.org/html/rfc5952 +/// +/// # Examples +/// +/// ``` +/// use std::net::Ipv6Addr; +/// +/// let localhost = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); +/// assert_eq!("::1".parse(), Ok(localhost)); +/// assert_eq!(localhost.is_loopback(), true); +/// ``` +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Ipv6Addr { + octets: [u8; 16], +} + +/// Scope of an [IPv6 multicast address] as defined in [IETF RFC 7346 section 2]. +/// +/// # Stability Guarantees +/// +/// Not all possible values for a multicast scope have been assigned. +/// Future RFCs may introduce new scopes, which will be added as variants to this enum; +/// because of this the enum is marked as `#[non_exhaustive]`. +/// +/// # Examples +/// ``` +/// #![feature(ip)] +/// +/// use std::net::Ipv6Addr; +/// use std::net::Ipv6MulticastScope::*; +/// +/// // An IPv6 multicast address with global scope (`ff0e::`). +/// let address = Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0); +/// +/// // Will print "Global scope". +/// match address.multicast_scope() { +/// Some(InterfaceLocal) => println!("Interface-Local scope"), +/// Some(LinkLocal) => println!("Link-Local scope"), +/// Some(RealmLocal) => println!("Realm-Local scope"), +/// Some(AdminLocal) => println!("Admin-Local scope"), +/// Some(SiteLocal) => println!("Site-Local scope"), +/// Some(OrganizationLocal) => println!("Organization-Local scope"), +/// Some(Global) => println!("Global scope"), +/// Some(_) => println!("Unknown scope"), +/// None => println!("Not a multicast address!") +/// } +/// +/// ``` +/// +/// [IPv6 multicast address]: Ipv6Addr +/// [IETF RFC 7346 section 2]: https://tools.ietf.org/html/rfc7346#section-2 +#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] +#[unstable(feature = "ip", issue = "27709")] +#[non_exhaustive] +pub enum Ipv6MulticastScope { + /// Interface-Local scope. + InterfaceLocal, + /// Link-Local scope. + LinkLocal, + /// Realm-Local scope. + RealmLocal, + /// Admin-Local scope. + AdminLocal, + /// Site-Local scope. + SiteLocal, + /// Organization-Local scope. + OrganizationLocal, + /// Global scope. + Global, +} + +impl IpAddr { + /// Returns [`true`] for the special 'unspecified' address. + /// + /// See the documentation for [`Ipv4Addr::is_unspecified()`] and + /// [`Ipv6Addr::is_unspecified()`] for more details.
+ /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)).is_unspecified(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)).is_unspecified(), true); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "ip_shared", since = "1.12.0")] + #[must_use] + #[inline] + pub const fn is_unspecified(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_unspecified(), + IpAddr::V6(ip) => ip.is_unspecified(), + } + } + + /// Returns [`true`] if this is a loopback address. + /// + /// See the documentation for [`Ipv4Addr::is_loopback()`] and + /// [`Ipv6Addr::is_loopback()`] for more details. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).is_loopback(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1)).is_loopback(), true); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "ip_shared", since = "1.12.0")] + #[must_use] + #[inline] + pub const fn is_loopback(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_loopback(), + IpAddr::V6(ip) => ip.is_loopback(), + } + } + + /// Returns [`true`] if the address appears to be globally routable. + /// + /// See the documentation for [`Ipv4Addr::is_global()`] and + /// [`Ipv6Addr::is_global()`] for more details. + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(80, 9, 12, 3)).is_global(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1)).is_global(), true); + /// ``` + #[rustc_const_unstable(feature = "const_ip", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_global(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_global(), + IpAddr::V6(ip) => ip.is_global(), + } + } + + /// Returns [`true`] if this is a multicast address. + /// + /// See the documentation for [`Ipv4Addr::is_multicast()`] and + /// [`Ipv6Addr::is_multicast()`] for more details. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(224, 254, 0, 0)).is_multicast(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0)).is_multicast(), true); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "ip_shared", since = "1.12.0")] + #[must_use] + #[inline] + pub const fn is_multicast(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_multicast(), + IpAddr::V6(ip) => ip.is_multicast(), + } + } + + /// Returns [`true`] if this address is in a range designated for documentation. + /// + /// See the documentation for [`Ipv4Addr::is_documentation()`] and + /// [`Ipv6Addr::is_documentation()`] for more details. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_documentation(), true); + /// assert_eq!( + /// IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_documentation(), + /// true + /// ); + /// ``` + #[rustc_const_unstable(feature = "const_ip", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_documentation(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_documentation(), + IpAddr::V6(ip) => ip.is_documentation(), + } + } + + /// Returns [`true`] if this address is in a range designated for benchmarking. + /// + /// See the documentation for [`Ipv4Addr::is_benchmarking()`] and + /// [`Ipv6Addr::is_benchmarking()`] for more details. + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(198, 19, 255, 255)).is_benchmarking(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0)).is_benchmarking(), true); + /// ``` + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_benchmarking(&self) -> bool { + match self { + IpAddr::V4(ip) => ip.is_benchmarking(), + IpAddr::V6(ip) => ip.is_benchmarking(), + } + } + + /// Returns [`true`] if this address is an [`IPv4` address], and [`false`] + /// otherwise. + /// + /// [`IPv4` address]: IpAddr::V4 + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv4(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv4(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "ipaddr_checker", since = "1.16.0")] + #[must_use] + #[inline] + pub const fn is_ipv4(&self) -> bool { + matches!(self, IpAddr::V4(_)) + } + + /// Returns [`true`] if this address is an [`IPv6` address], and [`false`] + /// otherwise. + /// + /// [`IPv6` address]: IpAddr::V6 + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv6(), false); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv6(), true); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "ipaddr_checker", since = "1.16.0")] + #[must_use] + #[inline] + pub const fn is_ipv6(&self) -> bool { + matches!(self, IpAddr::V6(_)) + } + + /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped IPv6 address, otherwise it + /// returns `self` as-is.
+ /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).to_canonical().is_loopback(), true); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).is_loopback(), false); + /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).to_canonical().is_loopback(), true); + /// ``` + #[inline] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[rustc_const_unstable(feature = "const_ip", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + pub const fn to_canonical(&self) -> IpAddr { + match self { + &v4 @ IpAddr::V4(_) => v4, + IpAddr::V6(v6) => v6.to_canonical(), + } + } +} + +impl Ipv4Addr { + /// Creates a new IPv4 address from four eight-bit octets. + /// + /// The result will represent the IP address `a`.`b`.`c`.`d`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::new(127, 0, 0, 1); + /// ``` + #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[inline] + pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { + Ipv4Addr { octets: [a, b, c, d] } + } + + /// An IPv4 address with the address pointing to localhost: `127.0.0.1` + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::LOCALHOST; + /// assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1)); + /// ``` + #[stable(feature = "ip_constructors", since = "1.30.0")] + pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); + + /// An IPv4 address representing an unspecified address: `0.0.0.0` + /// + /// This corresponds to the constant `INADDR_ANY` in other languages. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::UNSPECIFIED; + /// assert_eq!(addr, Ipv4Addr::new(0, 0, 0, 0)); + /// ``` + #[doc(alias = "INADDR_ANY")] + #[stable(feature = "ip_constructors", since = "1.30.0")] + pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); + + /// An IPv4 address representing the broadcast address: `255.255.255.255` + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::BROADCAST; + /// assert_eq!(addr, Ipv4Addr::new(255, 255, 255, 255)); + /// ``` + #[stable(feature = "ip_constructors", since = "1.30.0")] + pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); + + /// Returns the four eight-bit integers that make up this address. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::new(127, 0, 0, 1); + /// assert_eq!(addr.octets(), [127, 0, 0, 1]); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[inline] + pub const fn octets(&self) -> [u8; 4] { + self.octets + } + + /// Returns [`true`] for the special 'unspecified' address (`0.0.0.0`). + /// + /// This property is defined in _UNIX Network Programming, Second Edition_, + /// W. Richard Stevens, p. 891; see also [ip7]. 
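Everything in the `Ipv4Addr` block above is built on the four-byte `octets` array; the `From<Ipv4Addr> for u32` impl later in this file reads the same bytes in big-endian order. A quick check of how the two views line up:

use std::net::Ipv4Addr;

fn main() {
    // The constructors and constants above all go through `octets`; the
    // same value viewed as a host-order `u32` uses big-endian byte order.
    let addr = Ipv4Addr::new(127, 0, 0, 1);
    assert_eq!(addr.octets(), [127, 0, 0, 1]);
    assert_eq!(u32::from_be_bytes(addr.octets()), 0x7f00_0001);
    assert_eq!(u32::from(addr), 0x7f00_0001);
}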
+ /// + /// [ip7]: https://man7.org/linux/man-pages/man7/ip.7.html + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_unspecified(), true); + /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_unspecified(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] + #[stable(feature = "ip_shared", since = "1.12.0")] + #[must_use] + #[inline] + pub const fn is_unspecified(&self) -> bool { + u32::from_be_bytes(self.octets) == 0 + } + + /// Returns [`true`] if this is a loopback address (`127.0.0.0/8`). + /// + /// This property is defined by [IETF RFC 1122]. + /// + /// [IETF RFC 1122]: https://tools.ietf.org/html/rfc1122 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_loopback(), true); + /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_loopback(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_loopback(&self) -> bool { + self.octets()[0] == 127 + } + + /// Returns [`true`] if this is a private address. + /// + /// The private address ranges are defined in [IETF RFC 1918] and include: + /// + /// - `10.0.0.0/8` + /// - `172.16.0.0/12` + /// - `192.168.0.0/16` + /// + /// [IETF RFC 1918]: https://tools.ietf.org/html/rfc1918 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(10, 0, 0, 1).is_private(), true); + /// assert_eq!(Ipv4Addr::new(10, 10, 10, 10).is_private(), true); + /// assert_eq!(Ipv4Addr::new(172, 16, 10, 10).is_private(), true); + /// assert_eq!(Ipv4Addr::new(172, 29, 45, 14).is_private(), true); + /// assert_eq!(Ipv4Addr::new(172, 32, 0, 2).is_private(), false); + /// assert_eq!(Ipv4Addr::new(192, 168, 0, 2).is_private(), true); + /// assert_eq!(Ipv4Addr::new(192, 169, 0, 2).is_private(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_private(&self) -> bool { + match self.octets() { + [10, ..] => true, + [172, b, ..] if b >= 16 && b <= 31 => true, + [192, 168, ..] => true, + _ => false, + } + } + + /// Returns [`true`] if the address is link-local (`169.254.0.0/16`). + /// + /// This property is defined by [IETF RFC 3927]. + /// + /// [IETF RFC 3927]: https://tools.ietf.org/html/rfc3927 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(169, 254, 0, 0).is_link_local(), true); + /// assert_eq!(Ipv4Addr::new(169, 254, 10, 65).is_link_local(), true); + /// assert_eq!(Ipv4Addr::new(16, 89, 10, 65).is_link_local(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_link_local(&self) -> bool { + matches!(self.octets(), [169, 254, ..]) + } + + /// Returns [`true`] if the address appears to be globally reachable + /// as specified by the [IANA IPv4 Special-Purpose Address Registry]. + /// Whether or not an address is practically reachable will depend on your network configuration. + /// + /// Most IPv4 addresses are globally reachable; + /// unless they are specifically defined as *not* globally reachable. 
+ /// + /// Non-exhaustive list of notable addresses that are not globally reachable: + /// + /// - The [unspecified address] ([`is_unspecified`](Ipv4Addr::is_unspecified)) + /// - Addresses reserved for private use ([`is_private`](Ipv4Addr::is_private)) + /// - Addresses in the shared address space ([`is_shared`](Ipv4Addr::is_shared)) + /// - Loopback addresses ([`is_loopback`](Ipv4Addr::is_loopback)) + /// - Link-local addresses ([`is_link_local`](Ipv4Addr::is_link_local)) + /// - Addresses reserved for documentation ([`is_documentation`](Ipv4Addr::is_documentation)) + /// - Addresses reserved for benchmarking ([`is_benchmarking`](Ipv4Addr::is_benchmarking)) + /// - Reserved addresses ([`is_reserved`](Ipv4Addr::is_reserved)) + /// - The [broadcast address] ([`is_broadcast`](Ipv4Addr::is_broadcast)) + /// + /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv4 Special-Purpose Address Registry]. + /// + /// [IANA IPv4 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml + /// [unspecified address]: Ipv4Addr::UNSPECIFIED + /// [broadcast address]: Ipv4Addr::BROADCAST + + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::Ipv4Addr; + /// + /// // Most IPv4 addresses are globally reachable: + /// assert_eq!(Ipv4Addr::new(80, 9, 12, 3).is_global(), true); + /// + /// // However some addresses have been assigned a special meaning + /// // that makes them not globally reachable. Some examples are: + /// + /// // The unspecified address (`0.0.0.0`) + /// assert_eq!(Ipv4Addr::UNSPECIFIED.is_global(), false); + /// + /// // Addresses reserved for private use (`10.0.0.0/8`, `172.16.0.0/12`, 192.168.0.0/16) + /// assert_eq!(Ipv4Addr::new(10, 254, 0, 0).is_global(), false); + /// assert_eq!(Ipv4Addr::new(192, 168, 10, 65).is_global(), false); + /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_global(), false); + /// + /// // Addresses in the shared address space (`100.64.0.0/10`) + /// assert_eq!(Ipv4Addr::new(100, 100, 0, 0).is_global(), false); + /// + /// // The loopback addresses (`127.0.0.0/8`) + /// assert_eq!(Ipv4Addr::LOCALHOST.is_global(), false); + /// + /// // Link-local addresses (`169.254.0.0/16`) + /// assert_eq!(Ipv4Addr::new(169, 254, 45, 1).is_global(), false); + /// + /// // Addresses reserved for documentation (`192.0.2.0/24`, `198.51.100.0/24`, `203.0.113.0/24`) + /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_global(), false); + /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_global(), false); + /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_global(), false); + /// + /// // Addresses reserved for benchmarking (`198.18.0.0/15`) + /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_global(), false); + /// + /// // Reserved addresses (`240.0.0.0/4`) + /// assert_eq!(Ipv4Addr::new(250, 10, 20, 30).is_global(), false); + /// + /// // The broadcast address (`255.255.255.255`) + /// assert_eq!(Ipv4Addr::BROADCAST.is_global(), false); + /// + /// // For a complete overview see the IANA IPv4 Special-Purpose Address Registry. 
+ /// ``` + #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_global(&self) -> bool { + !(self.octets()[0] == 0 // "This network" + || self.is_private() + || self.is_shared() + || self.is_loopback() + || self.is_link_local() + // addresses reserved for future protocols (`192.0.0.0/24`) + ||(self.octets()[0] == 192 && self.octets()[1] == 0 && self.octets()[2] == 0) + || self.is_documentation() + || self.is_benchmarking() + || self.is_reserved() + || self.is_broadcast()) + } + + /// Returns [`true`] if this address is part of the Shared Address Space defined in + /// [IETF RFC 6598] (`100.64.0.0/10`). + /// + /// [IETF RFC 6598]: https://tools.ietf.org/html/rfc6598 + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(100, 64, 0, 0).is_shared(), true); + /// assert_eq!(Ipv4Addr::new(100, 127, 255, 255).is_shared(), true); + /// assert_eq!(Ipv4Addr::new(100, 128, 0, 0).is_shared(), false); + /// ``` + #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_shared(&self) -> bool { + self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000) + } + + /// Returns [`true`] if this address is part of the `198.18.0.0/15` range, which is reserved for + /// network device benchmarking. This range is defined in [IETF RFC 2544] as `192.18.0.0` + /// through `198.19.255.255` but [errata 423] corrects it to `198.18.0.0/15`. + /// + /// [IETF RFC 2544]: https://tools.ietf.org/html/rfc2544 + /// [errata 423]: https://www.rfc-editor.org/errata/eid423 + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(198, 17, 255, 255).is_benchmarking(), false); + /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_benchmarking(), true); + /// assert_eq!(Ipv4Addr::new(198, 19, 255, 255).is_benchmarking(), true); + /// assert_eq!(Ipv4Addr::new(198, 20, 0, 0).is_benchmarking(), false); + /// ``` + #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_benchmarking(&self) -> bool { + self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 + } + + /// Returns [`true`] if this address is reserved by IANA for future use. [IETF RFC 1112] + /// defines the block of reserved addresses as `240.0.0.0/4`. This range normally includes the + /// broadcast address `255.255.255.255`, but this implementation explicitly excludes it, since + /// it is obviously not reserved for future use. + /// + /// [IETF RFC 1112]: https://tools.ietf.org/html/rfc1112 + /// + /// # Warning + /// + /// As IANA assigns new addresses, this method will be + /// updated. This may result in non-reserved addresses being + /// treated as reserved in code that relies on an outdated version + /// of this method.
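The `is_shared` mask above is easy to misread: requiring the top two bits of the second octet to be `01` selects exactly `64..=127`, hence the `100.64.0.0/10` block. A standalone check of the boundary values:

fn main() {
    // Same bit test as `is_shared`, applied to the second octet only.
    let in_range = |b: u8| (b & 0b1100_0000) == 0b0100_0000;
    assert!(in_range(64) && in_range(127));
    assert!(!in_range(63) && !in_range(128));
}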
+ /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(240, 0, 0, 0).is_reserved(), true); + /// assert_eq!(Ipv4Addr::new(255, 255, 255, 254).is_reserved(), true); + /// + /// assert_eq!(Ipv4Addr::new(239, 255, 255, 255).is_reserved(), false); + /// // The broadcast address is not considered reserved for future use by this implementation + /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_reserved(), false); + /// ``` + #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_reserved(&self) -> bool { + self.octets()[0] & 240 == 240 && !self.is_broadcast() + } + + /// Returns [`true`] if this is a multicast address (`224.0.0.0/4`). + /// + /// Multicast addresses have a most significant octet between `224` and `239`, + /// and are defined by [IETF RFC 5771]. + /// + /// [IETF RFC 5771]: https://tools.ietf.org/html/rfc5771 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(224, 254, 0, 0).is_multicast(), true); + /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_multicast(), true); + /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_multicast(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_multicast(&self) -> bool { + self.octets()[0] >= 224 && self.octets()[0] <= 239 + } + + /// Returns [`true`] if this is a broadcast address (`255.255.255.255`). + /// + /// A broadcast address has all octets set to `255` as defined in [IETF RFC 919]. + /// + /// [IETF RFC 919]: https://tools.ietf.org/html/rfc919 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_broadcast(), true); + /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_broadcast(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_broadcast(&self) -> bool { + u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets()) + } + + /// Returns [`true`] if this address is in a range designated for documentation. + /// + /// This is defined in [IETF RFC 5737]: + /// + /// - `192.0.2.0/24` (TEST-NET-1) + /// - `198.51.100.0/24` (TEST-NET-2) + /// - `203.0.113.0/24` (TEST-NET-3) + /// + /// [IETF RFC 5737]: https://tools.ietf.org/html/rfc5737 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_documentation(), true); + /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_documentation(), true); + /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_documentation(), true); + /// assert_eq!(Ipv4Addr::new(193, 34, 17, 19).is_documentation(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_documentation(&self) -> bool { + matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _]) + } + + /// Converts this address to an [IPv4-compatible] [`IPv6` address]. + /// + /// `a.b.c.d` becomes `::a.b.c.d` + /// + /// Note that IPv4-compatible addresses have been officially deprecated.
+ /// If you don't explicitly need an IPv4-compatible address for legacy reasons, consider using `to_ipv6_mapped` instead. + /// + /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses + /// [`IPv6` address]: Ipv6Addr + /// + /// # Examples + /// + /// ``` + /// use std::net::{Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!( + /// Ipv4Addr::new(192, 0, 2, 255).to_ipv6_compatible(), + /// Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x2ff) + /// ); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d] } + } + + /// Converts this address to an [IPv4-mapped] [`IPv6` address]. + /// + /// `a.b.c.d` becomes `::ffff:a.b.c.d` + /// + /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses + /// [`IPv6` address]: Ipv6Addr + /// + /// # Examples + /// + /// ``` + /// use std::net::{Ipv4Addr, Ipv6Addr}; + /// + /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).to_ipv6_mapped(), + /// Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x2ff)); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { + let [a, b, c, d] = self.octets(); + Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d] } + } +} + +#[stable(feature = "ip_addr", since = "1.7.0")] +impl fmt::Display for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + IpAddr::V4(ip) => ip.fmt(fmt), + IpAddr::V6(ip) => ip.fmt(fmt), + } + } +} + +#[stable(feature = "ip_addr", since = "1.7.0")] +impl fmt::Debug for IpAddr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} + +#[stable(feature = "ip_from_ip", since = "1.16.0")] +impl From<Ipv4Addr> for IpAddr { + /// Copies this address to a new `IpAddr::V4`. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr}; + /// + /// let addr = Ipv4Addr::new(127, 0, 0, 1); + /// + /// assert_eq!( + /// IpAddr::V4(addr), + /// IpAddr::from(addr) + /// ) + /// ``` + #[inline] + fn from(ipv4: Ipv4Addr) -> IpAddr { + IpAddr::V4(ipv4) + } +} + +#[stable(feature = "ip_from_ip", since = "1.16.0")] +impl From<Ipv6Addr> for IpAddr { + /// Copies this address to a new `IpAddr::V6`. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv6Addr}; + /// + /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff); + /// + /// assert_eq!( + /// IpAddr::V6(addr), + /// IpAddr::from(addr) + /// ); + /// ``` + #[inline] + fn from(ipv6: Ipv6Addr) -> IpAddr { + IpAddr::V6(ipv6) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Display for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let octets = self.octets(); + + // If there are no alignment requirements, write the IP address directly to `f`. + // Otherwise, write it to a local buffer and then use `f.pad`. 
+ if fmt.precision().is_none() && fmt.width().is_none() { + write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]) + } else { + const LONGEST_IPV4_ADDR: &str = "255.255.255.255"; + + let mut buf = DisplayBuffer::<{ LONGEST_IPV4_ADDR.len() }>::new(); + // Buffer is long enough for the longest possible IPv4 address, so this should never fail. + write!(buf, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap(); + + fmt.pad(buf.as_str()) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for Ipv4Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialEq<Ipv4Addr> for IpAddr { + #[inline] + fn eq(&self, other: &Ipv4Addr) -> bool { + match self { + IpAddr::V4(v4) => v4 == other, + IpAddr::V6(_) => false, + } + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialEq<IpAddr> for Ipv4Addr { + #[inline] + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(v4) => self == v4, + IpAddr::V6(_) => false, + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl PartialOrd for Ipv4Addr { + #[inline] + fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialOrd<Ipv4Addr> for IpAddr { + #[inline] + fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> { + match self { + IpAddr::V4(v4) => v4.partial_cmp(other), + IpAddr::V6(_) => Some(Ordering::Greater), + } + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialOrd<IpAddr> for Ipv4Addr { + #[inline] + fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> { + match other { + IpAddr::V4(v4) => self.partial_cmp(v4), + IpAddr::V6(_) => Some(Ordering::Less), + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Ord for Ipv4Addr { + #[inline] + fn cmp(&self, other: &Ipv4Addr) -> Ordering { + self.octets.cmp(&other.octets) + } +} + +#[stable(feature = "ip_u32", since = "1.1.0")] +impl From<Ipv4Addr> for u32 { + /// Converts an `Ipv4Addr` into a host byte order `u32`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78); + /// assert_eq!(0x12345678, u32::from(addr)); + /// ``` + #[inline] + fn from(ip: Ipv4Addr) -> u32 { + u32::from_be_bytes(ip.octets) + } +} + +#[stable(feature = "ip_u32", since = "1.1.0")] +impl From<u32> for Ipv4Addr { + /// Converts a host byte order `u32` into an `Ipv4Addr`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::from(0x12345678); + /// assert_eq!(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78), addr); + /// ``` + #[inline] + fn from(ip: u32) -> Ipv4Addr { + Ipv4Addr { octets: ip.to_be_bytes() } + } +} + +#[stable(feature = "from_slice_v4", since = "1.9.0")] +impl From<[u8; 4]> for Ipv4Addr { + /// Creates an `Ipv4Addr` from a four element byte array. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv4Addr; + /// + /// let addr = Ipv4Addr::from([13u8, 12u8, 11u8, 10u8]); + /// assert_eq!(Ipv4Addr::new(13, 12, 11, 10), addr); + /// ``` + #[inline] + fn from(octets: [u8; 4]) -> Ipv4Addr { + Ipv4Addr { octets } + } +} + +#[stable(feature = "ip_from_slice", since = "1.17.0")] +impl From<[u8; 4]> for IpAddr { + /// Creates an `IpAddr::V4` from a four element byte array. 
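The `Display` branch above only takes the `DisplayBuffer` detour when a width or precision is present, because `Formatter::pad` needs the complete string to apply alignment and truncation. The observable effect:

use std::net::Ipv4Addr;

fn main() {
    let addr = Ipv4Addr::new(8, 8, 8, 8);
    // No width/precision: written straight through to the formatter.
    assert_eq!(format!("{addr}"), "8.8.8.8");
    // Width: staged in the buffer, then padded by `f.pad`.
    assert_eq!(format!("{addr:>11}"), "    8.8.8.8");
    // Precision acts as a maximum length for the padded string.
    assert_eq!(format!("{addr:.3}"), "8.8");
}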
+ /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr}; + /// + /// let addr = IpAddr::from([13u8, 12u8, 11u8, 10u8]); + /// assert_eq!(IpAddr::V4(Ipv4Addr::new(13, 12, 11, 10)), addr); + /// ``` + #[inline] + fn from(octets: [u8; 4]) -> IpAddr { + IpAddr::V4(Ipv4Addr::from(octets)) + } +} + +impl Ipv6Addr { + /// Creates a new IPv6 address from eight 16-bit segments. + /// + /// The result will represent the IP address `a:b:c:d:e:f:g:h`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff); + /// ``` + #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[inline] + pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr { + let addr16 = [ + a.to_be(), + b.to_be(), + c.to_be(), + d.to_be(), + e.to_be(), + f.to_be(), + g.to_be(), + h.to_be(), + ]; + Ipv6Addr { + // All elements in `addr16` are big endian. + // SAFETY: `[u16; 8]` is always safe to transmute to `[u8; 16]`. + octets: unsafe { transmute::<_, [u8; 16]>(addr16) }, + } + } + + /// An IPv6 address representing localhost: `::1`. + /// + /// This corresponds to constant `IN6ADDR_LOOPBACK_INIT` or `in6addr_loopback` in other + /// languages. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::LOCALHOST; + /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); + /// ``` + #[doc(alias = "IN6ADDR_LOOPBACK_INIT")] + #[doc(alias = "in6addr_loopback")] + #[stable(feature = "ip_constructors", since = "1.30.0")] + pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + + /// An IPv6 address representing the unspecified address: `::` + /// + /// This corresponds to constant `IN6ADDR_ANY_INIT` or `in6addr_any` in other languages. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::UNSPECIFIED; + /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)); + /// ``` + #[doc(alias = "IN6ADDR_ANY_INIT")] + #[doc(alias = "in6addr_any")] + #[stable(feature = "ip_constructors", since = "1.30.0")] + pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); + + /// Returns the eight 16-bit segments that make up this address. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).segments(), + /// [0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff]); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[inline] + pub const fn segments(&self) -> [u16; 8] { + // All elements in `self.octets` must be big endian. + // SAFETY: `[u8; 16]` is always safe to transmute to `[u16; 8]`. + let [a, b, c, d, e, f, g, h] = unsafe { transmute::<_, [u16; 8]>(self.octets) }; + // We want native endian u16 + [ + u16::from_be(a), + u16::from_be(b), + u16::from_be(c), + u16::from_be(d), + u16::from_be(e), + u16::from_be(f), + u16::from_be(g), + u16::from_be(h), + ] + } + + /// Returns [`true`] for the special 'unspecified' address (`::`). + /// + /// This property is defined in [IETF RFC 4291]. 
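In the `Ipv6Addr::new`/`segments` pair above, each 16-bit segment is stored big-endian inside the `octets` array and converted back to native endianness on read, so both views describe the same address bytes:

use std::net::Ipv6Addr;

fn main() {
    // `new` stores each segment big-endian in `octets`; `segments` converts
    // back to native-endian `u16`s, so the two views always agree.
    let addr = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1);
    assert_eq!(addr.segments(), [0x2001, 0xdb8, 0, 0, 0, 0, 0, 1]);
    assert_eq!(addr.octets()[..2], [0x20, 0x01]);
    assert_eq!(addr.octets()[14..], [0x00, 0x01]);
}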
+    ///
+    /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::net::Ipv6Addr;
+    ///
+    /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unspecified(), false);
+    /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).is_unspecified(), true);
+    /// ```
+    #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+    #[stable(since = "1.7.0", feature = "ip_17")]
+    #[must_use]
+    #[inline]
+    pub const fn is_unspecified(&self) -> bool {
+        u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets())
+    }
+
+    /// Returns [`true`] if this is the [loopback address] (`::1`),
+    /// as defined in [IETF RFC 4291 section 2.5.3].
+    ///
+    /// Contrary to IPv4, in IPv6 there is only one loopback address.
+    ///
+    /// [loopback address]: Ipv6Addr::LOCALHOST
+    /// [IETF RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::net::Ipv6Addr;
+    ///
+    /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_loopback(), false);
+    /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_loopback(), true);
+    /// ```
+    #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+    #[stable(since = "1.7.0", feature = "ip_17")]
+    #[must_use]
+    #[inline]
+    pub const fn is_loopback(&self) -> bool {
+        u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets())
+    }
+
+    /// Returns [`true`] if the address appears to be globally reachable
+    /// as specified by the [IANA IPv6 Special-Purpose Address Registry].
+    /// Whether or not an address is practically reachable will depend on your network configuration.
+    ///
+    /// Most IPv6 addresses are globally reachable,
+    /// unless they are specifically defined as *not* globally reachable.
+    ///
+    /// Non-exhaustive list of notable addresses that are not globally reachable:
+    /// - The [unspecified address] ([`is_unspecified`](Ipv6Addr::is_unspecified))
+    /// - The [loopback address] ([`is_loopback`](Ipv6Addr::is_loopback))
+    /// - IPv4-mapped addresses
+    /// - Addresses reserved for benchmarking
+    /// - Addresses reserved for documentation ([`is_documentation`](Ipv6Addr::is_documentation))
+    /// - Unique local addresses ([`is_unique_local`](Ipv6Addr::is_unique_local))
+    /// - Unicast addresses with link-local scope ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local))
+    ///
+    /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv6 Special-Purpose Address Registry].
+    ///
+    /// Note that an address having global scope is not the same as being globally reachable,
+    /// and there is no direct relation between the two concepts: There exist addresses with global scope
+    /// that are not globally reachable (for example unique local addresses),
+    /// and addresses that are globally reachable without having global scope
+    /// (multicast addresses with non-global scope).
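+    /// (The scope of a multicast address can be queried separately via
+    /// [`multicast_scope`](Ipv6Addr::multicast_scope).)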
+ /// + /// [IANA IPv6 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml + /// [unspecified address]: Ipv6Addr::UNSPECIFIED + /// [loopback address]: Ipv6Addr::LOCALHOST + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::Ipv6Addr; + /// + /// // Most IPv6 addresses are globally reachable: + /// assert_eq!(Ipv6Addr::new(0x26, 0, 0x1c9, 0, 0, 0xafc8, 0x10, 0x1).is_global(), true); + /// + /// // However some addresses have been assigned a special meaning + /// // that makes them not globally reachable. Some examples are: + /// + /// // The unspecified address (`::`) + /// assert_eq!(Ipv6Addr::UNSPECIFIED.is_global(), false); + /// + /// // The loopback address (`::1`) + /// assert_eq!(Ipv6Addr::LOCALHOST.is_global(), false); + /// + /// // IPv4-mapped addresses (`::ffff:0:0/96`) + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_global(), false); + /// + /// // Addresses reserved for benchmarking (`2001:2::/48`) + /// assert_eq!(Ipv6Addr::new(0x2001, 2, 0, 0, 0, 0, 0, 1,).is_global(), false); + /// + /// // Addresses reserved for documentation (`2001:db8::/32`) + /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1).is_global(), false); + /// + /// // Unique local addresses (`fc00::/7`) + /// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 1).is_global(), false); + /// + /// // Unicast addresses with link-local scope (`fe80::/10`) + /// assert_eq!(Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 1).is_global(), false); + /// + /// // For a complete overview see the IANA IPv6 Special-Purpose Address Registry. + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_global(&self) -> bool { + !(self.is_unspecified() + || self.is_loopback() + // IPv4-mapped Address (`::ffff:0:0/96`) + || matches!(self.segments(), [0, 0, 0, 0, 0, 0xffff, _, _]) + // IPv4-IPv6 Translat. (`64:ff9b:1::/48`) + || matches!(self.segments(), [0x64, 0xff9b, 1, _, _, _, _, _]) + // Discard-Only Address Block (`100::/64`) + || matches!(self.segments(), [0x100, 0, 0, 0, _, _, _, _]) + // IETF Protocol Assignments (`2001::/23`) + || (matches!(self.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200) + && !( + // Port Control Protocol Anycast (`2001:1::1`) + u128::from_be_bytes(self.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001 + // Traversal Using Relays around NAT Anycast (`2001:1::2`) + || u128::from_be_bytes(self.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002 + // AMT (`2001:3::/32`) + || matches!(self.segments(), [0x2001, 3, _, _, _, _, _, _]) + // AS112-v6 (`2001:4:112::/48`) + || matches!(self.segments(), [0x2001, 4, 0x112, _, _, _, _, _]) + // ORCHIDv2 (`2001:20::/28`) + || matches!(self.segments(), [0x2001, b, _, _, _, _, _, _] if b >= 0x20 && b <= 0x2F) + )) + || self.is_documentation() + || self.is_unique_local() + || self.is_unicast_link_local()) + } + + /// Returns [`true`] if this is a unique local address (`fc00::/7`). + /// + /// This property is defined in [IETF RFC 4193]. 
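+    /// Membership is a pure prefix test: an address is unique local exactly when
+    /// the top seven bits of its first segment are `1111110`.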
+    ///
+    /// [IETF RFC 4193]: https://tools.ietf.org/html/rfc4193
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ip)]
+    ///
+    /// use std::net::Ipv6Addr;
+    ///
+    /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unique_local(), false);
+    /// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 0).is_unique_local(), true);
+    /// ```
+    #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+    #[unstable(feature = "ip", issue = "27709")]
+    #[must_use]
+    #[inline]
+    pub const fn is_unique_local(&self) -> bool {
+        (self.segments()[0] & 0xfe00) == 0xfc00
+    }
+
+    /// Returns [`true`] if this is a unicast address, as defined by [IETF RFC 4291].
+    /// Any address that is not a [multicast address] (`ff00::/8`) is unicast.
+    ///
+    /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
+    /// [multicast address]: Ipv6Addr::is_multicast
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ip)]
+    ///
+    /// use std::net::Ipv6Addr;
+    ///
+    /// // The unspecified and loopback addresses are unicast.
+    /// assert_eq!(Ipv6Addr::UNSPECIFIED.is_unicast(), true);
+    /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast(), true);
+    ///
+    /// // Any address that is not a multicast address (`ff00::/8`) is unicast.
+    /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast(), true);
+    /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_unicast(), false);
+    /// ```
+    #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+    #[unstable(feature = "ip", issue = "27709")]
+    #[must_use]
+    #[inline]
+    pub const fn is_unicast(&self) -> bool {
+        !self.is_multicast()
+    }
+
+    /// Returns [`true`] if the address is a unicast address with link-local scope,
+    /// as defined in [RFC 4291].
+    ///
+    /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 section 2.4].
+    /// Note that this encompasses more addresses than those defined in [RFC 4291 section 2.5.6],
+    /// which describes "Link-Local IPv6 Unicast Addresses" as having the following stricter format:
+    ///
+    /// ```text
+    /// | 10 bits  |         54 bits         |          64 bits           |
+    /// +----------+-------------------------+----------------------------+
+    /// |1111111010|           0             |       interface ID         |
+    /// +----------+-------------------------+----------------------------+
+    /// ```
+    /// So while currently the only addresses with link-local scope an application will encounter are all in `fe80::/64`,
+    /// this might change in the future with the publication of new standards. More addresses in `fe80::/10` could be allocated,
+    /// and those addresses will have link-local scope.
+    ///
+    /// Also note that while [RFC 4291 section 2.5.3] says of the [loopback address] (`::1`) that "it is treated as having Link-Local scope",
+    /// this does not mean that the loopback address actually has link-local scope, and this method will return `false` on it.
+    ///
+    /// [RFC 4291]: https://tools.ietf.org/html/rfc4291
+    /// [RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4
+    /// [RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3
+    /// [RFC 4291 section 2.5.6]: https://tools.ietf.org/html/rfc4291#section-2.5.6
+    /// [loopback address]: Ipv6Addr::LOCALHOST
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ip)]
+    ///
+    /// use std::net::Ipv6Addr;
+    ///
+    /// // The loopback address (`::1`) does not actually have link-local scope.
+    /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast_link_local(), false);
+    ///
+    /// // Only addresses in `fe80::/10` have link-local scope.
+    /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), false);
+    /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true);
+    ///
+    /// // Addresses outside the stricter `fe80::/64` also have link-local scope.
+    /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 1, 0, 0, 0, 0).is_unicast_link_local(), true);
+    /// assert_eq!(Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true);
+    /// ```
+    #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+    #[unstable(feature = "ip", issue = "27709")]
+    #[must_use]
+    #[inline]
+    pub const fn is_unicast_link_local(&self) -> bool {
+        (self.segments()[0] & 0xffc0) == 0xfe80
+    }
+
+    /// Returns [`true`] if this is an address reserved for documentation
+    /// (`2001:db8::/32`).
+    ///
+    /// This property is defined in [IETF RFC 3849].
+    ///
+    /// [IETF RFC 3849]: https://tools.ietf.org/html/rfc3849
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ip)]
+    ///
+    /// use std::net::Ipv6Addr;
+    ///
+    /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_documentation(), false);
+    /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_documentation(), true);
+    /// ```
+    #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+    #[unstable(feature = "ip", issue = "27709")]
+    #[must_use]
+    #[inline]
+    pub const fn is_documentation(&self) -> bool {
+        (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8)
+    }
+
+    /// Returns [`true`] if this is an address reserved for benchmarking (`2001:2::/48`).
+    ///
+    /// This property is defined in [IETF RFC 5180], where it is mistakenly specified as covering the range `2001:0200::/48`.
+    /// This is corrected in [IETF RFC Errata 1752] to `2001:0002::/48`.
+    ///
+    /// [IETF RFC 5180]: https://tools.ietf.org/html/rfc5180
+    /// [IETF RFC Errata 1752]: https://www.rfc-editor.org/errata_search.php?eid=1752
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(ip)]
+    ///
+    /// use std::net::Ipv6Addr;
+    ///
+    /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc613, 0x0).is_benchmarking(), false);
+    /// assert_eq!(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0).is_benchmarking(), true);
+    /// ```
+    #[unstable(feature = "ip", issue = "27709")]
+    #[must_use]
+    #[inline]
+    pub const fn is_benchmarking(&self) -> bool {
+        (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && (self.segments()[2] == 0)
+    }
+
+    /// Returns [`true`] if the address is a globally routable unicast address.
+    ///
+    /// The following return [`false`]:
+    ///
+    /// - the loopback address
+    /// - the link-local addresses
+    /// - unique local addresses
+    /// - the unspecified address
+    /// - the address range reserved for documentation
+    ///
+    /// This method returns [`true`] for site-local addresses as per [RFC 4291 section 2.5.7]:
+    ///
+    /// ```no_rust
+    /// The special behavior of [the site-local unicast] prefix defined in [RFC3513] must no longer
+    /// be supported in new implementations (i.e., new implementations must treat this prefix as
+    /// Global Unicast).
+ /// ``` + /// + /// [RFC 4291 section 2.5.7]: https://tools.ietf.org/html/rfc4291#section-2.5.7 + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_global(), false); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unicast_global(), true); + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn is_unicast_global(&self) -> bool { + self.is_unicast() + && !self.is_loopback() + && !self.is_unicast_link_local() + && !self.is_unique_local() + && !self.is_unspecified() + && !self.is_documentation() + && !self.is_benchmarking() + } + + /// Returns the address's multicast scope if the address is multicast. + /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// + /// use std::net::{Ipv6Addr, Ipv6MulticastScope}; + /// + /// assert_eq!( + /// Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0).multicast_scope(), + /// Some(Ipv6MulticastScope::Global) + /// ); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).multicast_scope(), None); + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use] + #[inline] + pub const fn multicast_scope(&self) -> Option<Ipv6MulticastScope> { + if self.is_multicast() { + match self.segments()[0] & 0x000f { + 1 => Some(Ipv6MulticastScope::InterfaceLocal), + 2 => Some(Ipv6MulticastScope::LinkLocal), + 3 => Some(Ipv6MulticastScope::RealmLocal), + 4 => Some(Ipv6MulticastScope::AdminLocal), + 5 => Some(Ipv6MulticastScope::SiteLocal), + 8 => Some(Ipv6MulticastScope::OrganizationLocal), + 14 => Some(Ipv6MulticastScope::Global), + _ => None, + } + } else { + None + } + } + + /// Returns [`true`] if this is a multicast address (`ff00::/8`). + /// + /// This property is defined by [IETF RFC 4291]. + /// + /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291 + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_multicast(), true); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_multicast(), false); + /// ``` + #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] + #[stable(since = "1.7.0", feature = "ip_17")] + #[must_use] + #[inline] + pub const fn is_multicast(&self) -> bool { + (self.segments()[0] & 0xff00) == 0xff00 + } + + /// Converts this address to an [`IPv4` address] if it's an [IPv4-mapped] address, + /// as defined in [IETF RFC 4291 section 2.5.5.2], otherwise returns [`None`]. + /// + /// `::ffff:a.b.c.d` becomes `a.b.c.d`. + /// All addresses *not* starting with `::ffff` will return `None`. 
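+    /// In other words, only addresses in `::ffff:0:0/96` are converted.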
+    ///
+    /// [`IPv4` address]: Ipv4Addr
+    /// [IPv4-mapped]: Ipv6Addr
+    /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::net::{Ipv4Addr, Ipv6Addr};
+    ///
+    /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4_mapped(), None);
+    /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4_mapped(),
+    ///            Some(Ipv4Addr::new(192, 10, 2, 255)));
+    /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4_mapped(), None);
+    /// ```
+    #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
+    #[stable(feature = "ipv6_to_ipv4_mapped", since = "1.63.0")]
+    #[must_use = "this returns the result of the operation, \
+                  without modifying the original"]
+    #[inline]
+    pub const fn to_ipv4_mapped(&self) -> Option<Ipv4Addr> {
+        match self.octets() {
+            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => {
+                Some(Ipv4Addr::new(a, b, c, d))
+            }
+            _ => None,
+        }
+    }
+
+    /// Converts this address to an [`IPv4` address] if it is either
+    /// an [IPv4-compatible] address as defined in [IETF RFC 4291 section 2.5.5.1],
+    /// or an [IPv4-mapped] address as defined in [IETF RFC 4291 section 2.5.5.2],
+    /// otherwise returns [`None`].
+    ///
+    /// Note that this will return an [`IPv4` address] for the IPv6 loopback address `::1`. Use
+    /// [`Ipv6Addr::to_ipv4_mapped`] to avoid this.
+    ///
+    /// `::a.b.c.d` and `::ffff:a.b.c.d` become `a.b.c.d`. `::1` becomes `0.0.0.1`.
+    /// All addresses *not* starting with either all zeroes or `::ffff` will return `None`.
+    ///
+    /// [`IPv4` address]: Ipv4Addr
+    /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses
+    /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses
+    /// [IETF RFC 4291 section 2.5.5.1]: https://tools.ietf.org/html/rfc4291#section-2.5.5.1
+    /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::net::{Ipv4Addr, Ipv6Addr};
+    ///
+    /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4(), None);
+    /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4(),
+    ///            Some(Ipv4Addr::new(192, 10, 2, 255)));
+    /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4(),
+    ///            Some(Ipv4Addr::new(0, 0, 0, 1)));
+    /// ```
+    #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[must_use = "this returns the result of the operation, \
+                  without modifying the original"]
+    #[inline]
+    pub const fn to_ipv4(&self) -> Option<Ipv4Addr> {
+        if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() {
+            let [a, b] = ab.to_be_bytes();
+            let [c, d] = cd.to_be_bytes();
+            Some(Ipv4Addr::new(a, b, c, d))
+        } else {
+            None
+        }
+    }
+
+    /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped address, otherwise it
+    /// returns `self` wrapped in an `IpAddr::V6`.
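+    ///
+    /// Unlike [`Ipv6Addr::to_ipv4`], this never converts the IPv4-compatible
+    /// range, so the loopback address `::1` stays IPv6.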
+ /// + /// # Examples + /// + /// ``` + /// #![feature(ip)] + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).is_loopback(), false); + /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).to_canonical().is_loopback(), true); + /// ``` + #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] + #[unstable(feature = "ip", issue = "27709")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn to_canonical(&self) -> IpAddr { + if let Some(mapped) = self.to_ipv4_mapped() { + return IpAddr::V4(mapped); + } + IpAddr::V6(*self) + } + + /// Returns the sixteen eight-bit integers the IPv6 address consists of. + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).octets(), + /// [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + /// ``` + #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] + #[stable(feature = "ipv6_to_octets", since = "1.12.0")] + #[must_use] + #[inline] + pub const fn octets(&self) -> [u8; 16] { + self.octets + } +} + +/// Write an Ipv6Addr, conforming to the canonical style described by +/// [RFC 5952](https://tools.ietf.org/html/rfc5952). +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Display for Ipv6Addr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // If there are no alignment requirements, write the IP address directly to `f`. + // Otherwise, write it to a local buffer and then use `f.pad`. + if f.precision().is_none() && f.width().is_none() { + let segments = self.segments(); + + // Special case for :: and ::1; otherwise they get written with the + // IPv4 formatter + if self.is_unspecified() { + f.write_str("::") + } else if self.is_loopback() { + f.write_str("::1") + } else if let Some(ipv4) = self.to_ipv4() { + match segments[5] { + // IPv4 Compatible address + 0 => write!(f, "::{}", ipv4), + // IPv4 Mapped address + 0xffff => write!(f, "::ffff:{}", ipv4), + _ => unreachable!(), + } + } else { + #[derive(Copy, Clone, Default)] + struct Span { + start: usize, + len: usize, + } + + // Find the inner 0 span + let zeroes = { + let mut longest = Span::default(); + let mut current = Span::default(); + + for (i, &segment) in segments.iter().enumerate() { + if segment == 0 { + if current.len == 0 { + current.start = i; + } + + current.len += 1; + + if current.len > longest.len { + longest = current; + } + } else { + current = Span::default(); + } + } + + longest + }; + + /// Write a colon-separated part of the address + #[inline] + fn fmt_subslice(f: &mut fmt::Formatter<'_>, chunk: &[u16]) -> fmt::Result { + if let Some((first, tail)) = chunk.split_first() { + write!(f, "{:x}", first)?; + for segment in tail { + f.write_char(':')?; + write!(f, "{:x}", segment)?; + } + } + Ok(()) + } + + if zeroes.len > 1 { + fmt_subslice(f, &segments[..zeroes.start])?; + f.write_str("::")?; + fmt_subslice(f, &segments[zeroes.start + zeroes.len..]) + } else { + fmt_subslice(f, &segments) + } + } + } else { + const LONGEST_IPV6_ADDR: &str = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"; + + let mut buf = DisplayBuffer::<{ LONGEST_IPV6_ADDR.len() }>::new(); + // Buffer is long enough for the longest possible IPv6 address, so this should never fail. 
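+            // (8 groups * 4 hex digits) + 7 separating colons = 39 bytes, and RFC 5952
+            // zero compression only ever shortens the text, so 39 is a true upper bound.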
+ write!(buf, "{}", self).unwrap(); + + f.pad(buf.as_str()) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for Ipv6Addr { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialEq<IpAddr> for Ipv6Addr { + #[inline] + fn eq(&self, other: &IpAddr) -> bool { + match other { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => self == v6, + } + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialEq<Ipv6Addr> for IpAddr { + #[inline] + fn eq(&self, other: &Ipv6Addr) -> bool { + match self { + IpAddr::V4(_) => false, + IpAddr::V6(v6) => v6 == other, + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl PartialOrd for Ipv6Addr { + #[inline] + fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialOrd<Ipv6Addr> for IpAddr { + #[inline] + fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> { + match self { + IpAddr::V4(_) => Some(Ordering::Less), + IpAddr::V6(v6) => v6.partial_cmp(other), + } + } +} + +#[stable(feature = "ip_cmp", since = "1.16.0")] +impl PartialOrd<IpAddr> for Ipv6Addr { + #[inline] + fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> { + match other { + IpAddr::V4(_) => Some(Ordering::Greater), + IpAddr::V6(v6) => self.partial_cmp(v6), + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Ord for Ipv6Addr { + #[inline] + fn cmp(&self, other: &Ipv6Addr) -> Ordering { + self.segments().cmp(&other.segments()) + } +} + +#[stable(feature = "i128", since = "1.26.0")] +impl From<Ipv6Addr> for u128 { + /// Convert an `Ipv6Addr` into a host byte order `u128`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::new( + /// 0x1020, 0x3040, 0x5060, 0x7080, + /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D, + /// ); + /// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, u128::from(addr)); + /// ``` + #[inline] + fn from(ip: Ipv6Addr) -> u128 { + u128::from_be_bytes(ip.octets) + } +} +#[stable(feature = "i128", since = "1.26.0")] +impl From<u128> for Ipv6Addr { + /// Convert a host byte order `u128` into an `Ipv6Addr`. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::from(0x102030405060708090A0B0C0D0E0F00D_u128); + /// assert_eq!( + /// Ipv6Addr::new( + /// 0x1020, 0x3040, 0x5060, 0x7080, + /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D, + /// ), + /// addr); + /// ``` + #[inline] + fn from(ip: u128) -> Ipv6Addr { + Ipv6Addr::from(ip.to_be_bytes()) + } +} + +#[stable(feature = "ipv6_from_octets", since = "1.9.0")] +impl From<[u8; 16]> for Ipv6Addr { + /// Creates an `Ipv6Addr` from a sixteen element byte array. + /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::from([ + /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8, + /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8, + /// ]); + /// assert_eq!( + /// Ipv6Addr::new( + /// 0x1918, 0x1716, + /// 0x1514, 0x1312, + /// 0x1110, 0x0f0e, + /// 0x0d0c, 0x0b0a + /// ), + /// addr + /// ); + /// ``` + #[inline] + fn from(octets: [u8; 16]) -> Ipv6Addr { + Ipv6Addr { octets } + } +} + +#[stable(feature = "ipv6_from_segments", since = "1.16.0")] +impl From<[u16; 8]> for Ipv6Addr { + /// Creates an `Ipv6Addr` from an eight element 16-bit array. 
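+    ///
+    /// This forwards to [`Ipv6Addr::new`], so the segments are given in the same
+    /// order and are converted to network byte order internally.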
+ /// + /// # Examples + /// + /// ``` + /// use std::net::Ipv6Addr; + /// + /// let addr = Ipv6Addr::from([ + /// 525u16, 524u16, 523u16, 522u16, + /// 521u16, 520u16, 519u16, 518u16, + /// ]); + /// assert_eq!( + /// Ipv6Addr::new( + /// 0x20d, 0x20c, + /// 0x20b, 0x20a, + /// 0x209, 0x208, + /// 0x207, 0x206 + /// ), + /// addr + /// ); + /// ``` + #[inline] + fn from(segments: [u16; 8]) -> Ipv6Addr { + let [a, b, c, d, e, f, g, h] = segments; + Ipv6Addr::new(a, b, c, d, e, f, g, h) + } +} + +#[stable(feature = "ip_from_slice", since = "1.17.0")] +impl From<[u8; 16]> for IpAddr { + /// Creates an `IpAddr::V6` from a sixteen element byte array. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv6Addr}; + /// + /// let addr = IpAddr::from([ + /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8, + /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8, + /// ]); + /// assert_eq!( + /// IpAddr::V6(Ipv6Addr::new( + /// 0x1918, 0x1716, + /// 0x1514, 0x1312, + /// 0x1110, 0x0f0e, + /// 0x0d0c, 0x0b0a + /// )), + /// addr + /// ); + /// ``` + #[inline] + fn from(octets: [u8; 16]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(octets)) + } +} + +#[stable(feature = "ip_from_slice", since = "1.17.0")] +impl From<[u16; 8]> for IpAddr { + /// Creates an `IpAddr::V6` from an eight element 16-bit array. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv6Addr}; + /// + /// let addr = IpAddr::from([ + /// 525u16, 524u16, 523u16, 522u16, + /// 521u16, 520u16, 519u16, 518u16, + /// ]); + /// assert_eq!( + /// IpAddr::V6(Ipv6Addr::new( + /// 0x20d, 0x20c, + /// 0x20b, 0x20a, + /// 0x209, 0x208, + /// 0x207, 0x206 + /// )), + /// addr + /// ); + /// ``` + #[inline] + fn from(segments: [u16; 8]) -> IpAddr { + IpAddr::V6(Ipv6Addr::from(segments)) + } +} diff --git a/library/core/src/net/mod.rs b/library/core/src/net/mod.rs new file mode 100644 index 000000000..31f5f5d3c --- /dev/null +++ b/library/core/src/net/mod.rs @@ -0,0 +1,24 @@ +//! Networking primitives for IP communication. +//! +//! This module provides types for IP and socket addresses. +//! +//! # Organization +//! +//! * [`IpAddr`] represents IP addresses of either IPv4 or IPv6; [`Ipv4Addr`] and +//! [`Ipv6Addr`] are respectively IPv4 and IPv6 addresses +//! * [`SocketAddr`] represents socket addresses of either IPv4 or IPv6; [`SocketAddrV4`] +//! and [`SocketAddrV6`] are respectively IPv4 and IPv6 socket addresses + +#![unstable(feature = "ip_in_core", issue = "108443")] + +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::ip_addr::{IpAddr, Ipv4Addr, Ipv6Addr, Ipv6MulticastScope}; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::parser::AddrParseError; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::socket_addr::{SocketAddr, SocketAddrV4, SocketAddrV6}; + +mod display_buffer; +mod ip_addr; +mod parser; +mod socket_addr; diff --git a/library/std/src/net/parser.rs b/library/core/src/net/parser.rs index a38031c48..a08d2792d 100644 --- a/library/std/src/net/parser.rs +++ b/library/core/src/net/parser.rs @@ -3,9 +3,7 @@ //! This module is "publicly exported" through the `FromStr` implementations //! below. 
-#[cfg(test)] -mod tests; - +use crate::convert::TryInto; use crate::error::Error; use crate::fmt; use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; diff --git a/library/core/src/net/socket_addr.rs b/library/core/src/net/socket_addr.rs new file mode 100644 index 000000000..2d48e2715 --- /dev/null +++ b/library/core/src/net/socket_addr.rs @@ -0,0 +1,664 @@ +use crate::cmp::Ordering; +use crate::fmt::{self, Write}; +use crate::hash; +use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + +use super::display_buffer::DisplayBuffer; + +/// An internet socket address, either IPv4 or IPv6. +/// +/// Internet socket addresses consist of an [IP address], a 16-bit port number, as well +/// as possibly some version-dependent additional information. See [`SocketAddrV4`]'s and +/// [`SocketAddrV6`]'s respective documentation for more details. +/// +/// The size of a `SocketAddr` instance may vary depending on the target operating +/// system. +/// +/// [IP address]: IpAddr +/// +/// # Examples +/// +/// ``` +/// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +/// +/// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); +/// +/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket)); +/// assert_eq!(socket.port(), 8080); +/// assert_eq!(socket.is_ipv4(), true); +/// ``` +#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[stable(feature = "rust1", since = "1.0.0")] +pub enum SocketAddr { + /// An IPv4 socket address. + #[stable(feature = "rust1", since = "1.0.0")] + V4(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV4), + /// An IPv6 socket address. + #[stable(feature = "rust1", since = "1.0.0")] + V6(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV6), +} + +/// An IPv4 socket address. +/// +/// IPv4 socket addresses consist of an [`IPv4` address] and a 16-bit port number, as +/// stated in [IETF RFC 793]. +/// +/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses. +/// +/// The size of a `SocketAddrV4` struct may vary depending on the target operating +/// system. Do not assume that this type has the same memory layout as the underlying +/// system representation. +/// +/// [IETF RFC 793]: https://tools.ietf.org/html/rfc793 +/// [`IPv4` address]: Ipv4Addr +/// +/// # Examples +/// +/// ``` +/// use std::net::{Ipv4Addr, SocketAddrV4}; +/// +/// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); +/// +/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket)); +/// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1)); +/// assert_eq!(socket.port(), 8080); +/// ``` +#[derive(Copy, Clone, Eq, PartialEq)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct SocketAddrV4 { + ip: Ipv4Addr, + port: u16, +} + +/// An IPv6 socket address. +/// +/// IPv6 socket addresses consist of an [`IPv6` address], a 16-bit port number, as well +/// as fields containing the traffic class, the flow label, and a scope identifier +/// (see [IETF RFC 2553, Section 3.3] for more details). +/// +/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses. +/// +/// The size of a `SocketAddrV6` struct may vary depending on the target operating +/// system. Do not assume that this type has the same memory layout as the underlying +/// system representation. 
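+///
+/// [`SocketAddr::new`] fills `flowinfo` and `scope_id` with zeroes; construct a
+/// `SocketAddrV6` directly to set them.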
+/// +/// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 +/// [`IPv6` address]: Ipv6Addr +/// +/// # Examples +/// +/// ``` +/// use std::net::{Ipv6Addr, SocketAddrV6}; +/// +/// let socket = SocketAddrV6::new(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1), 8080, 0, 0); +/// +/// assert_eq!("[2001:db8::1]:8080".parse(), Ok(socket)); +/// assert_eq!(socket.ip(), &Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1)); +/// assert_eq!(socket.port(), 8080); +/// ``` +#[derive(Copy, Clone, Eq, PartialEq)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct SocketAddrV6 { + ip: Ipv6Addr, + port: u16, + flowinfo: u32, + scope_id: u32, +} + +impl SocketAddr { + /// Creates a new socket address from an [IP address] and a port number. + /// + /// [IP address]: IpAddr + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + /// assert_eq!(socket.port(), 8080); + /// ``` + #[stable(feature = "ip_addr", since = "1.7.0")] + #[must_use] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn new(ip: IpAddr, port: u16) -> SocketAddr { + match ip { + IpAddr::V4(a) => SocketAddr::V4(SocketAddrV4::new(a, port)), + IpAddr::V6(a) => SocketAddr::V6(SocketAddrV6::new(a, port, 0, 0)), + } + } + + /// Returns the IP address associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + /// ``` + #[must_use] + #[stable(feature = "ip_addr", since = "1.7.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn ip(&self) -> IpAddr { + match *self { + SocketAddr::V4(ref a) => IpAddr::V4(*a.ip()), + SocketAddr::V6(ref a) => IpAddr::V6(*a.ip()), + } + } + + /// Changes the IP address associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// socket.set_ip(IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1))); + /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1))); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_ip(&mut self, new_ip: IpAddr) { + // `match (*self, new_ip)` would have us mutate a copy of self only to throw it away. + match (self, new_ip) { + (&mut SocketAddr::V4(ref mut a), IpAddr::V4(new_ip)) => a.set_ip(new_ip), + (&mut SocketAddr::V6(ref mut a), IpAddr::V6(new_ip)) => a.set_ip(new_ip), + (self_, new_ip) => *self_ = Self::new(new_ip, self_.port()), + } + } + + /// Returns the port number associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// assert_eq!(socket.port(), 8080); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn port(&self) -> u16 { + match *self { + SocketAddr::V4(ref a) => a.port(), + SocketAddr::V6(ref a) => a.port(), + } + } + + /// Changes the port number associated with this socket address. 
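+    ///
+    /// The address variant is preserved, as are the `flowinfo` and `scope_id`
+    /// fields of a `V6` address.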
+ /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// socket.set_port(1025); + /// assert_eq!(socket.port(), 1025); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_port(&mut self, new_port: u16) { + match *self { + SocketAddr::V4(ref mut a) => a.set_port(new_port), + SocketAddr::V6(ref mut a) => a.set_port(new_port), + } + } + + /// Returns [`true`] if the [IP address] in this `SocketAddr` is an + /// [`IPv4` address], and [`false`] otherwise. + /// + /// [IP address]: IpAddr + /// [`IPv4` address]: IpAddr::V4 + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + /// + /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + /// assert_eq!(socket.is_ipv4(), true); + /// assert_eq!(socket.is_ipv6(), false); + /// ``` + #[must_use] + #[stable(feature = "sockaddr_checker", since = "1.16.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn is_ipv4(&self) -> bool { + matches!(*self, SocketAddr::V4(_)) + } + + /// Returns [`true`] if the [IP address] in this `SocketAddr` is an + /// [`IPv6` address], and [`false`] otherwise. + /// + /// [IP address]: IpAddr + /// [`IPv6` address]: IpAddr::V6 + /// + /// # Examples + /// + /// ``` + /// use std::net::{IpAddr, Ipv6Addr, SocketAddr}; + /// + /// let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 65535, 0, 1)), 8080); + /// assert_eq!(socket.is_ipv4(), false); + /// assert_eq!(socket.is_ipv6(), true); + /// ``` + #[must_use] + #[stable(feature = "sockaddr_checker", since = "1.16.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn is_ipv6(&self) -> bool { + matches!(*self, SocketAddr::V6(_)) + } +} + +impl SocketAddrV4 { + /// Creates a new socket address from an [`IPv4` address] and a port number. + /// + /// [`IPv4` address]: Ipv4Addr + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV4, Ipv4Addr}; + /// + /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn new(ip: Ipv4Addr, port: u16) -> SocketAddrV4 { + SocketAddrV4 { ip, port } + } + + /// Returns the IP address associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV4, Ipv4Addr}; + /// + /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); + /// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1)); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn ip(&self) -> &Ipv4Addr { + &self.ip + } + + /// Changes the IP address associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV4, Ipv4Addr}; + /// + /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); + /// socket.set_ip(Ipv4Addr::new(192, 168, 0, 1)); + /// assert_eq!(socket.ip(), &Ipv4Addr::new(192, 168, 0, 1)); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_ip(&mut self, new_ip: Ipv4Addr) { + self.ip = new_ip; + } + + /// Returns the port number associated with this socket address. 
+ /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV4, Ipv4Addr}; + /// + /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); + /// assert_eq!(socket.port(), 8080); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn port(&self) -> u16 { + self.port + } + + /// Changes the port number associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV4, Ipv4Addr}; + /// + /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); + /// socket.set_port(4242); + /// assert_eq!(socket.port(), 4242); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_port(&mut self, new_port: u16) { + self.port = new_port; + } +} + +impl SocketAddrV6 { + /// Creates a new socket address from an [`IPv6` address], a 16-bit port number, + /// and the `flowinfo` and `scope_id` fields. + /// + /// For more information on the meaning and layout of the `flowinfo` and `scope_id` + /// parameters, see [IETF RFC 2553, Section 3.3]. + /// + /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 + /// [`IPv6` address]: Ipv6Addr + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn new(ip: Ipv6Addr, port: u16, flowinfo: u32, scope_id: u32) -> SocketAddrV6 { + SocketAddrV6 { ip, port, flowinfo, scope_id } + } + + /// Returns the IP address associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); + /// assert_eq!(socket.ip(), &Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn ip(&self) -> &Ipv6Addr { + &self.ip + } + + /// Changes the IP address associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); + /// socket.set_ip(Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0)); + /// assert_eq!(socket.ip(), &Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0)); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_ip(&mut self, new_ip: Ipv6Addr) { + self.ip = new_ip; + } + + /// Returns the port number associated with this socket address. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); + /// assert_eq!(socket.port(), 8080); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn port(&self) -> u16 { + self.port + } + + /// Changes the port number associated with this socket address. 
+ /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); + /// socket.set_port(4242); + /// assert_eq!(socket.port(), 4242); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_port(&mut self, new_port: u16) { + self.port = new_port; + } + + /// Returns the flow information associated with this address. + /// + /// This information corresponds to the `sin6_flowinfo` field in C's `netinet/in.h`, + /// as specified in [IETF RFC 2553, Section 3.3]. + /// It combines information about the flow label and the traffic class as specified + /// in [IETF RFC 2460], respectively [Section 6] and [Section 7]. + /// + /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 + /// [IETF RFC 2460]: https://tools.ietf.org/html/rfc2460 + /// [Section 6]: https://tools.ietf.org/html/rfc2460#section-6 + /// [Section 7]: https://tools.ietf.org/html/rfc2460#section-7 + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0); + /// assert_eq!(socket.flowinfo(), 10); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn flowinfo(&self) -> u32 { + self.flowinfo + } + + /// Changes the flow information associated with this socket address. + /// + /// See [`SocketAddrV6::flowinfo`]'s documentation for more details. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0); + /// socket.set_flowinfo(56); + /// assert_eq!(socket.flowinfo(), 56); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_flowinfo(&mut self, new_flowinfo: u32) { + self.flowinfo = new_flowinfo; + } + + /// Returns the scope ID associated with this address. + /// + /// This information corresponds to the `sin6_scope_id` field in C's `netinet/in.h`, + /// as specified in [IETF RFC 2553, Section 3.3]. + /// + /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78); + /// assert_eq!(socket.scope_id(), 78); + /// ``` + #[must_use] + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_stable(feature = "const_socketaddr", since = "1.69.0")] + pub const fn scope_id(&self) -> u32 { + self.scope_id + } + + /// Changes the scope ID associated with this socket address. + /// + /// See [`SocketAddrV6::scope_id`]'s documentation for more details. + /// + /// # Examples + /// + /// ``` + /// use std::net::{SocketAddrV6, Ipv6Addr}; + /// + /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78); + /// socket.set_scope_id(42); + /// assert_eq!(socket.scope_id(), 42); + /// ``` + #[stable(feature = "sockaddr_setters", since = "1.9.0")] + pub fn set_scope_id(&mut self, new_scope_id: u32) { + self.scope_id = new_scope_id; + } +} + +#[stable(feature = "ip_from_ip", since = "1.16.0")] +impl From<SocketAddrV4> for SocketAddr { + /// Converts a [`SocketAddrV4`] into a [`SocketAddr::V4`]. 
+    fn from(sock4: SocketAddrV4) -> SocketAddr {
+        SocketAddr::V4(sock4)
+    }
+}
+
+#[stable(feature = "ip_from_ip", since = "1.16.0")]
+impl From<SocketAddrV6> for SocketAddr {
+    /// Converts a [`SocketAddrV6`] into a [`SocketAddr::V6`].
+    fn from(sock6: SocketAddrV6) -> SocketAddr {
+        SocketAddr::V6(sock6)
+    }
+}
+
+#[stable(feature = "addr_from_into_ip", since = "1.17.0")]
+impl<I: Into<IpAddr>> From<(I, u16)> for SocketAddr {
+    /// Converts a tuple (Into<[`IpAddr`]>, `u16`) into a [`SocketAddr`].
+    ///
+    /// This conversion creates a [`SocketAddr::V4`] for an [`IpAddr::V4`]
+    /// and creates a [`SocketAddr::V6`] for an [`IpAddr::V6`].
+    ///
+    /// The `u16` is treated as the port of the newly created [`SocketAddr`].
+    fn from(pieces: (I, u16)) -> SocketAddr {
+        SocketAddr::new(pieces.0.into(), pieces.1)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for SocketAddr {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            SocketAddr::V4(ref a) => a.fmt(f),
+            SocketAddr::V6(ref a) => a.fmt(f),
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for SocketAddr {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self, fmt)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for SocketAddrV4 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // If there are no alignment requirements, write the socket address directly to `f`.
+        // Otherwise, write it to a local buffer and then use `f.pad`.
+        if f.precision().is_none() && f.width().is_none() {
+            write!(f, "{}:{}", self.ip(), self.port())
+        } else {
+            const LONGEST_IPV4_SOCKET_ADDR: &str = "255.255.255.255:65535";
+
+            let mut buf = DisplayBuffer::<{ LONGEST_IPV4_SOCKET_ADDR.len() }>::new();
+            // Buffer is long enough for the longest possible IPv4 socket address, so this should never fail.
+            write!(buf, "{}:{}", self.ip(), self.port()).unwrap();
+
+            f.pad(buf.as_str())
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for SocketAddrV4 {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self, fmt)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for SocketAddrV6 {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // If there are no alignment requirements, write the socket address directly to `f`.
+        // Otherwise, write it to a local buffer and then use `f.pad`.
+        if f.precision().is_none() && f.width().is_none() {
+            match self.scope_id() {
+                0 => write!(f, "[{}]:{}", self.ip(), self.port()),
+                scope_id => write!(f, "[{}%{}]:{}", self.ip(), scope_id, self.port()),
+            }
+        } else {
+            const LONGEST_IPV6_SOCKET_ADDR: &str =
+                "[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%4294967295]:65535";
+
+            let mut buf = DisplayBuffer::<{ LONGEST_IPV6_SOCKET_ADDR.len() }>::new();
+            match self.scope_id() {
+                0 => write!(buf, "[{}]:{}", self.ip(), self.port()),
+                scope_id => write!(buf, "[{}%{}]:{}", self.ip(), scope_id, self.port()),
+            }
+            // Buffer is long enough for the longest possible IPv6 socket address, so this should never fail.
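+            // `[` + 39 address bytes + `%` + 10-digit scope ID + `]:` + 5-digit port = 58 bytes.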
+ .unwrap(); + + f.pad(buf.as_str()) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for SocketAddrV6 { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, fmt) + } +} + +#[stable(feature = "socketaddr_ordering", since = "1.45.0")] +impl PartialOrd for SocketAddrV4 { + fn partial_cmp(&self, other: &SocketAddrV4) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +#[stable(feature = "socketaddr_ordering", since = "1.45.0")] +impl PartialOrd for SocketAddrV6 { + fn partial_cmp(&self, other: &SocketAddrV6) -> Option<Ordering> { + Some(self.cmp(other)) + } +} + +#[stable(feature = "socketaddr_ordering", since = "1.45.0")] +impl Ord for SocketAddrV4 { + fn cmp(&self, other: &SocketAddrV4) -> Ordering { + self.ip().cmp(other.ip()).then(self.port().cmp(&other.port())) + } +} + +#[stable(feature = "socketaddr_ordering", since = "1.45.0")] +impl Ord for SocketAddrV6 { + fn cmp(&self, other: &SocketAddrV6) -> Ordering { + self.ip().cmp(other.ip()).then(self.port().cmp(&other.port())) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl hash::Hash for SocketAddrV4 { + fn hash<H: hash::Hasher>(&self, s: &mut H) { + (self.port, self.ip).hash(s) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl hash::Hash for SocketAddrV6 { + fn hash<H: hash::Hasher>(&self, s: &mut H) { + (self.port, &self.ip, self.flowinfo, self.scope_id).hash(s) + } +} diff --git a/library/core/src/num/dec2flt/mod.rs b/library/core/src/num/dec2flt/mod.rs index a888ced49..f8d493e8b 100644 --- a/library/core/src/num/dec2flt/mod.rs +++ b/library/core/src/num/dec2flt/mod.rs @@ -75,6 +75,7 @@ issue = "none" )] +use crate::error::Error; use crate::fmt; use crate::str::FromStr; @@ -182,15 +183,10 @@ enum FloatErrorKind { Invalid, } -impl ParseFloatError { - #[unstable( - feature = "int_error_internals", - reason = "available through Error trait and this method should \ - not be exposed publicly", - issue = "none" - )] - #[doc(hidden)] - pub fn __description(&self) -> &str { +#[stable(feature = "rust1", since = "1.0.0")] +impl Error for ParseFloatError { + #[allow(deprecated)] + fn description(&self) -> &str { match self.kind { FloatErrorKind::Empty => "cannot parse float from empty string", FloatErrorKind::Invalid => "invalid float literal", @@ -201,7 +197,8 @@ impl ParseFloatError { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for ParseFloatError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.__description().fmt(f) + #[allow(deprecated)] + self.description().fmt(f) } } diff --git a/library/core/src/num/error.rs b/library/core/src/num/error.rs index 768dd8781..1bae4efe7 100644 --- a/library/core/src/num/error.rs +++ b/library/core/src/num/error.rs @@ -9,23 +9,19 @@ use crate::fmt; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct TryFromIntError(pub(crate) ()); -impl TryFromIntError { - #[unstable( - feature = "int_error_internals", - reason = "available through Error trait and this method should \ - not be exposed publicly", - issue = "none" - )] - #[doc(hidden)] - pub fn __description(&self) -> &str { - "out of range integral type conversion attempted" +#[stable(feature = "try_from", since = "1.34.0")] +impl fmt::Display for TryFromIntError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + #[allow(deprecated)] + self.description().fmt(fmt) } } #[stable(feature = "try_from", since = "1.34.0")] -impl fmt::Display for TryFromIntError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> 
fmt::Result {
-        self.__description().fmt(fmt)
+impl Error for TryFromIntError {
+    #[allow(deprecated)]
+    fn description(&self) -> &str {
+        "out of range integral type conversion attempted"
     }
 }
 
@@ -121,28 +117,13 @@ impl ParseIntError {
     pub fn kind(&self) -> &IntErrorKind {
         &self.kind
     }
-    #[unstable(
-        feature = "int_error_internals",
-        reason = "available through Error trait and this method should \
-                  not be exposed publicly",
-        issue = "none"
-    )]
-    #[doc(hidden)]
-    pub fn __description(&self) -> &str {
-        match self.kind {
-            IntErrorKind::Empty => "cannot parse integer from empty string",
-            IntErrorKind::InvalidDigit => "invalid digit found in string",
-            IntErrorKind::PosOverflow => "number too large to fit in target type",
-            IntErrorKind::NegOverflow => "number too small to fit in target type",
-            IntErrorKind::Zero => "number would be zero for non-zero type",
-        }
-    }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
 impl fmt::Display for ParseIntError {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        self.__description().fmt(f)
+        #[allow(deprecated)]
+        self.description().fmt(f)
     }
 }
 
@@ -150,14 +131,12 @@ impl fmt::Display for ParseIntError {
 impl Error for ParseIntError {
     #[allow(deprecated)]
     fn description(&self) -> &str {
-        self.__description()
-    }
-}
-
-#[stable(feature = "try_from", since = "1.34.0")]
-impl Error for TryFromIntError {
-    #[allow(deprecated)]
-    fn description(&self) -> &str {
-        self.__description()
+        match self.kind {
+            IntErrorKind::Empty => "cannot parse integer from empty string",
+            IntErrorKind::InvalidDigit => "invalid digit found in string",
+            IntErrorKind::PosOverflow => "number too large to fit in target type",
+            IntErrorKind::NegOverflow => "number too small to fit in target type",
+            IntErrorKind::Zero => "number would be zero for non-zero type",
+        }
     }
 }
diff --git a/library/core/src/num/int_log10.rs b/library/core/src/num/int_log10.rs
index 80472528f..0ce31b40a 100644
--- a/library/core/src/num/int_log10.rs
+++ b/library/core/src/num/int_log10.rs
@@ -138,3 +138,11 @@ pub const fn i64(val: i64) -> u32 {
 pub const fn i128(val: i128) -> u32 {
     u128(val as u128)
 }
+
+/// Instantiate this panic logic once, rather than for all the ilog methods
+/// on every single primitive type.
+#[cold]
+#[track_caller]
+pub const fn panic_for_nonpositive_argument() -> ! {
+    panic!("argument of integer logarithm must be positive")
+}
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 2cae98b8e..aec15212d 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -1,11 +1,31 @@
 macro_rules! int_impl {
-    ($SelfT:ty, $ActualT:ident, $UnsignedT:ty, $BITS:expr, $BITS_MINUS_ONE:expr, $Min:expr, $Max:expr,
-     $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr,
-     $reversed:expr, $le_bytes:expr, $be_bytes:expr,
-     $to_xe_bytes_doc:expr, $from_xe_bytes_doc:expr,
-     $bound_condition:expr) => {
+    (
+        Self = $SelfT:ty,
+        ActualT = $ActualT:ident,
+        UnsignedT = $UnsignedT:ty,
+
+        // These are all for use *only* in doc comments.
+        // As such, they're all passed as literals -- passing them as a string
+        // literal is fine if they need to be multiple code tokens.
+        // In non-comments, use the associated constants rather than these.
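+        // (For example, the shift methods below use `Self::BITS` rather than `$BITS`.)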
+ BITS = $BITS:literal, + BITS_MINUS_ONE = $BITS_MINUS_ONE:literal, + Min = $Min:literal, + Max = $Max:literal, + rot = $rot:literal, + rot_op = $rot_op:literal, + rot_result = $rot_result:literal, + swap_op = $swap_op:literal, + swapped = $swapped:literal, + reversed = $reversed:literal, + le_bytes = $le_bytes:literal, + be_bytes = $be_bytes:literal, + to_xe_bytes_doc = $to_xe_bytes_doc:expr, + from_xe_bytes_doc = $from_xe_bytes_doc:expr, + bound_condition = $bound_condition:literal, + ) => { /// The smallest value that can be represented by this integer type - #[doc = concat!("(−2<sup>", $BITS_MINUS_ONE, "</sup>", $bound_condition, ")")] + #[doc = concat!("(−2<sup>", $BITS_MINUS_ONE, "</sup>", $bound_condition, ").")] /// /// # Examples /// @@ -15,10 +35,10 @@ macro_rules! int_impl { #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN, ", stringify!($Min), ");")] /// ``` #[stable(feature = "assoc_int_consts", since = "1.43.0")] - pub const MIN: Self = !0 ^ ((!0 as $UnsignedT) >> 1) as Self; + pub const MIN: Self = !Self::MAX; /// The largest value that can be represented by this integer type - #[doc = concat!("(2<sup>", $BITS_MINUS_ONE, "</sup> − 1", $bound_condition, ")")] + #[doc = concat!("(2<sup>", $BITS_MINUS_ONE, "</sup> − 1", $bound_condition, ").")] /// /// # Examples /// @@ -28,7 +48,7 @@ macro_rules! int_impl { #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX, ", stringify!($Max), ");")] /// ``` #[stable(feature = "assoc_int_consts", since = "1.43.0")] - pub const MAX: Self = !Self::MIN; + pub const MAX: Self = (<$UnsignedT>::MAX >> 1) as Self; /// The size of this integer type in bits. /// @@ -38,7 +58,7 @@ macro_rules! int_impl { #[doc = concat!("assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");")] /// ``` #[stable(feature = "int_bits_const", since = "1.53.0")] - pub const BITS: u32 = $BITS; + pub const BITS: u32 = <$UnsignedT>::BITS; /// Converts a string slice in a given base to an integer. /// @@ -1365,7 +1385,7 @@ macro_rules! int_impl { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds unsafe { - self.unchecked_shl(rhs & ($BITS - 1)) + self.unchecked_shl(rhs & (Self::BITS - 1)) } } @@ -1395,7 +1415,7 @@ macro_rules! int_impl { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds unsafe { - self.unchecked_shr(rhs & ($BITS - 1)) + self.unchecked_shr(rhs & (Self::BITS - 1)) } } @@ -1901,7 +1921,7 @@ macro_rules! int_impl { without modifying the original"] #[inline] pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) + (self.wrapping_shl(rhs), rhs >= Self::BITS) } /// Shifts self right by `rhs` bits. @@ -1924,7 +1944,7 @@ macro_rules! int_impl { without modifying the original"] #[inline] pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) + (self.wrapping_shr(rhs), rhs >= Self::BITS) } /// Computes the absolute value of `self`. @@ -2331,14 +2351,17 @@ macro_rules! 
int_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog(self, base: Self) -> u32 { assert!(base >= 2, "base of integer logarithm must be at least 2"); - self.checked_ilog(base).expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog(base) { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 2 logarithm of the number, rounded down. @@ -2354,13 +2377,16 @@ macro_rules! int_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog2(self) -> u32 { - self.checked_ilog2().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog2() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 10 logarithm of the number, rounded down. @@ -2376,13 +2402,16 @@ macro_rules! int_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog10(self) -> u32 { - self.checked_ilog10().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog10() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the logarithm of the number with respect to an arbitrary base, @@ -2574,12 +2603,13 @@ macro_rules! int_impl { #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline(always)] + #[rustc_allow_const_fn_unstable(const_cmp)] pub const fn signum(self) -> Self { - match self { - n if n > 0 => 1, - 0 => 0, - _ => -1, - } + // Picking the right way to phrase this is complicated + // (<https://graphics.stanford.edu/~seander/bithacks.html#CopyIntegerSign>) + // so delegate it to `Ord` which is already producing -1/0/+1 + // exactly like we need and can be the place to deal with the complexity. + self.cmp(&0) as _ } /// Returns `true` if `self` is positive and `false` if the number is zero or diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs index ac7f579eb..a50c91579 100644 --- a/library/core/src/num/mod.rs +++ b/library/core/src/num/mod.rs @@ -9,9 +9,6 @@ use crate::mem; use crate::ops::{Add, Mul, Sub}; use crate::str::FromStr; -#[cfg(not(no_fp_fmt_parse))] -use crate::error::Error; - // Used because the `?` operator is not allowed in a const context. macro_rules! try_opt { ($e:expr) => { @@ -61,15 +58,6 @@ pub use wrapping::Wrapping; #[cfg(not(no_fp_fmt_parse))] pub use dec2flt::ParseFloatError; -#[cfg(not(no_fp_fmt_parse))] -#[stable(feature = "rust1", since = "1.0.0")] -impl Error for ParseFloatError { - #[allow(deprecated)] - fn description(&self) -> &str { - self.__description() - } -} - #[stable(feature = "rust1", since = "1.0.0")] pub use error::ParseIntError; @@ -238,72 +226,217 @@ macro_rules! widening_impl { } impl i8 { - int_impl! 
{ i8, i8, u8, 8, 7, -128, 127, 2, "-0x7e", "0xa", "0x12", "0x12", "0x48", - "[0x12]", "[0x12]", "", "", "" } + int_impl! { + Self = i8, + ActualT = i8, + UnsignedT = u8, + BITS = 8, + BITS_MINUS_ONE = 7, + Min = -128, + Max = 127, + rot = 2, + rot_op = "-0x7e", + rot_result = "0xa", + swap_op = "0x12", + swapped = "0x12", + reversed = "0x48", + le_bytes = "[0x12]", + be_bytes = "[0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i16 { - int_impl! { i16, i16, u16, 16, 15, -32768, 32767, 4, "-0x5ffd", "0x3a", "0x1234", "0x3412", - "0x2c48", "[0x34, 0x12]", "[0x12, 0x34]", "", "", "" } + int_impl! { + Self = i16, + ActualT = i16, + UnsignedT = u16, + BITS = 16, + BITS_MINUS_ONE = 15, + Min = -32768, + Max = 32767, + rot = 4, + rot_op = "-0x5ffd", + rot_result = "0x3a", + swap_op = "0x1234", + swapped = "0x3412", + reversed = "0x2c48", + le_bytes = "[0x34, 0x12]", + be_bytes = "[0x12, 0x34]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i32 { - int_impl! { i32, i32, u32, 32, 31, -2147483648, 2147483647, 8, "0x10000b3", "0xb301", - "0x12345678", "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78]", "", "", "" } + int_impl! { + Self = i32, + ActualT = i32, + UnsignedT = u32, + BITS = 32, + BITS_MINUS_ONE = 31, + Min = -2147483648, + Max = 2147483647, + rot = 8, + rot_op = "0x10000b3", + rot_result = "0xb301", + swap_op = "0x12345678", + swapped = "0x78563412", + reversed = "0x1e6a2c48", + le_bytes = "[0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i64 { - int_impl! { i64, i64, u64, 64, 63, -9223372036854775808, 9223372036854775807, 12, - "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412", - "0x6a2c48091e6a2c48", "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", "", "", "" } + int_impl! { + Self = i64, + ActualT = i64, + UnsignedT = u64, + BITS = 64, + BITS_MINUS_ONE = 63, + Min = -9223372036854775808, + Max = 9223372036854775807, + rot = 12, + rot_op = "0xaa00000000006e1", + rot_result = "0x6e10aa", + swap_op = "0x1234567890123456", + swapped = "0x5634129078563412", + reversed = "0x6a2c48091e6a2c48", + le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } impl i128 { - int_impl! { i128, i128, u128, 128, 127, -170141183460469231731687303715884105728, - 170141183460469231731687303715884105727, 16, - "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012", - "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48", - "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ - 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ - 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", "", "", "" } + int_impl! 
{ + Self = i128, + ActualT = i128, + UnsignedT = u128, + BITS = 128, + BITS_MINUS_ONE = 127, + Min = -170141183460469231731687303715884105728, + Max = 170141183460469231731687303715884105727, + rot = 16, + rot_op = "0x13f40000000000000000000000004f76", + rot_result = "0x4f7613f4", + swap_op = "0x12345678901234567890123456789012", + swapped = "0x12907856341290785634129078563412", + reversed = "0x48091e6a2c48091e6a2c48091e6a2c48", + le_bytes = "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ + 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ + 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } #[cfg(target_pointer_width = "16")] impl isize { - int_impl! { isize, i16, usize, 16, 15, -32768, 32767, 4, "-0x5ffd", "0x3a", "0x1234", - "0x3412", "0x2c48", "[0x34, 0x12]", "[0x12, 0x34]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 16-bit targets" } + int_impl! { + Self = isize, + ActualT = i16, + UnsignedT = usize, + BITS = 16, + BITS_MINUS_ONE = 15, + Min = -32768, + Max = 32767, + rot = 4, + rot_op = "-0x5ffd", + rot_result = "0x3a", + swap_op = "0x1234", + swapped = "0x3412", + reversed = "0x2c48", + le_bytes = "[0x34, 0x12]", + be_bytes = "[0x12, 0x34]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 16-bit targets", + } } #[cfg(target_pointer_width = "32")] impl isize { - int_impl! { isize, i32, usize, 32, 31, -2147483648, 2147483647, 8, "0x10000b3", "0xb301", - "0x12345678", "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 32-bit targets" } + int_impl! { + Self = isize, + ActualT = i32, + UnsignedT = usize, + BITS = 32, + BITS_MINUS_ONE = 31, + Min = -2147483648, + Max = 2147483647, + rot = 8, + rot_op = "0x10000b3", + rot_result = "0xb301", + swap_op = "0x12345678", + swapped = "0x78563412", + reversed = "0x1e6a2c48", + le_bytes = "[0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 32-bit targets", + } } #[cfg(target_pointer_width = "64")] impl isize { - int_impl! { isize, i64, usize, 64, 63, -9223372036854775808, 9223372036854775807, - 12, "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412", - "0x6a2c48091e6a2c48", "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 64-bit targets" } + int_impl! { + Self = isize, + ActualT = i64, + UnsignedT = usize, + BITS = 64, + BITS_MINUS_ONE = 63, + Min = -9223372036854775808, + Max = 9223372036854775807, + rot = 12, + rot_op = "0xaa00000000006e1", + rot_result = "0x6e10aa", + swap_op = "0x1234567890123456", + swapped = "0x5634129078563412", + reversed = "0x6a2c48091e6a2c48", + le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 64-bit targets", + } } /// If 6th bit set ascii is upper case. const ASCII_CASE_MASK: u8 = 0b0010_0000; impl u8 { - uint_impl! 
{ u8, u8, i8, NonZeroU8, 8, 255, 2, "0x82", "0xa", "0x12", "0x12", "0x48", "[0x12]", - "[0x12]", "", "", "" } + uint_impl! { + Self = u8, + ActualT = u8, + SignedT = i8, + NonZeroT = NonZeroU8, + BITS = 8, + MAX = 255, + rot = 2, + rot_op = "0x82", + rot_result = "0xa", + swap_op = "0x12", + swapped = "0x12", + reversed = "0x48", + le_bytes = "[0x12]", + be_bytes = "[0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! { u8, u16, 8, unsigned } /// Checks if the value is within the ASCII range. @@ -887,8 +1020,25 @@ impl u8 { } impl u16 { - uint_impl! { u16, u16, i16, NonZeroU16, 16, 65535, 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48", - "[0x34, 0x12]", "[0x12, 0x34]", "", "", "" } + uint_impl! { + Self = u16, + ActualT = u16, + SignedT = i16, + NonZeroT = NonZeroU16, + BITS = 16, + MAX = 65535, + rot = 4, + rot_op = "0xa003", + rot_result = "0x3a", + swap_op = "0x1234", + swapped = "0x3412", + reversed = "0x2c48", + le_bytes = "[0x34, 0x12]", + be_bytes = "[0x12, 0x34]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! { u16, u32, 16, unsigned } /// Checks if the value is a Unicode surrogate code point, which are disallowed values for [`char`]. @@ -918,56 +1068,144 @@ impl u16 { } impl u32 { - uint_impl! { u32, u32, i32, NonZeroU32, 32, 4294967295, 8, "0x10000b3", "0xb301", "0x12345678", - "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", "[0x12, 0x34, 0x56, 0x78]", "", "", "" } + uint_impl! { + Self = u32, + ActualT = u32, + SignedT = i32, + NonZeroT = NonZeroU32, + BITS = 32, + MAX = 4294967295, + rot = 8, + rot_op = "0x10000b3", + rot_result = "0xb301", + swap_op = "0x12345678", + swapped = "0x78563412", + reversed = "0x1e6a2c48", + le_bytes = "[0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! { u32, u64, 32, unsigned } } impl u64 { - uint_impl! { u64, u64, i64, NonZeroU64, 64, 18446744073709551615, 12, "0xaa00000000006e1", "0x6e10aa", - "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48", - "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", - "", "", ""} + uint_impl! { + Self = u64, + ActualT = u64, + SignedT = i64, + NonZeroT = NonZeroU64, + BITS = 64, + MAX = 18446744073709551615, + rot = 12, + rot_op = "0xaa00000000006e1", + rot_result = "0x6e10aa", + swap_op = "0x1234567890123456", + swapped = "0x5634129078563412", + reversed = "0x6a2c48091e6a2c48", + le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } widening_impl! { u64, u128, 64, unsigned } } impl u128 { - uint_impl! { u128, u128, i128, NonZeroU128, 128, 340282366920938463463374607431768211455, 16, - "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012", - "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48", - "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ - 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ - 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", - "", "", ""} + uint_impl! 
{ + Self = u128, + ActualT = u128, + SignedT = i128, + NonZeroT = NonZeroU128, + BITS = 128, + MAX = 340282366920938463463374607431768211455, + rot = 16, + rot_op = "0x13f40000000000000000000000004f76", + rot_result = "0x4f7613f4", + swap_op = "0x12345678901234567890123456789012", + swapped = "0x12907856341290785634129078563412", + reversed = "0x48091e6a2c48091e6a2c48091e6a2c48", + le_bytes = "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \ + 0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \ + 0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", + to_xe_bytes_doc = "", + from_xe_bytes_doc = "", + bound_condition = "", + } } #[cfg(target_pointer_width = "16")] impl usize { - uint_impl! { usize, u16, isize, NonZeroUsize, 16, 65535, 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48", - "[0x34, 0x12]", "[0x12, 0x34]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 16-bit targets" } + uint_impl! { + Self = usize, + ActualT = u16, + SignedT = isize, + NonZeroT = NonZeroUsize, + BITS = 16, + MAX = 65535, + rot = 4, + rot_op = "0xa003", + rot_result = "0x3a", + swap_op = "0x1234", + swapped = "0x3412", + reversed = "0x2c48", + le_bytes = "[0x34, 0x12]", + be_bytes = "[0x12, 0x34]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 16-bit targets", + } widening_impl! { usize, u32, 16, unsigned } } + #[cfg(target_pointer_width = "32")] impl usize { - uint_impl! { usize, u32, isize, NonZeroUsize, 32, 4294967295, 8, "0x10000b3", "0xb301", "0x12345678", - "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", "[0x12, 0x34, 0x56, 0x78]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 32-bit targets" } + uint_impl! { + Self = usize, + ActualT = u32, + SignedT = isize, + NonZeroT = NonZeroUsize, + BITS = 32, + MAX = 4294967295, + rot = 8, + rot_op = "0x10000b3", + rot_result = "0xb301", + swap_op = "0x12345678", + swapped = "0x78563412", + reversed = "0x1e6a2c48", + le_bytes = "[0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 32-bit targets", + } widening_impl! { usize, u64, 32, unsigned } } #[cfg(target_pointer_width = "64")] impl usize { - uint_impl! { usize, u64, isize, NonZeroUsize, 64, 18446744073709551615, 12, "0xaa00000000006e1", "0x6e10aa", - "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48", - "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", - "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", - usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!(), - " on 64-bit targets" } + uint_impl! { + Self = usize, + ActualT = u64, + SignedT = isize, + NonZeroT = NonZeroUsize, + BITS = 64, + MAX = 18446744073709551615, + rot = 12, + rot_op = "0xaa00000000006e1", + rot_result = "0x6e10aa", + swap_op = "0x1234567890123456", + swapped = "0x5634129078563412", + reversed = "0x6a2c48091e6a2c48", + le_bytes = "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]", + be_bytes = "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", + to_xe_bytes_doc = usize_isize_to_xe_bytes_doc!(), + from_xe_bytes_doc = usize_isize_from_xe_bytes_doc!(), + bound_condition = " on 64-bit targets", + } widening_impl! 
{ usize, u128, 64, unsigned } } diff --git a/library/core/src/num/shells/i128.rs b/library/core/src/num/shells/i128.rs index 7b048dc52..b3b3d3b48 100644 --- a/library/core/src/num/shells/i128.rs +++ b/library/core/src/num/shells/i128.rs @@ -1,6 +1,4 @@ -//! Constants for the 128-bit signed integer type. -//! -//! *[See also the `i128` primitive type][i128].* +//! Redundant constants module for the [`i128` primitive type][i128]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/i16.rs b/library/core/src/num/shells/i16.rs index 5c5812d5c..70a452e19 100644 --- a/library/core/src/num/shells/i16.rs +++ b/library/core/src/num/shells/i16.rs @@ -1,6 +1,4 @@ -//! Constants for the 16-bit signed integer type. -//! -//! *[See also the `i16` primitive type][i16].* +//! Redundant constants module for the [`i16` primitive type][i16]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/i32.rs b/library/core/src/num/shells/i32.rs index b283ac644..c30849e25 100644 --- a/library/core/src/num/shells/i32.rs +++ b/library/core/src/num/shells/i32.rs @@ -1,6 +1,4 @@ -//! Constants for the 32-bit signed integer type. -//! -//! *[See also the `i32` primitive type][i32].* +//! Redundant constants module for the [`i32` primitive type][i32]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/i64.rs b/library/core/src/num/shells/i64.rs index a416fa7e9..77d95d712 100644 --- a/library/core/src/num/shells/i64.rs +++ b/library/core/src/num/shells/i64.rs @@ -1,6 +1,4 @@ -//! Constants for the 64-bit signed integer type. -//! -//! *[See also the `i64` primitive type][i64].* +//! Redundant constants module for the [`i64` primitive type][i64]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/i8.rs b/library/core/src/num/shells/i8.rs index 02465013a..516ba8cde 100644 --- a/library/core/src/num/shells/i8.rs +++ b/library/core/src/num/shells/i8.rs @@ -1,6 +1,4 @@ -//! Constants for the 8-bit signed integer type. -//! -//! *[See also the `i8` primitive type][i8].* +//! Redundant constants module for the [`i8` primitive type][i8]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/isize.rs b/library/core/src/num/shells/isize.rs index 1579fbab6..828f7345b 100644 --- a/library/core/src/num/shells/isize.rs +++ b/library/core/src/num/shells/isize.rs @@ -1,6 +1,4 @@ -//! Constants for the pointer-sized signed integer type. -//! -//! *[See also the `isize` primitive type][isize].* +//! Redundant constants module for the [`isize` primitive type][isize]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/u128.rs b/library/core/src/num/shells/u128.rs index fe08cee58..b1e30e384 100644 --- a/library/core/src/num/shells/u128.rs +++ b/library/core/src/num/shells/u128.rs @@ -1,6 +1,4 @@ -//! Constants for the 128-bit unsigned integer type. -//! -//! *[See also the `u128` primitive type][u128].* +//! Redundant constants module for the [`u128` primitive type][u128]. //! //! New code should use the associated constants directly on the primitive type. 
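Each of these shell modules now describes itself as redundant: everything in it has an associated-constant equivalent directly on the primitive type. The two spellings, on stable Rust:

```rust
fn main() {
    // Legacy spelling: the per-type constants module documented as
    // redundant above.
    let via_module = std::u16::MAX;

    // Preferred spelling: the associated constant on the primitive type.
    let via_assoc = u16::MAX;

    assert_eq!(via_module, via_assoc);
}
```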
diff --git a/library/core/src/num/shells/u16.rs b/library/core/src/num/shells/u16.rs index 36f8c6978..b203806f4 100644 --- a/library/core/src/num/shells/u16.rs +++ b/library/core/src/num/shells/u16.rs @@ -1,6 +1,4 @@ -//! Constants for the 16-bit unsigned integer type. - -//! *[See also the `u16` primitive type][u16].* +//! Redundant constants module for the [`u16` primitive type][u16]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/u32.rs b/library/core/src/num/shells/u32.rs index 1c369097d..4c84274e7 100644 --- a/library/core/src/num/shells/u32.rs +++ b/library/core/src/num/shells/u32.rs @@ -1,6 +1,4 @@ -//! Constants for the 32-bit unsigned integer type. - -//! *[See also the `u32` primitive type][u32].* +//! Redundant constants module for the [`u32` primitive type][u32]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/u64.rs b/library/core/src/num/shells/u64.rs index e8b691d15..47a95c682 100644 --- a/library/core/src/num/shells/u64.rs +++ b/library/core/src/num/shells/u64.rs @@ -1,6 +1,4 @@ -//! Constants for the 64-bit unsigned integer type. - -//! *[See also the `u64` primitive type][u64].* +//! Redundant constants module for the [`u64` primitive type][u64]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/u8.rs b/library/core/src/num/shells/u8.rs index 817c6a18a..360baef72 100644 --- a/library/core/src/num/shells/u8.rs +++ b/library/core/src/num/shells/u8.rs @@ -1,6 +1,4 @@ -//! Constants for the 8-bit unsigned integer type. - -//! *[See also the `u8` primitive type][u8].* +//! Redundant constants module for the [`u8` primitive type][u8]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/shells/usize.rs b/library/core/src/num/shells/usize.rs index 3e1bec5ec..44c24dfc2 100644 --- a/library/core/src/num/shells/usize.rs +++ b/library/core/src/num/shells/usize.rs @@ -1,6 +1,4 @@ -//! Constants for the pointer-sized unsigned integer type. - -//! *[See also the `usize` primitive type][usize].* +//! Redundant constants module for the [`usize` primitive type][usize]. //! //! New code should use the associated constants directly on the primitive type. diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index 1c97c4686..932038a0b 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -1,10 +1,28 @@ macro_rules! uint_impl { - ($SelfT:ty, $ActualT:ident, $SignedT:ident, $NonZeroT:ident, - $BITS:expr, $MaxV:expr, - $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr, - $reversed:expr, $le_bytes:expr, $be_bytes:expr, - $to_xe_bytes_doc:expr, $from_xe_bytes_doc:expr, - $bound_condition:expr) => { + ( + Self = $SelfT:ty, + ActualT = $ActualT:ident, + SignedT = $SignedT:ident, + NonZeroT = $NonZeroT:ident, + + // These are all for use *only* in doc comments. + // As such, they're all passed as literals -- passing them as a string + // literal is fine if they need to be multiple code tokens. + // In non-comments, use the associated constants rather than these.
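Before the unsigned argument list resumes: the `ilog*` bodies in this diff (signed earlier, unsigned below) swap `Option::expect` for an explicit branch into `int_log10::panic_for_nonpositive_argument`, instantiating the panic machinery once rather than per method per primitive. A self-contained sketch of the same outlining pattern (this mirrors, but is not, the helper added in `int_log10.rs`):

```rust
// Outlined panic: #[cold] keeps the failure path out of the hot code,
// while #[track_caller] still reports the caller's location.
#[cold]
#[track_caller]
const fn panic_for_nonpositive_argument() -> ! {
    panic!("argument of integer logarithm must be positive")
}

const fn checked_ilog2(x: u32) -> Option<u32> {
    if x == 0 { None } else { Some(31 - x.leading_zeros()) }
}

const fn ilog2(x: u32) -> u32 {
    if let Some(log) = checked_ilog2(x) {
        log
    } else {
        panic_for_nonpositive_argument()
    }
}

fn main() {
    assert_eq!(ilog2(1), 0);
    assert_eq!(ilog2(8), 3);
    assert_eq!(ilog2(u32::MAX), 31);
}
```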
+ BITS = $BITS:literal, + MAX = $MaxV:literal, + rot = $rot:literal, + rot_op = $rot_op:literal, + rot_result = $rot_result:literal, + swap_op = $swap_op:literal, + swapped = $swapped:literal, + reversed = $reversed:literal, + le_bytes = $le_bytes:literal, + be_bytes = $be_bytes:literal, + to_xe_bytes_doc = $to_xe_bytes_doc:expr, + from_xe_bytes_doc = $from_xe_bytes_doc:expr, + bound_condition = $bound_condition:literal, + ) => { /// The smallest value that can be represented by this integer type. /// /// # Examples @@ -18,7 +36,7 @@ macro_rules! uint_impl { pub const MIN: Self = 0; /// The largest value that can be represented by this integer type - #[doc = concat!("(2<sup>", $BITS, "</sup> − 1", $bound_condition, ")")] + #[doc = concat!("(2<sup>", $BITS, "</sup> − 1", $bound_condition, ").")] /// /// # Examples /// @@ -38,7 +56,7 @@ macro_rules! uint_impl { #[doc = concat!("assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");")] /// ``` #[stable(feature = "int_bits_const", since = "1.53.0")] - pub const BITS: u32 = $BITS; + pub const BITS: u32 = Self::MAX.count_ones(); /// Converts a string slice in a given base to an integer. /// @@ -705,14 +723,17 @@ macro_rules! uint_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog(self, base: Self) -> u32 { assert!(base >= 2, "base of integer logarithm must be at least 2"); - self.checked_ilog(base).expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog(base) { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 2 logarithm of the number, rounded down. @@ -728,13 +749,16 @@ macro_rules! uint_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog2(self) -> u32 { - self.checked_ilog2().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog2() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the base 10 logarithm of the number, rounded down. @@ -750,13 +774,16 @@ macro_rules! uint_impl { /// ``` #[stable(feature = "int_log", since = "1.67.0")] #[rustc_const_stable(feature = "int_log", since = "1.67.0")] - #[rustc_allow_const_fn_unstable(const_option)] #[must_use = "this returns the result of the operation, \ without modifying the original"] #[inline] #[track_caller] pub const fn ilog10(self) -> u32 { - self.checked_ilog10().expect("argument of integer logarithm must be positive") + if let Some(log) = self.checked_ilog10() { + log + } else { + int_log10::panic_for_nonpositive_argument() + } } /// Returns the logarithm of the number with respect to an arbitrary base, @@ -1381,7 +1408,7 @@ macro_rules! uint_impl { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds unsafe { - self.unchecked_shl(rhs & ($BITS - 1)) + self.unchecked_shl(rhs & (Self::BITS - 1)) } } @@ -1414,7 +1441,7 @@ macro_rules! 
uint_impl { // SAFETY: the masking by the bitsize of the type ensures that we do not shift // out of bounds unsafe { - self.unchecked_shr(rhs & ($BITS - 1)) + self.unchecked_shr(rhs & (Self::BITS - 1)) } } @@ -1838,7 +1865,7 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) + (self.wrapping_shl(rhs), rhs >= Self::BITS) } /// Shifts self right by `rhs` bits. @@ -1863,7 +1890,7 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) + (self.wrapping_shr(rhs), rhs >= Self::BITS) } /// Raises self to the power of `exp`, using exponentiation by squaring. diff --git a/library/core/src/ops/arith.rs b/library/core/src/ops/arith.rs index 75c52d3ec..0c7ee9630 100644 --- a/library/core/src/ops/arith.rs +++ b/library/core/src/ops/arith.rs @@ -86,7 +86,8 @@ pub trait Add<Rhs = Self> { /// ``` /// assert_eq!(12 + 1, 13); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "add"] #[stable(feature = "rust1", since = "1.0.0")] fn add(self, rhs: Rhs) -> Self::Output; } @@ -195,7 +196,8 @@ pub trait Sub<Rhs = Self> { /// ``` /// assert_eq!(12 - 1, 11); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "sub"] #[stable(feature = "rust1", since = "1.0.0")] fn sub(self, rhs: Rhs) -> Self::Output; } @@ -325,7 +327,8 @@ pub trait Mul<Rhs = Self> { /// ``` /// assert_eq!(12 * 2, 24); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "mul"] #[stable(feature = "rust1", since = "1.0.0")] fn mul(self, rhs: Rhs) -> Self::Output; } @@ -459,7 +462,8 @@ pub trait Div<Rhs = Self> { /// ``` /// assert_eq!(12 / 2, 6); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "div"] #[stable(feature = "rust1", since = "1.0.0")] fn div(self, rhs: Rhs) -> Self::Output; } @@ -545,7 +549,7 @@ div_impl_float! { f32 f64 } #[lang = "rem"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_on_unimplemented( - message = "cannot mod `{Self}` by `{Rhs}`", + message = "cannot calculate the remainder of `{Self}` divided by `{Rhs}`", label = "no implementation for `{Self} % {Rhs}`" )] #[doc(alias = "%")] @@ -562,7 +566,8 @@ pub trait Rem<Rhs = Self> { /// ``` /// assert_eq!(12 % 10, 2); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "rem"] #[stable(feature = "rust1", since = "1.0.0")] fn rem(self, rhs: Rhs) -> Self::Output; } @@ -678,7 +683,8 @@ pub trait Neg { /// let x: i32 = 12; /// assert_eq!(-x, -12); /// ``` - #[must_use] + #[must_use = "this returns the result of the operation, without modifying the original"] + #[rustc_diagnostic_item = "neg"] #[stable(feature = "rust1", since = "1.0.0")] fn neg(self) -> Self::Output; } @@ -981,7 +987,7 @@ div_assign_impl! 
{ usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } #[lang = "rem_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] #[rustc_on_unimplemented( - message = "cannot mod-assign `{Self}` by `{Rhs}``", + message = "cannot calculate and assign the remainder of `{Self}` divided by `{Rhs}`", label = "no implementation for `{Self} %= {Rhs}`" )] #[doc(alias = "%")] diff --git a/library/core/src/ops/control_flow.rs b/library/core/src/ops/control_flow.rs index cd183540c..117706fb4 100644 --- a/library/core/src/ops/control_flow.rs +++ b/library/core/src/ops/control_flow.rs @@ -259,46 +259,3 @@ impl<R: ops::Try> ControlFlow<R, R::Output> { } } } - -impl<B> ControlFlow<B, ()> { - /// It's frequently the case that there's no value needed with `Continue`, - /// so this provides a way to avoid typing `(())`, if you prefer it. - /// - /// # Examples - /// - /// ``` - /// #![feature(control_flow_enum)] - /// use std::ops::ControlFlow; - /// - /// let mut partial_sum = 0; - /// let last_used = (1..10).chain(20..25).try_for_each(|x| { - /// partial_sum += x; - /// if partial_sum > 100 { ControlFlow::Break(x) } - /// else { ControlFlow::CONTINUE } - /// }); - /// assert_eq!(last_used.break_value(), Some(22)); - /// ``` - #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")] - pub const CONTINUE: Self = ControlFlow::Continue(()); -} - -impl<C> ControlFlow<(), C> { - /// APIs like `try_for_each` don't need values with `Break`, - /// so this provides a way to avoid typing `(())`, if you prefer it. - /// - /// # Examples - /// - /// ``` - /// #![feature(control_flow_enum)] - /// use std::ops::ControlFlow; - /// - /// let mut partial_sum = 0; - /// (1..10).chain(20..25).try_for_each(|x| { - /// if partial_sum > 100 { ControlFlow::BREAK } - /// else { partial_sum += x; ControlFlow::CONTINUE } - /// }); - /// assert_eq!(partial_sum, 108); - /// ``` - #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")] - pub const BREAK: Self = ControlFlow::Break(()); -} diff --git a/library/core/src/ops/range.rs b/library/core/src/ops/range.rs index d29ae3561..b8ab26564 100644 --- a/library/core/src/ops/range.rs +++ b/library/core/src/ops/range.rs @@ -96,7 +96,7 @@ impl<Idx: fmt::Debug> fmt::Debug for Range<Idx> { } } -impl<Idx: PartialOrd<Idx>> Range<Idx> { +impl<Idx: ~const PartialOrd<Idx>> Range<Idx> { /// Returns `true` if `item` is contained in the range. 
/// /// # Examples @@ -116,10 +116,11 @@ impl<Idx: PartialOrd<Idx>> Range<Idx> { /// assert!(!(f32::NAN..1.0).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains<U>(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains<U>(&self, item: &U) -> bool where - Idx: PartialOrd<U>, - U: ?Sized + PartialOrd<Idx>, + Idx: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<Idx>, { <Self as RangeBounds<Idx>>::contains(self, item) } @@ -142,7 +143,8 @@ impl<Idx: PartialOrd<Idx>> Range<Idx> { /// assert!( (f32::NAN..5.0).is_empty()); /// ``` #[stable(feature = "range_is_empty", since = "1.47.0")] - pub fn is_empty(&self) -> bool { + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn is_empty(&self) -> bool { !(self.start < self.end) } } @@ -199,7 +201,7 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeFrom<Idx> { } } -impl<Idx: PartialOrd<Idx>> RangeFrom<Idx> { +impl<Idx: ~const PartialOrd<Idx>> RangeFrom<Idx> { /// Returns `true` if `item` is contained in the range. /// /// # Examples @@ -214,10 +216,11 @@ impl<Idx: PartialOrd<Idx>> RangeFrom<Idx> { /// assert!(!(f32::NAN..).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains<U>(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains<U>(&self, item: &U) -> bool where - Idx: PartialOrd<U>, - U: ?Sized + PartialOrd<Idx>, + Idx: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<Idx>, { <Self as RangeBounds<Idx>>::contains(self, item) } @@ -280,7 +283,7 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeTo<Idx> { } } -impl<Idx: PartialOrd<Idx>> RangeTo<Idx> { +impl<Idx: ~const PartialOrd<Idx>> RangeTo<Idx> { /// Returns `true` if `item` is contained in the range. /// /// # Examples @@ -295,10 +298,11 @@ impl<Idx: PartialOrd<Idx>> RangeTo<Idx> { /// assert!(!(..f32::NAN).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains<U>(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains<U>(&self, item: &U) -> bool where - Idx: PartialOrd<U>, - U: ?Sized + PartialOrd<Idx>, + Idx: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<Idx>, { <Self as RangeBounds<Idx>>::contains(self, item) } @@ -437,7 +441,8 @@ impl<Idx> RangeInclusive<Idx> { /// ``` #[stable(feature = "inclusive_range_methods", since = "1.27.0")] #[inline] - pub fn into_inner(self) -> (Idx, Idx) { + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn into_inner(self) -> (Idx, Idx) { (self.start, self.end) } } @@ -469,7 +474,7 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeInclusive<Idx> { } } -impl<Idx: PartialOrd<Idx>> RangeInclusive<Idx> { +impl<Idx: ~const PartialOrd<Idx>> RangeInclusive<Idx> { /// Returns `true` if `item` is contained in the range. 
/// /// # Examples @@ -500,10 +505,11 @@ impl<Idx: PartialOrd<Idx>> RangeInclusive<Idx> { /// assert!(!r.contains(&3) && !r.contains(&5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains<U>(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains<U>(&self, item: &U) -> bool where - Idx: PartialOrd<U>, - U: ?Sized + PartialOrd<Idx>, + Idx: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<Idx>, { <Self as RangeBounds<Idx>>::contains(self, item) } @@ -535,8 +541,9 @@ impl<Idx: PartialOrd<Idx>> RangeInclusive<Idx> { /// assert!(r.is_empty()); /// ``` #[stable(feature = "range_is_empty", since = "1.47.0")] + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] #[inline] - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.exhausted || !(self.start <= self.end) } } @@ -598,7 +605,7 @@ impl<Idx: fmt::Debug> fmt::Debug for RangeToInclusive<Idx> { } } -impl<Idx: PartialOrd<Idx>> RangeToInclusive<Idx> { +impl<Idx: ~const PartialOrd<Idx>> RangeToInclusive<Idx> { /// Returns `true` if `item` is contained in the range. /// /// # Examples @@ -613,10 +620,11 @@ impl<Idx: PartialOrd<Idx>> RangeToInclusive<Idx> { /// assert!(!(..=f32::NAN).contains(&0.5)); /// ``` #[stable(feature = "range_contains", since = "1.35.0")] - pub fn contains<U>(&self, item: &U) -> bool + #[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] + pub const fn contains<U>(&self, item: &U) -> bool where - Idx: PartialOrd<U>, - U: ?Sized + PartialOrd<Idx>, + Idx: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<Idx>, { <Self as RangeBounds<Idx>>::contains(self, item) } @@ -757,6 +765,7 @@ impl<T: Clone> Bound<&T> { /// `RangeBounds` is implemented by Rust's built-in range types, produced /// by range syntax like `..`, `a..`, `..b`, `..=c`, `d..e`, or `f..=g`. #[stable(feature = "collections_range", since = "1.28.0")] +#[const_trait] pub trait RangeBounds<T: ?Sized> { /// Start index bound. 
/// @@ -809,8 +818,8 @@ pub trait RangeBounds<T: ?Sized> { #[stable(feature = "range_contains", since = "1.35.0")] fn contains<U>(&self, item: &U) -> bool where - T: PartialOrd<U>, - U: ?Sized + PartialOrd<T>, + T: ~const PartialOrd<U>, + U: ?Sized + ~const PartialOrd<T>, { (match self.start_bound() { Included(start) => start <= item, @@ -827,7 +836,8 @@ pub trait RangeBounds<T: ?Sized> { use self::Bound::{Excluded, Included, Unbounded}; #[stable(feature = "collections_range", since = "1.28.0")] -impl<T: ?Sized> RangeBounds<T> for RangeFull { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T: ?Sized> const RangeBounds<T> for RangeFull { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -837,7 +847,8 @@ impl<T: ?Sized> RangeBounds<T> for RangeFull { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeFrom<T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeFrom<T> { fn start_bound(&self) -> Bound<&T> { Included(&self.start) } @@ -847,7 +858,8 @@ impl<T> RangeBounds<T> for RangeFrom<T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeTo<T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeTo<T> { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -857,7 +869,8 @@ impl<T> RangeBounds<T> for RangeTo<T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for Range<T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for Range<T> { fn start_bound(&self) -> Bound<&T> { Included(&self.start) } @@ -867,7 +880,8 @@ impl<T> RangeBounds<T> for Range<T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeInclusive<T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeInclusive<T> { fn start_bound(&self) -> Bound<&T> { Included(&self.start) } @@ -883,7 +897,8 @@ impl<T> RangeBounds<T> for RangeInclusive<T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeToInclusive<T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeToInclusive<T> { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -893,7 +908,8 @@ impl<T> RangeBounds<T> for RangeToInclusive<T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for (Bound<T>, Bound<T>) { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for (Bound<T>, Bound<T>) { fn start_bound(&self) -> Bound<&T> { match *self { (Included(ref start), _) => Included(start), @@ -912,7 +928,8 @@ impl<T> RangeBounds<T> for (Bound<T>, Bound<T>) { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<'a, T: ?Sized + 'a> RangeBounds<T> for (Bound<&'a T>, Bound<&'a T>) { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<'a, T: ?Sized + 'a> const RangeBounds<T> for (Bound<&'a T>, Bound<&'a T>) { fn start_bound(&self) -> Bound<&T> { self.0 } @@ -923,7 +940,8 @@ impl<'a, T: ?Sized + 'a> RangeBounds<T> for (Bound<&'a T>, Bound<&'a T>) { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeFrom<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue 
= "108082")] +impl<T> const RangeBounds<T> for RangeFrom<&T> { fn start_bound(&self) -> Bound<&T> { Included(self.start) } @@ -933,7 +951,8 @@ impl<T> RangeBounds<T> for RangeFrom<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeTo<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeTo<&T> { fn start_bound(&self) -> Bound<&T> { Unbounded } @@ -943,7 +962,8 @@ impl<T> RangeBounds<T> for RangeTo<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for Range<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for Range<&T> { fn start_bound(&self) -> Bound<&T> { Included(self.start) } @@ -953,7 +973,8 @@ impl<T> RangeBounds<T> for Range<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeInclusive<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeInclusive<&T> { fn start_bound(&self) -> Bound<&T> { Included(self.start) } @@ -963,7 +984,8 @@ impl<T> RangeBounds<T> for RangeInclusive<&T> { } #[stable(feature = "collections_range", since = "1.28.0")] -impl<T> RangeBounds<T> for RangeToInclusive<&T> { +#[rustc_const_unstable(feature = "const_range_bounds", issue = "108082")] +impl<T> const RangeBounds<T> for RangeToInclusive<&T> { fn start_bound(&self) -> Bound<&T> { Unbounded } diff --git a/library/core/src/ops/try_trait.rs b/library/core/src/ops/try_trait.rs index 84a690468..86aa1e4fd 100644 --- a/library/core/src/ops/try_trait.rs +++ b/library/core/src/ops/try_trait.rs @@ -379,13 +379,27 @@ pub(crate) type ChangeOutputType<T, V> = <<T as Try>::Residual as Residual<V>>:: pub(crate) struct NeverShortCircuit<T>(pub T); impl<T> NeverShortCircuit<T> { - /// Implementation for building `ConstFnMutClosure` for wrapping the output of a ~const FnMut in a `NeverShortCircuit`. + /// Wraps a unary function to produce one that wraps the output into a `NeverShortCircuit`. + /// + /// This is useful for implementing infallible functions in terms of the `try_` ones, + /// without accidentally capturing extra generic parameters in a closure. + #[inline] + pub fn wrap_mut_1<A>(mut f: impl FnMut(A) -> T) -> impl FnMut(A) -> NeverShortCircuit<T> { + move |a| NeverShortCircuit(f(a)) + } + #[inline] - pub const fn wrap_mut_2_imp<A, B, F: ~const FnMut(A, B) -> T>( - f: &mut F, - (a, b): (A, B), - ) -> NeverShortCircuit<T> { - NeverShortCircuit(f(a, b)) + pub fn wrap_mut_2<A, B>( + mut f: impl ~const FnMut(A, B) -> T, + ) -> impl ~const FnMut(A, B) -> Self { + cfg_if! { + if #[cfg(bootstrap)] { + #[allow(unused_parens)] + (const move |a, b| NeverShortCircuit(f(a, b))) + } else { + const move |a, b| NeverShortCircuit(f(a, b)) + } + } } } diff --git a/library/core/src/option.rs b/library/core/src/option.rs index 7cc00e3f8..994c08d1f 100644 --- a/library/core/src/option.rs +++ b/library/core/src/option.rs @@ -551,8 +551,9 @@ use crate::marker::Destruct; use crate::panicking::{panic, panic_str}; use crate::pin::Pin; use crate::{ - convert, hint, mem, + cmp, convert, hint, mem, ops::{self, ControlFlow, Deref, DerefMut}, + slice, }; /// The `Option` type. See [the module level documentation](self) for more. 
@@ -734,6 +735,124 @@ impl<T> Option<T> { } } + const fn get_some_offset() -> isize { + if mem::size_of::<Option<T>>() == mem::size_of::<T>() { + // niche optimization means the `T` is always stored at the same position as the Option. + 0 + } else { + assert!(mem::size_of::<Option<T>>() == mem::size_of::<Option<mem::MaybeUninit<T>>>()); + let some_uninit = Some(mem::MaybeUninit::<T>::uninit()); + // SAFETY: This gets the byte offset of the `Some(_)` value following the fact that + // niche optimization is not active, and thus Option<T> and Option<MaybeUninit<T>> share + // the same layout. + unsafe { + (some_uninit.as_ref().unwrap() as *const mem::MaybeUninit<T>) + .byte_offset_from(&some_uninit as *const Option<mem::MaybeUninit<T>>) + } + } + } + + /// Returns a slice of the contained value, if any. If this is `None`, an + /// empty slice is returned. This can be useful to have a single type of + /// iterator over an `Option` or slice. + /// + /// Note: Should you have an `Option<&T>` and wish to get a slice of `T`, + /// you can unpack it via `opt.map_or(&[], std::slice::from_ref)`. + /// + /// # Examples + /// + /// ```rust + /// #![feature(option_as_slice)] + /// + /// assert_eq!( + /// [Some(1234).as_slice(), None.as_slice()], + /// [&[1234][..], &[][..]], + /// ); + /// ``` + /// + /// The inverse of this function is (discounting + /// borrowing) [`[_]::first`](slice::first): + /// + /// ```rust + /// #![feature(option_as_slice)] + /// + /// for i in [Some(1234_u16), None] { + /// assert_eq!(i.as_ref(), i.as_slice().first()); + /// } + /// ``` + #[inline] + #[must_use] + #[unstable(feature = "option_as_slice", issue = "108545")] + pub fn as_slice(&self) -> &[T] { + // SAFETY: This is sound as long as `get_some_offset` returns the + // correct offset. Though in the `None` case, the slice may be located + // at a pointer pointing into padding, the fact that the slice is + // empty, and the padding is at a properly aligned position for a + // value of that type makes it sound. + unsafe { + slice::from_raw_parts( + (self as *const Option<T>).wrapping_byte_offset(Self::get_some_offset()) + as *const T, + self.is_some() as usize, + ) + } + } + + /// Returns a mutable slice of the contained value, if any. If this is + /// `None`, an empty slice is returned. This can be useful to have a + /// single type of iterator over an `Option` or slice. + /// + /// Note: Should you have an `Option<&mut T>` instead of a + /// `&mut Option<T>`, which this method takes, you can obtain a mutable + /// slice via `opt.map_or(&mut [], std::slice::from_mut)`. + /// + /// # Examples + /// + /// ```rust + /// #![feature(option_as_slice)] + /// + /// assert_eq!( + /// [Some(1234).as_mut_slice(), None.as_mut_slice()], + /// [&mut [1234][..], &mut [][..]], + /// ); + /// ``` + /// + /// The result is a mutable slice of zero or one items that points into + /// our original `Option`: + /// + /// ```rust + /// #![feature(option_as_slice)] + /// + /// let mut x = Some(1234); + /// x.as_mut_slice()[0] += 1; + /// assert_eq!(x, Some(1235)); + /// ``` + /// + /// The inverse of this method (discounting borrowing) + /// is [`[_]::first_mut`](slice::first_mut): + /// + /// ```rust + /// #![feature(option_as_slice)] + /// + /// assert_eq!(Some(123).as_mut_slice().first_mut(), Some(&mut 123)) + /// ``` + #[inline] + #[must_use] + #[unstable(feature = "option_as_slice", issue = "108545")] + pub fn as_mut_slice(&mut self) -> &mut [T] { + // SAFETY: This is sound as long as `get_some_offset` returns the + // correct offset.
Though in the `None` case, the slice may be located + // at a pointer pointing into padding, the fact that the slice is + // empty, and the padding is at a properly aligned position for a + // value of that type makes it sound. + unsafe { + slice::from_raw_parts_mut( + (self as *mut Option<T>).wrapping_byte_offset(Self::get_some_offset()) as *mut T, + self.is_some() as usize, + ) + } + } + ///////////////////////////////////////////////////////////////////////// // Getting to contained values ///////////////////////////////////////////////////////////////////////// @@ -943,7 +1062,7 @@ impl<T> Option<T> { // Transforming contained values ///////////////////////////////////////////////////////////////////////// - /// Maps an `Option<T>` to `Option<U>` by applying a function to a contained value. + /// Maps an `Option<T>` to `Option<U>` by applying a function to a contained value (if `Some`) or returns `None` (if `None`). /// /// # Examples /// @@ -955,8 +1074,10 @@ impl<T> Option<T> { /// let maybe_some_string = Some(String::from("Hello, World!")); /// // `Option::map` takes self *by value*, consuming `maybe_some_string` /// let maybe_some_len = maybe_some_string.map(|s| s.len()); - /// /// assert_eq!(maybe_some_len, Some(13)); + /// + /// let x: Option<&str> = None; + /// assert_eq!(x.map(|s| s.len()), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -2090,6 +2211,12 @@ impl<T: PartialEq> PartialEq for Option<T> { } } +/// This specialization trait is a workaround for LLVM not currently (2023-01) +/// being able to optimize this itself, even though Alive confirms that it would +/// be legal to do so: <https://github.com/llvm/llvm-project/issues/52622> +/// +/// Once that's fixed, `Option` should go back to deriving `PartialEq`, as +/// it used to do before <https://github.com/rust-lang/rust/pull/103556>. #[unstable(feature = "spec_option_partial_eq", issue = "none", reason = "exposed only for rustc")] #[doc(hidden)] pub trait SpecOptionPartialEq: Sized { @@ -2146,6 +2273,14 @@ impl<T> SpecOptionPartialEq for crate::ptr::NonNull<T> { } } +#[stable(feature = "rust1", since = "1.0.0")] +impl SpecOptionPartialEq for cmp::Ordering { + #[inline] + fn eq(l: &Option<Self>, r: &Option<Self>) -> bool { + l.map_or(2, |x| x as i8) == r.map_or(2, |x| x as i8) + } +} + ///////////////////////////////////////////////////////////////////////////// // The Option Iterators ///////////////////////////////////////////////////////////////////////////// diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs index 48e90e6d7..805a1e51a 100644 --- a/library/core/src/panicking.rs +++ b/library/core/src/panicking.rs @@ -117,7 +117,7 @@ pub const fn panic(expr: &'static str) -> ! { /// Like `panic`, but without unwinding and track_caller to reduce the impact on codesize. #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)] #[cfg_attr(feature = "panic_immediate_abort", inline)] -#[cfg_attr(not(bootstrap), lang = "panic_nounwind")] // needed by codegen for non-unwinding panics +#[lang = "panic_nounwind"] // needed by codegen for non-unwinding panics #[rustc_nounwind] pub fn panic_nounwind(expr: &'static str) -> ! { panic_nounwind_fmt(fmt::Arguments::new_v1(&[expr], &[])); @@ -165,8 +165,7 @@ fn panic_bounds_check(index: usize, len: usize) -> ! { /// any extra arguments (including those synthesized by track_caller). 
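The `SpecOptionPartialEq for cmp::Ordering` impl above works because `Ordering` is a `repr(i8)` enum with discriminants -1, 0, and +1, leaving 2 free as a sentinel for `None`: each `Option<Ordering>` collapses to one `i8`, and equality becomes a single integer comparison. The trick in isolation, as a standalone function rather than the library's specialization plumbing:

```rust
use std::cmp::Ordering;

// Flatten Option<Ordering> to an i8: Less/Equal/Greater keep their
// discriminants (-1/0/1) and None takes the unused value 2.
fn flat(x: Option<Ordering>) -> i8 {
    x.map_or(2, |o| o as i8)
}

fn main() {
    let vals = [
        None,
        Some(Ordering::Less),
        Some(Ordering::Equal),
        Some(Ordering::Greater),
    ];
    // The flattened comparison agrees with Option's derived equality.
    for &a in &vals {
        for &b in &vals {
            assert_eq!(flat(a) == flat(b), a == b);
        }
    }
}
```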
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)] #[cfg_attr(feature = "panic_immediate_abort", inline)] -#[cfg_attr(bootstrap, lang = "panic_no_unwind")] // needed by codegen for panic in nounwind function -#[cfg_attr(not(bootstrap), lang = "panic_cannot_unwind")] // needed by codegen for panic in nounwind function +#[lang = "panic_cannot_unwind"] // needed by codegen for panic in nounwind function #[rustc_nounwind] fn panic_cannot_unwind() -> ! { panic_nounwind("panic in a function that cannot unwind") diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs index d6e9da187..6f78811a1 100644 --- a/library/core/src/primitive_docs.rs +++ b/library/core/src/primitive_docs.rs @@ -587,8 +587,10 @@ mod prim_pointer {} /// There are two syntactic forms for creating an array: /// /// * A list with each element, i.e., `[x, y, z]`. -/// * A repeat expression `[x; N]`, which produces an array with `N` copies of `x`. -/// The type of `x` must be [`Copy`]. +/// * A repeat expression `[expr; N]` where `N` is how many times to repeat `expr` in the array. `expr` must either be: +/// +/// * A value of a type implementing the [`Copy`] trait +/// * A `const` value /// /// Note that `[expr; 0]` is allowed, and produces an empty array. /// This will still evaluate `expr`, however, and immediately drop the resulting value, so diff --git a/library/core/src/ptr/alignment.rs b/library/core/src/ptr/alignment.rs index 2123147c7..efe6d4183 100644 --- a/library/core/src/ptr/alignment.rs +++ b/library/core/src/ptr/alignment.rs @@ -41,7 +41,7 @@ impl Alignment { /// Returns the alignment for a type. /// /// This provides the same numerical value as [`mem::align_of`], - /// but in an `Alignment` instead of a `usize. + /// but in an `Alignment` instead of a `usize`. #[unstable(feature = "ptr_alignment_type", issue = "102070")] #[inline] pub const fn of<T>() -> Self { diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index 7b1cb5488..57e2ffe5d 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -23,8 +23,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "Follow the rabbit"; /// let ptr: *const u8 = s.as_ptr(); @@ -323,8 +321,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let ptr: *const u8 = &10u8 as *const u8; /// @@ -384,8 +380,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// #![feature(ptr_as_uninit)] /// @@ -449,8 +443,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); @@ -526,8 +518,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; @@ -731,7 +721,7 @@ impl<T: ?Sized> *const T { /// This computes the same value that [`offset_from`](#method.offset_from) /// would compute, but with the added precondition that the offset is /// guaranteed to be non-negative. This method is equivalent to - /// `usize::from(self.offset_from(origin)).unwrap_unchecked()`, + /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`, /// but it provides slightly more information to the optimizer, which can /// sometimes allow it to optimize slightly better with some backends. 
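As the corrected doc above notes, the non-negative variant is equivalent to `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`; it has to be `try_from`, since no lossless `From<isize> for usize` exists. A small stable demonstration of `offset_from` and that conversion:

```rust
fn main() {
    let a = [0i32; 8];
    let start = a.as_ptr();
    // A pointer five elements further into the same array.
    let p = unsafe { start.add(5) };

    // SAFETY: both pointers point into the same allocation, and the
    // distance between them is an exact multiple of size_of::<i32>().
    let dist = unsafe { p.offset_from(start) };
    assert_eq!(dist, 5);

    // The non-negative distance as a usize, checked here rather than
    // via unwrap_unchecked.
    assert_eq!(usize::try_from(dist).unwrap(), 5);
}
```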
/// @@ -908,8 +898,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); @@ -993,8 +981,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// @@ -1072,8 +1058,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; @@ -1152,8 +1136,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements (backwards) /// let data = [1u8, 2, 3, 4, 5]; @@ -1359,7 +1341,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] @@ -1482,7 +1463,6 @@ impl<T: ?Sized> *const T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index ed1e3bd48..422d0f2b8 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -22,8 +22,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); @@ -332,8 +330,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let ptr: *mut u8 = &mut 10u8 as *mut u8; /// @@ -396,8 +392,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// #![feature(ptr_as_uninit)] /// @@ -461,8 +455,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); @@ -539,8 +531,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let mut data = [1u8, 2, 3, 4, 5]; @@ -660,8 +650,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut s = [1, 2, 3]; /// let ptr: *mut u32 = s.as_mut_ptr(); @@ -904,7 +892,7 @@ impl<T: ?Sized> *mut T { /// This computes the same value that [`offset_from`](#method.offset_from) /// would compute, but with the added precondition that the offset is /// guaranteed to be non-negative. This method is equivalent to - /// `usize::from(self.offset_from(origin)).unwrap_unchecked()`, + /// `usize::try_from(self.offset_from(origin)).unwrap_unchecked()`, /// but it provides slightly more information to the optimizer, which can /// sometimes allow it to optimize slightly better with some backends. 
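Looping back to the array documentation amended earlier in this diff: a repeat operand may be either a `Copy` value or a `const` value. Both forms on stable Rust:

```rust
fn main() {
    // A Copy value can be repeated directly.
    let zeros = [0u8; 4];
    assert_eq!(zeros, [0, 0, 0, 0]);

    // A non-Copy type still works when the operand is a `const` item:
    // the constant is evaluated afresh for each element.
    const EMPTY: String = String::new();
    let strings = [EMPTY; 3];
    assert!(strings.iter().all(|s| s.is_empty()));
}
```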
/// @@ -1010,8 +998,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); @@ -1095,8 +1081,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let s: &str = "123"; /// @@ -1174,8 +1158,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; @@ -1254,8 +1236,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// // Iterate using a raw pointer in increments of two elements (backwards) /// let data = [1u8, 2, 3, 4, 5]; @@ -1627,7 +1607,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] @@ -1752,7 +1731,6 @@ impl<T: ?Sized> *mut T { /// /// # Examples /// - /// Basic usage: /// ``` /// #![feature(pointer_is_aligned)] /// #![feature(pointer_byte_offsets)] diff --git a/library/core/src/result.rs b/library/core/src/result.rs index f00c40f35..208b220c2 100644 --- a/library/core/src/result.rs +++ b/library/core/src/result.rs @@ -458,7 +458,7 @@ //! [`Result`] of a collection of each contained value of the original //! [`Result`] values, or [`Err`] if any of the elements was [`Err`]. //! -//! [impl-FromIterator]: Result#impl-FromIterator%3CResult%3CA%2C%20E%3E%3E-for-Result%3CV%2C%20E%3E +//! [impl-FromIterator]: Result#impl-FromIterator%3CResult%3CA,+E%3E%3E-for-Result%3CV,+E%3E //! //! ``` //! let v = [Ok(2), Ok(4), Err("err!"), Ok(8)]; @@ -474,8 +474,8 @@ //! to provide the [`product`][Iterator::product] and //! [`sum`][Iterator::sum] methods. //! -//! [impl-Product]: Result#impl-Product%3CResult%3CU%2C%20E%3E%3E-for-Result%3CT%2C%20E%3E -//! [impl-Sum]: Result#impl-Sum%3CResult%3CU%2C%20E%3E%3E-for-Result%3CT%2C%20E%3E +//! [impl-Product]: Result#impl-Product%3CResult%3CU,+E%3E%3E-for-Result%3CT,+E%3E +//! [impl-Sum]: Result#impl-Sum%3CResult%3CU,+E%3E%3E-for-Result%3CT,+E%3E //! //! ``` //! 
let v = [Err("error!"), Ok(1), Ok(2), Ok(3), Err("foo")]; @@ -525,8 +525,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<i32, &str> = Ok(-3); /// assert_eq!(x.is_ok(), true); @@ -572,8 +570,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<i32, &str> = Ok(-3); /// assert_eq!(x.is_err(), false); @@ -627,8 +623,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(2); /// assert_eq!(x.ok(), Some(2)); @@ -658,8 +652,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(2); /// assert_eq!(x.err(), None); @@ -693,8 +685,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(2); /// assert_eq!(x.as_ref(), Ok(&2)); @@ -716,8 +706,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn mutate(r: &mut Result<i32, i32>) { /// match r.as_mut() { @@ -812,8 +800,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let k = 21; /// @@ -841,8 +827,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn stringify(x: u32) -> String { format!("error code: {x}") } /// @@ -968,8 +952,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(7); /// assert_eq!(x.iter().next(), Some(&7)); @@ -989,8 +971,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let mut x: Result<u32, &str> = Ok(7); /// match x.iter_mut().next() { @@ -1031,8 +1011,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ```should_panic /// let x: Result<u32, &str> = Err("emergency failure"); /// x.expect("Testing expect"); // panics with `Testing expect: emergency failure` @@ -1160,8 +1138,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ```should_panic /// let x: Result<u32, &str> = Ok(10); /// x.expect_err("Testing expect_err"); // panics with `Testing expect_err: 10` @@ -1222,8 +1198,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// # #![feature(never_type)] /// # #![feature(unwrap_infallible)] @@ -1259,8 +1233,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// # #![feature(never_type)] /// # #![feature(unwrap_infallible)] @@ -1298,8 +1270,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(2); /// let y: Result<&str, &str> = Err("late error"); @@ -1383,8 +1353,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(2); /// let y: Result<u32, &str> = Err("late error"); @@ -1426,8 +1394,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn sq(x: u32) -> Result<u32, u32> { Ok(x * x) } /// fn err(x: u32) -> Result<u32, u32> { Err(x) } @@ -1456,8 +1422,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let default = 2; /// let x: Result<u32, &str> = Ok(9); @@ -1487,8 +1451,6 @@ impl<T, E> Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// fn count(x: &str) -> usize { x.len() } /// @@ -1752,8 +1714,6 @@ impl<T, E> Result<Result<T, E>, E> { /// /// # 
Examples /// - /// Basic usage: - /// /// ``` /// #![feature(result_flattening)] /// let x: Result<Result<&'static str, u32>, u32> = Ok(Ok("hello")); @@ -1842,8 +1802,6 @@ impl<T, E> IntoIterator for Result<T, E> { /// /// # Examples /// - /// Basic usage: - /// /// ``` /// let x: Result<u32, &str> = Ok(5); /// let v: Vec<u32> = x.into_iter().collect(); diff --git a/library/core/src/slice/cmp.rs b/library/core/src/slice/cmp.rs index 5e1b218e5..7601dd3c7 100644 --- a/library/core/src/slice/cmp.rs +++ b/library/core/src/slice/cmp.rs @@ -1,6 +1,6 @@ //! Comparison traits for `[T]`. -use crate::cmp::{self, Ordering}; +use crate::cmp::{self, BytewiseEq, Ordering}; use crate::ffi; use crate::mem; @@ -77,7 +77,7 @@ where // Use memcmp for bytewise equality when the types allow impl<A, B> SlicePartialEq<B> for [A] where - A: BytewiseEquality<B>, + A: BytewiseEq<B>, { fn equal(&self, other: &[B]) -> bool { if self.len() != other.len() { @@ -203,29 +203,6 @@ impl SliceOrd for u8 { } } -// Hack to allow specializing on `Eq` even though `Eq` has a method. -#[rustc_unsafe_specialization_marker] -trait MarkerEq<T>: PartialEq<T> {} - -impl<T: Eq> MarkerEq<T> for T {} - -#[doc(hidden)] -/// Trait implemented for types that can be compared for equality using -/// their bytewise representation -#[rustc_specialization_trait] -trait BytewiseEquality<T>: MarkerEq<T> + Copy {} - -macro_rules! impl_marker_for { - ($traitname:ident, $($ty:ty)*) => { - $( - impl $traitname<$ty> for $ty { } - )* - } -} - -impl_marker_for!(BytewiseEquality, - u8 i8 u16 i16 u32 i32 u64 i64 u128 i128 usize isize char bool); - pub(super) trait SliceContains: Sized { fn slice_contains(&self, x: &[Self]) -> bool; } diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs index 90ab43d12..c4317799b 100644 --- a/library/core/src/slice/iter.rs +++ b/library/core/src/slice/iter.rs @@ -7,7 +7,9 @@ use crate::cmp; use crate::cmp::Ordering; use crate::fmt; use crate::intrinsics::assume; -use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce}; +use crate::iter::{ + FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce, UncheckedIterator, +}; use crate::marker::{PhantomData, Send, Sized, Sync}; use crate::mem::{self, SizedTypeProperties}; use crate::num::NonZeroUsize; diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs index 0fd57b197..89b92a7d5 100644 --- a/library/core/src/slice/iter/macros.rs +++ b/library/core/src/slice/iter/macros.rs @@ -384,6 +384,15 @@ macro_rules! iterator { #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<T> TrustedLen for $name<'_, T> {} + + impl<'a, T> UncheckedIterator for $name<'a, T> { + unsafe fn next_unchecked(&mut self) -> $elem { + // SAFETY: The caller promised there's at least one more item. + unsafe { + next_unchecked!(self) + } + } + } } } diff --git a/library/core/src/slice/memchr.rs b/library/core/src/slice/memchr.rs index c848c2e18..98c8349eb 100644 --- a/library/core/src/slice/memchr.rs +++ b/library/core/src/slice/memchr.rs @@ -16,25 +16,29 @@ const USIZE_BYTES: usize = mem::size_of::<usize>(); /// bytes where the borrow propagated all the way to the most significant /// bit." 
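The borrow trick quoted above is easiest to see on a concrete word. A small standalone demonstration (hard-coding `u64` instead of `usize` for clarity; `LO` and `HI` mirror the `LO_USIZE` and `HI_USIZE` constants used by this module):

```
const LO: u64 = u64::MAX / 255; // 0x0101_0101_0101_0101: the byte 0x01 repeated
const HI: u64 = LO * 128; // 0x8080_8080_8080_8080: the byte 0x80 repeated

// Nonzero exactly when `x` contains a zero byte: subtracting 1 from a zero byte
// borrows into its top bit, while `& !x` discards bytes whose top bit was already set.
fn contains_zero_byte(x: u64) -> bool {
    x.wrapping_sub(LO) & !x & HI != 0
}

fn main() {
    assert!(contains_zero_byte(u64::from_le_bytes([7, 7, 0, 7, 7, 7, 7, 7])));
    assert!(!contains_zero_byte(u64::from_le_bytes([7; 8])));

    // memchr builds on this: XOR with the needle repeated in every byte
    // (`repeat_byte`), so that every matching byte becomes a zero byte.
    let needle = 0x2au8;
    let word = u64::from_le_bytes([9, 9, 0x2a, 9, 9, 9, 9, 9]);
    assert!(contains_zero_byte(word ^ (LO * needle as u64)));
}
```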
#[inline] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn contains_zero_byte(x: usize) -> bool { x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0 } -#[cfg(target_pointer_width = "16")] #[inline] +#[cfg(target_pointer_width = "16")] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn repeat_byte(b: u8) -> usize { (b as usize) << 8 | b as usize } -#[cfg(not(target_pointer_width = "16"))] #[inline] +#[cfg(not(target_pointer_width = "16"))] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn repeat_byte(b: u8) -> usize { (b as usize) * (usize::MAX / 255) } /// Returns the first index matching the byte `x` in `text`. -#[must_use] #[inline] +#[must_use] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] pub const fn memchr(x: u8, text: &[u8]) -> Option<usize> { // Fast path for small slices. if text.len() < 2 * USIZE_BYTES { @@ -45,6 +49,7 @@ pub const fn memchr(x: u8, text: &[u8]) -> Option<usize> { } #[inline] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn memchr_naive(x: u8, text: &[u8]) -> Option<usize> { let mut i = 0; @@ -60,6 +65,10 @@ const fn memchr_naive(x: u8, text: &[u8]) -> Option<usize> { None } +#[rustc_allow_const_fn_unstable(const_cmp)] +#[rustc_allow_const_fn_unstable(const_slice_index)] +#[rustc_allow_const_fn_unstable(const_align_offset)] +#[rustc_const_stable(feature = "const_memchr", since = "1.65.0")] const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> { // Scan for a single byte value by reading two `usize` words at a time. // diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs index d93a3a57e..1cd86b445 100644 --- a/library/core/src/slice/mod.rs +++ b/library/core/src/slice/mod.rs @@ -805,8 +805,9 @@ impl<T> [T] { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[track_caller] pub fn windows(&self, size: usize) -> Windows<'_, T> { - let size = NonZeroUsize::new(size).expect("size is zero"); + let size = NonZeroUsize::new(size).expect("window size must be non-zero"); Windows::new(self, size) } @@ -839,8 +840,9 @@ impl<T> [T] { /// [`rchunks`]: slice::rchunks #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[track_caller] pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); + assert!(chunk_size != 0, "chunk size must be non-zero"); Chunks::new(self, chunk_size) } @@ -877,8 +879,9 @@ impl<T> [T] { /// [`rchunks_mut`]: slice::rchunks_mut #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[track_caller] pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); + assert!(chunk_size != 0, "chunk size must be non-zero"); ChunksMut::new(self, chunk_size) } @@ -914,8 +917,9 @@ impl<T> [T] { /// [`rchunks_exact`]: slice::rchunks_exact #[stable(feature = "chunks_exact", since = "1.31.0")] #[inline] + #[track_caller] pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); + assert!(chunk_size != 0, "chunk size must be non-zero"); ChunksExact::new(self, chunk_size) } @@ -956,8 +960,9 @@ impl<T> [T] { /// [`rchunks_exact_mut`]: slice::rchunks_exact_mut #[stable(feature = "chunks_exact", since = "1.31.0")] #[inline] + #[track_caller] pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> { - assert_ne!(chunk_size, 0, "chunks cannot have a size of zero"); 
+ assert!(chunk_size != 0, "chunk size must be non-zero"); ChunksExactMut::new(self, chunk_size) } @@ -1037,9 +1042,10 @@ impl<T> [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (multiple_of_n, remainder) = self.split_at(len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1068,9 +1074,10 @@ impl<T> [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]]) { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (remainder, multiple_of_n) = self.split_at(self.len() - len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1108,8 +1115,9 @@ impl<T> [T] { /// [`chunks_exact`]: slice::chunks_exact #[unstable(feature = "array_chunks", issue = "74985")] #[inline] + #[track_caller] pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); ArrayChunks::new(self) } @@ -1186,9 +1194,10 @@ impl<T> [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_chunks_mut<const N: usize>(&mut self) -> (&mut [[T; N]], &mut [T]) { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (multiple_of_n, remainder) = self.split_at_mut(len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1223,9 +1232,10 @@ impl<T> [T] { /// ``` #[unstable(feature = "slice_as_chunks", issue = "74985")] #[inline] + #[track_caller] #[must_use] pub fn as_rchunks_mut<const N: usize>(&mut self) -> (&mut [T], &mut [[T; N]]) { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); let len = self.len() / N; let (remainder, multiple_of_n) = self.split_at_mut(self.len() - len * N); // SAFETY: We already panicked for zero, and ensured by construction @@ -1265,8 +1275,9 @@ impl<T> [T] { /// [`chunks_exact_mut`]: slice::chunks_exact_mut #[unstable(feature = "array_chunks", issue = "74985")] #[inline] + #[track_caller] pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> { - assert_ne!(N, 0, "chunks cannot have a size of zero"); + assert!(N != 0, "chunk size must be non-zero"); ArrayChunksMut::new(self) } @@ -1297,8 +1308,9 @@ impl<T> [T] { /// [`windows`]: slice::windows #[unstable(feature = "array_windows", issue = "75027")] #[inline] + #[track_caller] pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> { - assert_ne!(N, 0, "windows cannot have a size of zero"); + assert!(N != 0, "window size must be non-zero"); ArrayWindows::new(self) } @@ -1331,8 +1343,9 @@ impl<T> [T] { /// [`chunks`]: slice::chunks #[stable(feature = "rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> { - assert!(chunk_size != 0); + assert!(chunk_size != 0, "chunk size must be non-zero"); RChunks::new(self, chunk_size) } @@ -1369,8 +1382,9 @@ impl<T> [T] { /// [`chunks_mut`]: slice::chunks_mut #[stable(feature = 
"rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> { - assert!(chunk_size != 0); + assert!(chunk_size != 0, "chunk size must be non-zero"); RChunksMut::new(self, chunk_size) } @@ -1408,8 +1422,9 @@ impl<T> [T] { /// [`chunks_exact`]: slice::chunks_exact #[stable(feature = "rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> { - assert!(chunk_size != 0); + assert!(chunk_size != 0, "chunk size must be non-zero"); RChunksExact::new(self, chunk_size) } @@ -1451,8 +1466,9 @@ impl<T> [T] { /// [`chunks_exact_mut`]: slice::chunks_exact_mut #[stable(feature = "rchunks", since = "1.31.0")] #[inline] + #[track_caller] pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> { - assert!(chunk_size != 0); + assert!(chunk_size != 0, "chunk size must be non-zero"); RChunksExactMut::new(self, chunk_size) } @@ -2714,8 +2730,10 @@ impl<T> [T] { /// This reordering has the additional property that any value at position `i < index` will be /// less than or equal to any value at a position `j > index`. Additionally, this reordering is /// unstable (i.e. any number of equal elements may end up at position `index`), in-place - /// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also/ known as "kth - /// element" in other libraries. It returns a triplet of the following from the reordered slice: + /// (i.e. does not allocate), and *O*(*n*) on average. The worst-case performance is *O*(*n* log *n*). + /// This function is also known as "kth element" in other libraries. + /// + /// It returns a triplet of the following from the reordered slice: /// the subslice prior to `index`, the element at `index`, and the subslice after `index`; /// accordingly, the values in those two subslices will respectively all be less-than-or-equal-to /// and greater-than-or-equal-to the value of the element at `index`. @@ -2761,8 +2779,11 @@ impl<T> [T] { /// This reordering has the additional property that any value at position `i < index` will be /// less than or equal to any value at a position `j > index` using the comparator function. /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at - /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function - /// is also known as "kth element" in other libraries. It returns a triplet of the following from + /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) on average. + /// The worst-case performance is *O*(*n* log *n*). This function is also known as + /// "kth element" in other libraries. + /// + /// It returns a triplet of the following from /// the slice reordered according to the provided comparator function: the subslice prior to /// `index`, the element at `index`, and the subslice after `index`; accordingly, the values in /// those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to @@ -2813,8 +2834,11 @@ impl<T> [T] { /// This reordering has the additional property that any value at position `i < index` will be /// less than or equal to any value at a position `j > index` using the key extraction function. /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at - /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function - /// is also known as "kth element" in other libraries. 
It returns a triplet of the following from + /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) on average. + /// The worst-case performance is *O*(*n* log *n*). + /// This function is also known as "kth element" in other libraries. + /// + /// It returns a triplet of the following from /// the slice reordered according to the provided key extraction function: the subslice prior to /// `index`, the element at `index`, and the subslice after `index`; accordingly, the values in /// those two subslices will respectively all be less-than-or-equal-to and greater-than-or-equal-to @@ -2931,7 +2955,7 @@ impl<T> [T] { // This operation is still `O(n)`. // // Example: We start in this state, where `r` represents "next - // read" and `w` represents "next_write`. + // read" and `w` represents "next_write". // // r // +---+---+---+---+---+---+ diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs index 2181f9a81..2333f60a8 100644 --- a/library/core/src/slice/sort.rs +++ b/library/core/src/slice/sort.rs @@ -13,115 +13,184 @@ use crate::cmp; use crate::mem::{self, MaybeUninit, SizedTypeProperties}; use crate::ptr; -/// When dropped, copies from `src` into `dest`. -struct CopyOnDrop<T> { +// When dropped, copies from `src` into `dest`. +struct InsertionHole<T> { src: *const T, dest: *mut T, } -impl<T> Drop for CopyOnDrop<T> { +impl<T> Drop for InsertionHole<T> { fn drop(&mut self) { - // SAFETY: This is a helper class. - // Please refer to its usage for correctness. - // Namely, one must be sure that `src` and `dst` does not overlap as required by `ptr::copy_nonoverlapping`. + // SAFETY: This is a helper class. Please refer to its usage for correctness. Namely, one + // must be sure that `src` and `dst` do not overlap as required by + // `ptr::copy_nonoverlapping` and are both valid for writes. unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); } } } -/// Shifts the first element to the right until it encounters a greater or equal element. -fn shift_head<T, F>(v: &mut [T], is_less: &mut F) +/// Inserts `v[v.len() - 1]` into pre-sorted sequence `v[..v.len() - 1]` so that whole `v[..]` +/// becomes sorted. +unsafe fn insert_tail<T, F>(v: &mut [T], is_less: &mut F) where F: FnMut(&T, &T) -> bool, { - let len = v.len(); - // SAFETY: The unsafe operations below involves indexing without a bounds check (by offsetting a - // pointer) and copying memory (`ptr::copy_nonoverlapping`). - // - // a. Indexing: - // 1. We checked the size of the array to >=2. - // 2. All the indexing that we will do is always between {0 <= index < len} at most. - // - // b. Memory copying - // 1. We are obtaining pointers to references which are guaranteed to be valid. - // 2. They cannot overlap because we obtain pointers to difference indices of the slice. - // Namely, `i` and `i-1`. - // 3. If the slice is properly aligned, the elements are properly aligned. - // It is the caller's responsibility to make sure the slice is properly aligned. - // - // See comments below for further detail. + debug_assert!(v.len() >= 2); + + let arr_ptr = v.as_mut_ptr(); + let i = v.len() - 1; + + // SAFETY: caller must ensure v is at least len 2. unsafe { - // If the first two elements are out-of-order... - if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) { - // Read the first element into a stack-allocated variable. If a following comparison - // operation panics, `hole` will get dropped and automatically write the element back - // into the slice.
- let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0))); - let v = v.as_mut_ptr(); - let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(1) }; - ptr::copy_nonoverlapping(v.add(1), v.add(0), 1); - - for i in 2..len { - if !is_less(&*v.add(i), &*tmp) { + // See insert_head which talks about why this approach is beneficial. + let i_ptr = arr_ptr.add(i); + + // It's important that we use i_ptr here. If this check is positive and we continue, + // we want to make sure that no other copy of the value was seen by is_less. + // Otherwise we would have to copy it back. + if is_less(&*i_ptr, &*i_ptr.sub(1)) { + // It's important that we use tmp for comparison from now on, as it is the value that + // will be copied back; otherwise we could create a divergence and copy + // back the wrong value. + let tmp = mem::ManuallyDrop::new(ptr::read(i_ptr)); + // Intermediate state of the insertion process is always tracked by `hole`, which + // serves two purposes: + // 1. Protects integrity of `v` from panics in `is_less`. + // 2. Fills the remaining hole in `v` in the end. + // + // Panic safety: + // + // If `is_less` panics at any point during the process, `hole` will get dropped and + // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it + // initially held exactly once. + let mut hole = InsertionHole { src: &*tmp, dest: i_ptr.sub(1) }; + ptr::copy_nonoverlapping(hole.dest, i_ptr, 1); + + // SAFETY: We know i is at least 1. + for j in (0..(i - 1)).rev() { + let j_ptr = arr_ptr.add(j); + if !is_less(&*tmp, &*j_ptr) { break; } - // Move `i`-th element one place to the left, thus shifting the hole to the right. - ptr::copy_nonoverlapping(v.add(i), v.add(i - 1), 1); - hole.dest = v.add(i); + ptr::copy_nonoverlapping(j_ptr, hole.dest, 1); + hole.dest = j_ptr; } // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. } } } -/// Shifts the last element to the left until it encounters a smaller or equal element. -fn shift_tail<T, F>(v: &mut [T], is_less: &mut F) +/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted. +/// +/// This is the integral subroutine of insertion sort. +unsafe fn insert_head<T, F>(v: &mut [T], is_less: &mut F) where F: FnMut(&T, &T) -> bool, { - let len = v.len(); - // SAFETY: The unsafe operations below involves indexing without a bound check (by offsetting a - // pointer) and copying memory (`ptr::copy_nonoverlapping`). - // - // a. Indexing: - // 1. We checked the size of the array to >= 2. - // 2. All the indexing that we will do is always between `0 <= index < len-1` at most. - // - // b. Memory copying - // 1. We are obtaining pointers to references which are guaranteed to be valid. - // 2. They cannot overlap because we obtain pointers to difference indices of the slice. - // Namely, `i` and `i+1`. - // 3. If the slice is properly aligned, the elements are properly aligned. - // It is the caller's responsibility to make sure the slice is properly aligned. - // - // See comments below for further detail. + debug_assert!(v.len() >= 2); + + // SAFETY: caller must ensure v is at least len 2. unsafe { - // If the last two elements are out-of-order... - if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) { - // Read the last element into a stack-allocated variable. If a following comparison - // operation panics, `hole` will get dropped and automatically write the element back - // into the slice.
- let tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1))); - let v = v.as_mut_ptr(); - let mut hole = CopyOnDrop { src: &*tmp, dest: v.add(len - 2) }; - ptr::copy_nonoverlapping(v.add(len - 2), v.add(len - 1), 1); - - for i in (0..len - 2).rev() { - if !is_less(&*tmp, &*v.add(i)) { + if is_less(v.get_unchecked(1), v.get_unchecked(0)) { + let arr_ptr = v.as_mut_ptr(); + + // There are three ways to implement insertion here: + // + // 1. Swap adjacent elements until the first one gets to its final destination. + // However, this way we copy data around more than is necessary. If elements are big + // structures (costly to copy), this method will be slow. + // + // 2. Iterate until the right place for the first element is found. Then shift the + // elements succeeding it to make room for it and finally place it into the + // remaining hole. This is a good method. + // + // 3. Copy the first element into a temporary variable. Iterate until the right place + // for it is found. As we go along, copy every traversed element into the slot + // preceding it. Finally, copy data from the temporary variable into the remaining + // hole. This method is very good. Benchmarks demonstrated slightly better + // performance than with the 2nd method. + // + // All methods were benchmarked, and the 3rd showed best results. So we chose that one. + let tmp = mem::ManuallyDrop::new(ptr::read(arr_ptr)); + + // Intermediate state of the insertion process is always tracked by `hole`, which + // serves two purposes: + // 1. Protects integrity of `v` from panics in `is_less`. + // 2. Fills the remaining hole in `v` in the end. + // + // Panic safety: + // + // If `is_less` panics at any point during the process, `hole` will get dropped and + // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it + // initially held exactly once. + let mut hole = InsertionHole { src: &*tmp, dest: arr_ptr.add(1) }; + ptr::copy_nonoverlapping(arr_ptr.add(1), arr_ptr.add(0), 1); + + for i in 2..v.len() { + if !is_less(&v.get_unchecked(i), &*tmp) { break; } - - // Move `i`-th element one place to the right, thus shifting the hole to the left. - ptr::copy_nonoverlapping(v.add(i), v.add(i + 1), 1); - hole.dest = v.add(i); + ptr::copy_nonoverlapping(arr_ptr.add(i), arr_ptr.add(i - 1), 1); + hole.dest = arr_ptr.add(i); } // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. } } } +/// Sort `v` assuming `v[..offset]` is already sorted. +/// +/// Never inline this function to avoid code bloat. It still optimizes nicely and has practically no +/// performance impact, and in some cases it even improves performance. +#[inline(never)] +fn insertion_sort_shift_left<T, F>(v: &mut [T], offset: usize, is_less: &mut F) +where + F: FnMut(&T, &T) -> bool, +{ + let len = v.len(); + + // Using assert here improves performance. + assert!(offset != 0 && offset <= len); + + // Shift each element of the unsorted region v[offset..] as far left as is needed to make v sorted. + for i in offset..len { + // SAFETY: we tested that `offset` must be at least 1, so this loop is only entered if len + // >= 2. The range is exclusive and we know `i` must be at least 1 so this slice has at + // least len 2. + unsafe { + insert_tail(&mut v[..=i], is_less); + } + } +} + +/// Sort `v` assuming `v[offset..]` is already sorted. +/// +/// Never inline this function to avoid code bloat. It still optimizes nicely and has practically no +/// performance impact, and in some cases it even improves performance.
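Stripped of the pointer tricks, `insert_tail` plus `insertion_sort_shift_left` amount to a plain insertion sort over the unsorted suffix. A safe-Rust sketch of the same scheme (hypothetical helper name; it uses swaps where the real code uses the hole-and-copy optimization, trading speed for readability):

```
/// Safe sketch: sort `v`, assuming `v[..offset]` is already sorted.
fn insertion_sort_shift_left_sketch<T: Ord>(v: &mut [T], offset: usize) {
    assert!(offset != 0 && offset <= v.len());
    for i in offset..v.len() {
        // Sift v[i] leftwards until it meets a smaller-or-equal element.
        let mut j = i;
        while j > 0 && v[j] < v[j - 1] {
            v.swap(j, j - 1);
            j -= 1;
        }
    }
}

fn main() {
    let mut v = [1, 5, 9, 4, 2, 8];
    insertion_sort_shift_left_sketch(&mut v, 3); // v[..3] is pre-sorted
    assert_eq!(v, [1, 2, 4, 5, 8, 9]);
}
```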
+#[inline(never)] +fn insertion_sort_shift_right<T, F>(v: &mut [T], offset: usize, is_less: &mut F) +where + F: FnMut(&T, &T) -> bool, +{ + let len = v.len(); + + // Using assert here improves performance. + assert!(offset != 0 && offset <= len && len >= 2); + + // Shift each element of the unsorted region v[..offset] as far right as is needed to make v sorted. + for i in (0..offset).rev() { + // SAFETY: we tested that `offset` must be at least 1, so this loop is only entered if len + // >= 2. We ensured that the slice length is always at least 2. We know that start_found + // will be at least one less than end, and the range is exclusive, which gives us i always + // <= (end - 2). + unsafe { + insert_head(&mut v[i..len], is_less); + } + } +} + /// Partially sorts a slice by shifting several out-of-order elements around. /// /// Returns `true` if the slice is sorted at the end. This function is *O*(*n*) worst-case. @@ -161,26 +230,19 @@ where // Swap the found pair of elements. This puts them in correct order. v.swap(i - 1, i); - // Shift the smaller element to the left. - shift_tail(&mut v[..i], is_less); - // Shift the greater element to the right. - shift_head(&mut v[i..], is_less); + if i >= 2 { + // Shift the smaller element to the left. + insertion_sort_shift_left(&mut v[..i], i - 1, is_less); + + // Shift the greater element to the right. + insertion_sort_shift_right(&mut v[..i], 1, is_less); + } } // Didn't manage to sort the slice in the limited number of steps. false } -/// Sorts a slice using insertion sort, which is *O*(*n*^2) worst-case. -fn insertion_sort<T, F>(v: &mut [T], is_less: &mut F) -where - F: FnMut(&T, &T) -> bool, -{ - for i in 1..v.len() { - shift_tail(&mut v[..i + 1], is_less); - } -} - /// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case. #[cold] #[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")] @@ -198,8 +260,11 @@ where } // Choose the greater child. - if child + 1 < v.len() && is_less(&v[child], &v[child + 1]) { - child += 1; + if child + 1 < v.len() { + // We need a branch to make sure we don't index out of bounds, + // but it's highly predictable. The comparison, however, + // is better done branchless, especially for primitives. + child += is_less(&v[child], &v[child + 1]) as usize; } // Stop if the invariant holds at `node`. @@ -252,7 +317,7 @@ where // 1. `block` - Number of elements in the block. // 2. `start` - Start pointer into the `offsets` array. // 3. `end` - End pointer into the `offsets` array. - // 4. `offsets - Indices of out-of-order elements within the block. + // 4. `offsets` - Indices of out-of-order elements within the block. // The current block on the left side (from `l` to `l.add(block_l)`). let mut l = v.as_mut_ptr(); @@ -262,7 +327,7 @@ where let mut offsets_l = [MaybeUninit::<u8>::uninit(); BLOCK]; // The current block on the right side (from `r.sub(block_r)` to `r`). - // SAFETY: The documentation for .add() specifically mention that `vec.as_ptr().add(vec.len())` is always safe` + // SAFETY: The documentation for .add() specifically mentions that `vec.as_ptr().add(vec.len())` is always safe let mut r = unsafe { l.add(v.len()) }; let mut block_r = BLOCK; let mut start_r = ptr::null_mut(); @@ -507,7 +572,7 @@ where // SAFETY: `pivot` is a reference to the first element of `v`, so `ptr::read` is safe.
let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); - let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot }; + let _pivot_guard = InsertionHole { src: &*tmp, dest: pivot }; let pivot = &*tmp; // Find the first pair of out-of-order elements. @@ -560,7 +625,7 @@ where // operation panics, the pivot will be automatically written back into the slice. // SAFETY: The pointer here is valid because it is obtained from a reference to a slice. let tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); - let _pivot_guard = CopyOnDrop { src: &*tmp, dest: pivot }; + let _pivot_guard = InsertionHole { src: &*tmp, dest: pivot }; let pivot = &*tmp; // Now partition the slice. @@ -608,19 +673,23 @@ where fn break_patterns<T>(v: &mut [T]) { let len = v.len(); if len >= 8 { - // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia. - let mut random = len as u32; - let mut gen_u32 = || { - random ^= random << 13; - random ^= random >> 17; - random ^= random << 5; - random - }; + let mut seed = len; let mut gen_usize = || { + // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia. if usize::BITS <= 32 { - gen_u32() as usize + let mut r = seed as u32; + r ^= r << 13; + r ^= r >> 17; + r ^= r << 5; + seed = r as usize; + seed } else { - (((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize + let mut r = seed as u64; + r ^= r << 13; + r ^= r >> 7; + r ^= r << 17; + seed = r as usize; + seed } }; @@ -742,7 +811,9 @@ where // Very short slices get sorted using insertion sort. if len <= MAX_INSERTION { - insertion_sort(v, is_less); + if len >= 2 { + insertion_sort_shift_left(v, 1, is_less); + } return; } @@ -844,10 +915,14 @@ fn partition_at_index_loop<'a, T, F>( let mut was_balanced = true; loop { + let len = v.len(); + // For slices of up to this length it's probably faster to simply sort them. const MAX_INSERTION: usize = 10; - if v.len() <= MAX_INSERTION { - insertion_sort(v, is_less); + if len <= MAX_INSERTION { + if len >= 2 { + insertion_sort_shift_left(v, 1, is_less); + } return; } @@ -887,7 +962,7 @@ fn partition_at_index_loop<'a, T, F>( } let (mid, _) = partition(v, pivot, is_less); - was_balanced = cmp::min(mid, v.len() - mid) >= v.len() / 8; + was_balanced = cmp::min(mid, len - mid) >= len / 8; // Split the slice into `left`, `pivot`, and `right`. let (left, right) = v.split_at_mut(mid); @@ -954,75 +1029,6 @@ where (left, pivot, right) } -/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted. -/// -/// This is the integral subroutine of insertion sort. -fn insert_head<T, F>(v: &mut [T], is_less: &mut F) -where - F: FnMut(&T, &T) -> bool, -{ - if v.len() >= 2 && is_less(&v[1], &v[0]) { - // SAFETY: Copy tmp back even if panic, and ensure unique observation. - unsafe { - // There are three ways to implement insertion here: - // - // 1. Swap adjacent elements until the first one gets to its final destination. - // However, this way we copy data around more than is necessary. If elements are big - // structures (costly to copy), this method will be slow. - // - // 2. Iterate until the right place for the first element is found. Then shift the - // elements succeeding it to make room for it and finally place it into the - // remaining hole. This is a good method. - // - // 3. Copy the first element into a temporary variable. Iterate until the right place - // for it is found. As we go along, copy every traversed element into the slot - // preceding it. 
Finally, copy data from the temporary variable into the remaining - // hole. This method is very good. Benchmarks demonstrated slightly better - // performance than with the 2nd method. - // - // All methods were benchmarked, and the 3rd showed best results. So we chose that one. - let tmp = mem::ManuallyDrop::new(ptr::read(&v[0])); - - // Intermediate state of the insertion process is always tracked by `hole`, which - // serves two purposes: - // 1. Protects integrity of `v` from panics in `is_less`. - // 2. Fills the remaining hole in `v` in the end. - // - // Panic safety: - // - // If `is_less` panics at any point during the process, `hole` will get dropped and - // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it - // initially held exactly once. - let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] }; - ptr::copy_nonoverlapping(&v[1], &mut v[0], 1); - - for i in 2..v.len() { - if !is_less(&v[i], &*tmp) { - break; - } - ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1); - hole.dest = &mut v[i]; - } - // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`. - } - } - - // When dropped, copies from `src` into `dest`. - struct InsertionHole<T> { - src: *const T, - dest: *mut T, - } - - impl<T> Drop for InsertionHole<T> { - fn drop(&mut self) { - // SAFETY: The caller must ensure that src and dest are correctly set. - unsafe { - ptr::copy_nonoverlapping(self.src, self.dest, 1); - } - } - } -} - /// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and /// stores the result into `v[..]`. /// @@ -1180,8 +1186,6 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( { // Slices of up to this length get sorted using insertion sort. const MAX_INSERTION: usize = 20; - // Very short runs are extended using insertion sort to span at least this many elements. - const MIN_RUN: usize = 10; // The caller should have already checked that. debug_assert!(!T::IS_ZST); @@ -1191,9 +1195,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( // Short arrays get sorted in-place via insertion sort to avoid allocations. if len <= MAX_INSERTION { if len >= 2 { - for i in (0..len - 1).rev() { - insert_head(&mut v[i..], is_less); - } + insertion_sort_shift_left(v, 1, is_less); } return; } @@ -1203,59 +1205,43 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run, // which will always have length at most `len / 2`. let buf = BufGuard::new(len / 2, elem_alloc_fn, elem_dealloc_fn); - let buf_ptr = buf.buf_ptr; + let buf_ptr = buf.buf_ptr.as_ptr(); let mut runs = RunVec::new(run_alloc_fn, run_dealloc_fn); - // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a - // strange decision, but consider the fact that merges more often go in the opposite direction - // (forwards). According to benchmarks, merging forwards is slightly faster than merging - // backwards. To conclude, identifying runs by traversing backwards improves performance. - let mut end = len; - while end > 0 { - // Find the next natural run, and reverse it if it's strictly descending. - let mut start = end - 1; - if start > 0 { - start -= 1; - - // SAFETY: The v.get_unchecked must be fed with correct inbound indicies. 
- unsafe { - if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) { - while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) { - start -= 1; - } - v[start..end].reverse(); - } else { - while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) - { - start -= 1; - } - } - } + let mut end = 0; + let mut start = 0; + + // Scan forward. Memory pre-fetching prefers forward scanning vs backwards scanning, and the + // code-gen is usually better. For the most sensitive types such as integers, these are merged + // bidirectionally at once. So there is no benefit in scanning backwards. + while end < len { + let (streak_end, was_reversed) = find_streak(&v[start..], is_less); + end += streak_end; + if was_reversed { + v[start..end].reverse(); } // Insert some more elements into the run if it's too short. Insertion sort is faster than // merge sort on short sequences, so this significantly improves performance. - while start > 0 && end - start < MIN_RUN { - start -= 1; - insert_head(&mut v[start..end], is_less); - } + end = provide_sorted_batch(v, start, end, is_less); // Push this run onto the stack. runs.push(TimSortRun { start, len: end - start }); - end = start; + start = end; // Merge some pairs of adjacent runs to satisfy the invariants. - while let Some(r) = collapse(runs.as_slice()) { - let left = runs[r + 1]; - let right = runs[r]; + while let Some(r) = collapse(runs.as_slice(), len) { + let left = runs[r]; + let right = runs[r + 1]; + let merge_slice = &mut v[left.start..right.start + right.len]; // SAFETY: `buf_ptr` must hold enough capacity for the shorter of the two sides, and // neither side may be on length 0. unsafe { - merge(&mut v[left.start..right.start + right.len], left.len, buf_ptr, is_less); + merge(merge_slice, left.len, buf_ptr, is_less); } - runs[r] = TimSortRun { start: left.start, len: left.len + right.len }; - runs.remove(r + 1); + runs[r + 1] = TimSortRun { start: left.start, len: left.len + right.len }; + runs.remove(r); } } @@ -1277,10 +1263,10 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( // run starts at index 0, it will always demand a merge operation until the stack is fully // collapsed, in order to complete the sort. 
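The merge-collapse condition changes in the hunk below: because runs are now discovered front to back, the run that forces a final merge is the one ending at `stop` (the end of the region being sorted) rather than the one starting at index 0. A standalone sketch of the new check, mirroring the diff (with a hypothetical `Run` type standing in for `TimSortRun`; the merge-pair selection follows the unchanged body of the function):

```
struct Run { start: usize, len: usize }

fn collapse(runs: &[Run], stop: usize) -> Option<usize> {
    let n = runs.len();
    if n >= 2
        && (runs[n - 1].start + runs[n - 1].len == stop
            || runs[n - 2].len <= runs[n - 1].len
            || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
            || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
    {
        if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
    } else {
        None
    }
}

fn main() {
    // Two equal-length runs violate the invariant; merge the pair at index 0.
    assert_eq!(collapse(&[Run { start: 0, len: 8 }, Run { start: 8, len: 8 }], 32), Some(0));
    // A short run after a longer one keeps the invariants; no merge yet.
    assert_eq!(collapse(&[Run { start: 0, len: 16 }, Run { start: 16, len: 4 }], 32), None);
    // The last run reaching `stop` always demands a merge to finish the sort.
    assert_eq!(collapse(&[Run { start: 0, len: 16 }, Run { start: 16, len: 4 }], 20), Some(0));
}
```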
#[inline] - fn collapse(runs: &[TimSortRun]) -> Option<usize> { + fn collapse(runs: &[TimSortRun], stop: usize) -> Option<usize> { let n = runs.len(); if n >= 2 - && (runs[n - 1].start == 0 + && (runs[n - 1].start + runs[n - 1].len == stop || runs[n - 2].len <= runs[n - 1].len || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len) || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len)) @@ -1298,7 +1284,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( where ElemDeallocF: Fn(*mut T, usize), { - buf_ptr: *mut T, + buf_ptr: ptr::NonNull<T>, capacity: usize, elem_dealloc_fn: ElemDeallocF, } @@ -1315,7 +1301,11 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( where ElemAllocF: Fn(usize) -> *mut T, { - Self { buf_ptr: elem_alloc_fn(len), capacity: len, elem_dealloc_fn } + Self { + buf_ptr: ptr::NonNull::new(elem_alloc_fn(len)).unwrap(), + capacity: len, + elem_dealloc_fn, + } } } @@ -1324,7 +1314,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( ElemDeallocF: Fn(*mut T, usize), { fn drop(&mut self) { - (self.elem_dealloc_fn)(self.buf_ptr, self.capacity); + (self.elem_dealloc_fn)(self.buf_ptr.as_ptr(), self.capacity); } } @@ -1333,7 +1323,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( RunAllocF: Fn(usize) -> *mut TimSortRun, RunDeallocF: Fn(*mut TimSortRun, usize), { - buf_ptr: *mut TimSortRun, + buf_ptr: ptr::NonNull<TimSortRun>, capacity: usize, len: usize, run_alloc_fn: RunAllocF, @@ -1350,7 +1340,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( const START_RUN_CAPACITY: usize = 16; Self { - buf_ptr: run_alloc_fn(START_RUN_CAPACITY), + buf_ptr: ptr::NonNull::new(run_alloc_fn(START_RUN_CAPACITY)).unwrap(), capacity: START_RUN_CAPACITY, len: 0, run_alloc_fn, @@ -1361,15 +1351,15 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( fn push(&mut self, val: TimSortRun) { if self.len == self.capacity { let old_capacity = self.capacity; - let old_buf_ptr = self.buf_ptr; + let old_buf_ptr = self.buf_ptr.as_ptr(); self.capacity = self.capacity * 2; - self.buf_ptr = (self.run_alloc_fn)(self.capacity); + self.buf_ptr = ptr::NonNull::new((self.run_alloc_fn)(self.capacity)).unwrap(); // SAFETY: buf_ptr new and old were correctly allocated and old_buf_ptr has // old_capacity valid elements. unsafe { - ptr::copy_nonoverlapping(old_buf_ptr, self.buf_ptr, old_capacity); + ptr::copy_nonoverlapping(old_buf_ptr, self.buf_ptr.as_ptr(), old_capacity); } (self.run_dealloc_fn)(old_buf_ptr, old_capacity); @@ -1377,7 +1367,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( // SAFETY: The invariant was just checked. unsafe { - self.buf_ptr.add(self.len).write(val); + self.buf_ptr.as_ptr().add(self.len).write(val); } self.len += 1; } @@ -1390,7 +1380,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( // SAFETY: buf_ptr needs to be valid and len invariant upheld. unsafe { // the place we are taking from. - let ptr = self.buf_ptr.add(index); + let ptr = self.buf_ptr.as_ptr().add(index); // Shift everything down to fill in that spot. ptr::copy(ptr.add(1), ptr, self.len - index - 1); @@ -1400,7 +1390,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( fn as_slice(&self) -> &[TimSortRun] { // SAFETY: Safe as long as buf_ptr is valid and len invariant was upheld. 
- unsafe { &*ptr::slice_from_raw_parts(self.buf_ptr, self.len) } + unsafe { &*ptr::slice_from_raw_parts(self.buf_ptr.as_ptr(), self.len) } } fn len(&self) -> usize { @@ -1419,7 +1409,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( if index < self.len { // SAFETY: buf_ptr and len invariant must be upheld. unsafe { - return &*(self.buf_ptr.add(index)); + return &*(self.buf_ptr.as_ptr().add(index)); } } @@ -1436,7 +1426,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( if index < self.len { // SAFETY: buf_ptr and len invariant must be upheld. unsafe { - return &mut *(self.buf_ptr.add(index)); + return &mut *(self.buf_ptr.as_ptr().add(index)); } } @@ -1452,7 +1442,7 @@ pub fn merge_sort<T, CmpF, ElemAllocF, ElemDeallocF, RunAllocF, RunDeallocF>( fn drop(&mut self) { // As long as TimSortRun is Copy we don't need to drop them individually but just the // whole allocation. - (self.run_dealloc_fn)(self.buf_ptr, self.capacity); + (self.run_dealloc_fn)(self.buf_ptr.as_ptr(), self.capacity); } } } @@ -1463,3 +1453,71 @@ pub struct TimSortRun { len: usize, start: usize, } + +/// Takes a range, denoted by `start` and `end`, that is already sorted, and extends it to the right if +/// necessary with sorts optimized for smaller ranges such as insertion sort. +#[cfg(not(no_global_oom_handling))] +fn provide_sorted_batch<T, F>(v: &mut [T], start: usize, mut end: usize, is_less: &mut F) -> usize +where + F: FnMut(&T, &T) -> bool, +{ + let len = v.len(); + assert!(end >= start && end <= len); + + // This value is a balance between fewest comparisons and best performance, as + // influenced by, for example, cache locality. + const MIN_INSERTION_RUN: usize = 10; + + // Insert some more elements into the run if it's too short. Insertion sort is faster than + // merge sort on short sequences, so this significantly improves performance. + let start_end_diff = end - start; + + if start_end_diff < MIN_INSERTION_RUN && end < len { + // v[start..end] are elements that are already sorted in the input. We want to extend + // the sorted region to the right, so we push `end` up to `start + MIN_INSERTION_RUN` at most, + // which is more efficient than trying to push those already sorted elements to the left. + end = cmp::min(start + MIN_INSERTION_RUN, len); + let presorted_start = cmp::max(start_end_diff, 1); + + insertion_sort_shift_left(&mut v[start..end], presorted_start, is_less); + } + + end +} + +/// Finds a streak of presorted elements starting at the beginning of the slice. Returns the first +/// value that is not part of said streak, and a bool denoting whether the streak was reversed. +/// Streaks can be increasing or decreasing. +fn find_streak<T, F>(v: &[T], is_less: &mut F) -> (usize, bool) +where + F: FnMut(&T, &T) -> bool, +{ + let len = v.len(); + + if len < 2 { + return (len, false); + } + + let mut end = 2; + + // SAFETY: See the specific SAFETY comments below. + unsafe { + // SAFETY: We checked that len >= 2, so 0 and 1 are valid indices. + let assume_reverse = is_less(v.get_unchecked(1), v.get_unchecked(0)); + + // SAFETY: We know end >= 2 and check end < len. + // From that follows that accessing v at end and end - 1 is safe.
+ if assume_reverse { + while end < len && is_less(v.get_unchecked(end), v.get_unchecked(end - 1)) { + end += 1; + } + + (end, true) + } else { + while end < len && !is_less(v.get_unchecked(end), v.get_unchecked(end - 1)) { + end += 1; + } + (end, false) + } + } +} diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs index d969475aa..95c682f42 100644 --- a/library/core/src/str/iter.rs +++ b/library/core/src/str/iter.rs @@ -1,6 +1,6 @@ //! Iterators for `str` methods. -use crate::char; +use crate::char as char_mod; use crate::fmt::{self, Write}; use crate::iter::{Chain, FlatMap, Flatten}; use crate::iter::{Copied, Filter, FusedIterator, Map, TrustedLen}; @@ -1455,8 +1455,8 @@ impl FusedIterator for EncodeUtf16<'_> {} #[derive(Clone, Debug)] pub struct EscapeDebug<'a> { pub(super) inner: Chain< - Flatten<option::IntoIter<char::EscapeDebug>>, - FlatMap<Chars<'a>, char::EscapeDebug, CharEscapeDebugContinue>, + Flatten<option::IntoIter<char_mod::EscapeDebug>>, + FlatMap<Chars<'a>, char_mod::EscapeDebug, CharEscapeDebugContinue>, >, } @@ -1464,14 +1464,14 @@ pub struct EscapeDebug<'a> { #[stable(feature = "str_escape", since = "1.34.0")] #[derive(Clone, Debug)] pub struct EscapeDefault<'a> { - pub(super) inner: FlatMap<Chars<'a>, char::EscapeDefault, CharEscapeDefault>, + pub(super) inner: FlatMap<Chars<'a>, char_mod::EscapeDefault, CharEscapeDefault>, } /// The return type of [`str::escape_unicode`]. #[stable(feature = "str_escape", since = "1.34.0")] #[derive(Clone, Debug)] pub struct EscapeUnicode<'a> { - pub(super) inner: FlatMap<Chars<'a>, char::EscapeUnicode, CharEscapeUnicode>, + pub(super) inner: FlatMap<Chars<'a>, char_mod::EscapeUnicode, CharEscapeUnicode>, } macro_rules! escape_types_impls { diff --git a/library/core/src/str/traits.rs b/library/core/src/str/traits.rs index d3ed811b1..68f62ce8b 100644 --- a/library/core/src/str/traits.rs +++ b/library/core/src/str/traits.rs @@ -28,10 +28,6 @@ impl PartialEq for str { fn eq(&self, other: &str) -> bool { self.as_bytes() == other.as_bytes() } - #[inline] - fn ne(&self, other: &str) -> bool { - !(*self).eq(other) - } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index 14367eb09..040a59184 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -305,6 +305,50 @@ impl AtomicBool { AtomicBool { v: UnsafeCell::new(v as u8) } } + /// Creates a new `AtomicBool` from a pointer. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_from_ptr, pointer_is_aligned)] + /// use std::sync::atomic::{self, AtomicBool}; + /// use std::mem::align_of; + /// + /// // Get a pointer to an allocated value + /// let ptr: *mut bool = Box::into_raw(Box::new(false)); + /// + /// assert!(ptr.is_aligned_to(align_of::<AtomicBool>())); + /// + /// { + /// // Create an atomic view of the allocated value + /// let atomic = unsafe { AtomicBool::from_ptr(ptr) }; + /// + /// // Use `atomic` for atomic operations, possibly share it with other threads + /// atomic.store(true, atomic::Ordering::Relaxed); + /// } + /// + /// // It's ok to non-atomically access the value behind `ptr`, + /// // since the reference to the atomic ended its lifetime in the block above + /// assert_eq!(unsafe { *ptr }, true); + /// + /// // Deallocate the value + /// unsafe { drop(Box::from_raw(ptr)) } + /// ``` + /// + /// # Safety + /// + /// * `ptr` must be aligned to `align_of::<AtomicBool>()` (note that on some platforms this can be bigger than `align_of::<bool>()`). + /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. + /// * The value behind `ptr` must not be accessed through non-atomic operations for the whole lifetime `'a`. + /// + /// [valid]: crate::ptr#safety + #[unstable(feature = "atomic_from_ptr", issue = "108652")] + #[rustc_const_unstable(feature = "atomic_from_ptr", issue = "108652")] + pub const unsafe fn from_ptr<'a>(ptr: *mut bool) -> &'a AtomicBool { + // SAFETY: guaranteed by the caller + unsafe { &*ptr.cast() } + } + /// Returns a mutable reference to the underlying [`bool`]. /// /// This is safe because the mutable reference guarantees that no other threads are @@ -922,14 +966,14 @@ impl AtomicBool { /// /// let mut atomic = AtomicBool::new(true); /// unsafe { - /// my_atomic_op(atomic.as_mut_ptr()); + /// my_atomic_op(atomic.as_ptr()); /// } /// # } /// ``` #[inline] #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")] - pub fn as_mut_ptr(&self) -> *mut bool { - self.v.get() as *mut bool + pub const fn as_ptr(&self) -> *mut bool { + self.v.get().cast() } /// Fetches the value, and applies a function to it that returns an optional @@ -1017,6 +1061,50 @@ impl<T> AtomicPtr<T> { AtomicPtr { p: UnsafeCell::new(p) } } + /// Creates a new `AtomicPtr` from a pointer. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_from_ptr, pointer_is_aligned)] + /// use std::sync::atomic::{self, AtomicPtr}; + /// use std::mem::align_of; + /// + /// // Get a pointer to an allocated value + /// let ptr: *mut *mut u8 = Box::into_raw(Box::new(std::ptr::null_mut())); + /// + /// assert!(ptr.is_aligned_to(align_of::<AtomicPtr<u8>>())); + /// + /// { + /// // Create an atomic view of the allocated value + /// let atomic = unsafe { AtomicPtr::from_ptr(ptr) }; + /// + /// // Use `atomic` for atomic operations, possibly share it with other threads + /// atomic.store(std::ptr::NonNull::dangling().as_ptr(), atomic::Ordering::Relaxed); + /// } + /// + /// // It's ok to non-atomically access the value behind `ptr`, + /// // since the reference to the atomic ended its lifetime in the block above + /// assert!(!unsafe { *ptr }.is_null()); + /// + /// // Deallocate the value + /// unsafe { drop(Box::from_raw(ptr)) } + /// ``` + /// + /// # Safety + /// + /// * `ptr` must be aligned to `align_of::<AtomicPtr<T>>()` (note that on some platforms this can be bigger than `align_of::<*mut T>()`). 
+ /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. + /// * The value behind `ptr` must not be accessed through non-atomic operations for the whole lifetime `'a`. + /// + /// [valid]: crate::ptr#safety + #[unstable(feature = "atomic_from_ptr", issue = "108652")] + #[rustc_const_unstable(feature = "atomic_from_ptr", issue = "108652")] + pub const unsafe fn from_ptr<'a>(ptr: *mut *mut T) -> &'a AtomicPtr<T> { + // SAFETY: guaranteed by the caller + unsafe { &*ptr.cast() } + } + /// Returns a mutable reference to the underlying pointer. /// /// This is safe because the mutable reference guarantees that no other threads are @@ -1803,7 +1891,7 @@ impl<T> AtomicPtr<T> { /// /// ```ignore (extern-declaration) /// #![feature(atomic_mut_ptr)] - //// use std::sync::atomic::AtomicPtr; + /// use std::sync::atomic::AtomicPtr; /// /// extern "C" { /// fn my_atomic_op(arg: *mut *mut u32); /// } /// /// let mut atomic = AtomicPtr::new(&mut 1); /// @@ -1814,12 +1902,12 @@ impl<T> AtomicPtr<T> { /// /// // SAFETY: Safe as long as `my_atomic_op` is atomic. /// unsafe { - /// my_atomic_op(atomic.as_mut_ptr()); + /// my_atomic_op(atomic.as_ptr()); /// } /// ``` #[inline] #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")] - pub fn as_mut_ptr(&self) -> *mut *mut T { + pub const fn as_ptr(&self) -> *mut *mut T { self.p.get() } } @@ -1861,7 +1949,8 @@ macro_rules! if_not_8_bit { ($_:ident, $($tt:tt)*) => { $($tt)* }; } -#[cfg(target_has_atomic_load_store = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic_load_store))] +#[cfg_attr(bootstrap, cfg(target_has_atomic_load_store = "8"))] macro_rules! atomic_int { ($cfg_cas:meta, $cfg_align:meta, @@ -1957,6 +2046,53 @@ macro_rules! atomic_int { Self {v: UnsafeCell::new(v)} } + /// Creates a new reference to an atomic integer from a pointer. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_from_ptr, pointer_is_aligned)] + #[doc = concat!($extra_feature, "use std::sync::atomic::{self, ", stringify!($atomic_type), "};")] + /// use std::mem::align_of; + /// + /// // Get a pointer to an allocated value + #[doc = concat!("let ptr: *mut ", stringify!($int_type), " = Box::into_raw(Box::new(0));")] + /// + #[doc = concat!("assert!(ptr.is_aligned_to(align_of::<", stringify!($atomic_type), ">()));")] + /// + /// { + /// // Create an atomic view of the allocated value + // SAFETY: this is a doc comment, tidy, it can't hurt you (also guaranteed by the construction of `ptr` and the assert above) + #[doc = concat!(" let atomic = unsafe {", stringify!($atomic_type), "::from_ptr(ptr) };")] + /// + /// // Use `atomic` for atomic operations, possibly share it with other threads + /// atomic.store(1, atomic::Ordering::Relaxed); + /// } + /// + /// // It's ok to non-atomically access the value behind `ptr`, + /// // since the reference to the atomic ended its lifetime in the block above + /// assert_eq!(unsafe { *ptr }, 1); + /// + /// // Deallocate the value + /// unsafe { drop(Box::from_raw(ptr)) } + /// ``` + /// + /// # Safety + /// + #[doc = concat!(" * `ptr` must be aligned to `align_of::<", stringify!($atomic_type), ">()` (note that on some platforms this can be bigger than `align_of::<", stringify!($int_type), ">()`).")] + /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`. + /// * The value behind `ptr` must not be accessed through non-atomic operations for the whole lifetime `'a`.
+ /// + /// [valid]: crate::ptr#safety + #[unstable(feature = "atomic_from_ptr", issue = "108652")] + #[rustc_const_unstable(feature = "atomic_from_ptr", issue = "108652")] + pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type { + // SAFETY: guaranteed by the caller + unsafe { &*ptr.cast() } + } + + /// Returns a mutable reference to the underlying integer. /// /// This is safe because the mutable reference guarantees that no other threads are @@ -2718,7 +2854,7 @@ macro_rules! atomic_int { /// /// // SAFETY: Safe as long as `my_atomic_op` is atomic. /// unsafe { - /// my_atomic_op(atomic.as_mut_ptr()); + /// my_atomic_op(atomic.as_ptr()); /// } /// # } /// ``` @@ -2726,7 +2862,7 @@ macro_rules! atomic_int { #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")] - pub fn as_mut_ptr(&self) -> *mut $int_type { + pub const fn as_ptr(&self) -> *mut $int_type { self.v.get() } } @@ -2988,7 +3124,8 @@ atomic_int_ptr_sized! { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] fn strongest_failure_ordering(order: Ordering) -> Ordering { match order { Release => Relaxed, @@ -3030,7 +3167,8 @@ unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_swap`. @@ -3047,7 +3185,8 @@ unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { /// Returns the previous value (like __sync_fetch_and_add). #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_add`. @@ -3064,7 +3203,8 @@ unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { /// Returns the previous value (like __sync_fetch_and_sub). #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_sub`. 
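Taken together, these hunks give every atomic type a `from_ptr` constructor that reinterprets caller-owned memory as an atomic view for some lifetime. A minimal usage sketch of the pattern, beyond the doc examples above (assumes a nightly toolchain with the unstable `atomic_from_ptr` feature; `shared_counter` is a hypothetical C-side accessor):

```rust
#![feature(atomic_from_ptr)]
use std::sync::atomic::{AtomicU32, Ordering};

extern "C" {
    // Hypothetical: returns a pointer to a `u32` owned by C code.
    fn shared_counter() -> *mut u32;
}

fn bump() -> u32 {
    // SAFETY (assumed): the pointer is aligned to `align_of::<AtomicU32>()`,
    // valid for reads and writes while the reference is live, and never
    // accessed non-atomically during that window.
    let counter = unsafe { AtomicU32::from_ptr(shared_counter()) };
    counter.fetch_add(1, Ordering::Relaxed)
}
```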
@@ -3080,7 +3220,8 @@ unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_compare_exchange<T: Copy>( dst: *mut T, @@ -3115,7 +3256,8 @@ unsafe fn atomic_compare_exchange<T: Copy>( } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_compare_exchange_weak<T: Copy>( dst: *mut T, @@ -3150,7 +3292,8 @@ unsafe fn atomic_compare_exchange_weak<T: Copy>( } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_and` @@ -3166,7 +3309,8 @@ unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_nand` @@ -3182,7 +3326,8 @@ unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_or` @@ -3198,7 +3343,8 @@ unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { } #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_xor` @@ -3215,7 +3361,8 @@ unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { /// returns the max value (signed comparison) #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_max` @@ -3232,7 +3379,8 @@ unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { /// returns the min value (signed comparison) #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for 
Miri backtraces unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_min` @@ -3249,7 +3397,8 @@ unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { /// returns the max value (unsigned comparison) #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_umax` @@ -3266,7 +3415,8 @@ unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { /// returns the min value (unsigned comparison) #[inline] -#[cfg(target_has_atomic = "8")] +#[cfg_attr(not(bootstrap), cfg(target_has_atomic))] +#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_umin` diff --git a/library/core/src/task/poll.rs b/library/core/src/task/poll.rs index 25b61c0e6..af5bf441b 100644 --- a/library/core/src/task/poll.rs +++ b/library/core/src/task/poll.rs @@ -45,6 +45,7 @@ impl<T> Poll<T> { /// assert_eq!(poll_some_len, Poll::Ready(13)); /// ``` #[stable(feature = "futures_api", since = "1.36.0")] + #[inline] pub fn map<U, F>(self, f: F) -> Poll<U> where F: FnOnce(T) -> U, @@ -144,6 +145,7 @@ impl<T, E> Poll<Result<T, E>> { /// assert_eq!(squared, Poll::Ready(Ok(144))); /// ``` #[stable(feature = "futures_api", since = "1.36.0")] + #[inline] pub fn map_ok<U, F>(self, f: F) -> Poll<Result<U, E>> where F: FnOnce(T) -> U, @@ -171,6 +173,7 @@ impl<T, E> Poll<Result<T, E>> { /// assert_eq!(res, Poll::Ready(Err(0))); /// ``` #[stable(feature = "futures_api", since = "1.36.0")] + #[inline] pub fn map_err<U, F>(self, f: F) -> Poll<Result<T, U>> where F: FnOnce(E) -> U, @@ -199,6 +202,7 @@ impl<T, E> Poll<Option<Result<T, E>>> { /// assert_eq!(squared, Poll::Ready(Some(Ok(144)))); /// ``` #[stable(feature = "poll_map", since = "1.51.0")] + #[inline] pub fn map_ok<U, F>(self, f: F) -> Poll<Option<Result<U, E>>> where F: FnOnce(T) -> U, @@ -228,6 +232,7 @@ impl<T, E> Poll<Option<Result<T, E>>> { /// assert_eq!(res, Poll::Ready(Some(Err(0)))); /// ``` #[stable(feature = "poll_map", since = "1.51.0")] + #[inline] pub fn map_err<U, F>(self, f: F) -> Poll<Option<Result<T, U>>> where F: FnOnce(E) -> U, diff --git a/library/core/src/task/wake.rs b/library/core/src/task/wake.rs index 89adfccd9..808825326 100644 --- a/library/core/src/task/wake.rs +++ b/library/core/src/task/wake.rs @@ -174,7 +174,7 @@ impl RawWakerVTable { /// Currently, `Context` only serves to provide access to a [`&Waker`](Waker) /// which can be used to wake the current task. 
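The repeated `cfg_attr` pairs above all encode the same bootstrap dance: `bootstrap` is set while the library is compiled by the previous release's compiler, which still requires an explicit width on `target_has_atomic*`, whereas the in-tree compiler accepts the new valueless form. In isolation the pattern looks like this (a sketch; `needs_atomics` is a placeholder item, and `--cfg bootstrap` is only ever set inside rustc's own build system):

```rust
// In-tree compiler: the valueless cfg is true whenever the target has
// any atomic support at all.
#[cfg_attr(not(bootstrap), cfg(target_has_atomic))]
// Stage0 compiler: needs an explicit width; "8" is the smallest, so it
// has long served as an approximation of "any atomic support".
#[cfg_attr(bootstrap, cfg(target_has_atomic = "8"))]
fn needs_atomics() {}
```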
#[stable(feature = "futures_api", since = "1.36.0")] -#[cfg_attr(not(bootstrap), lang = "Context")] +#[lang = "Context"] pub struct Context<'a> { waker: &'a Waker, // Ensure we future-proof against variance changes by forcing diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs index f268fe3ae..5327e4f81 100644 --- a/library/core/tests/array.rs +++ b/library/core/tests/array.rs @@ -700,3 +700,28 @@ fn array_into_iter_rfold() { let s = it.rfold(10, |a, b| 10 * a + b); assert_eq!(s, 10432); } + +#[cfg(not(panic = "abort"))] +#[test] +fn array_map_drops_unmapped_elements_on_panic() { + struct DropCounter<'a>(usize, &'a AtomicUsize); + impl Drop for DropCounter<'_> { + fn drop(&mut self) { + self.1.fetch_add(1, Ordering::SeqCst); + } + } + + const MAX: usize = 11; + for panic_after in 0..MAX { + let counter = AtomicUsize::new(0); + let a = array::from_fn::<_, 11, _>(|i| DropCounter(i, &counter)); + let success = std::panic::catch_unwind(|| { + let _ = a.map(|x| { + assert!(x.0 < panic_after); + assert_eq!(counter.load(Ordering::SeqCst), x.0); + }); + }); + assert!(success.is_err()); + assert_eq!(counter.load(Ordering::SeqCst), MAX); + } +} diff --git a/library/core/tests/iter/adapters/mod.rs b/library/core/tests/iter/adapters/mod.rs index ffd5f3857..ca3463aa7 100644 --- a/library/core/tests/iter/adapters/mod.rs +++ b/library/core/tests/iter/adapters/mod.rs @@ -24,7 +24,7 @@ mod zip; use core::cell::Cell; -/// An iterator that panics whenever `next` or next_back` is called +/// An iterator that panics whenever `next` or `next_back` is called /// after `None` has already been returned. This does not violate /// `Iterator`'s contract. Used to test that iterator adapters don't /// poll their inner iterators after exhausting them. diff --git a/library/core/tests/iter/range.rs b/library/core/tests/iter/range.rs index 84498a8ea..0f91ffe2d 100644 --- a/library/core/tests/iter/range.rs +++ b/library/core/tests/iter/range.rs @@ -26,7 +26,6 @@ fn test_range() { #[test] fn test_char_range() { - use std::char; // Miri is too slow let from = if cfg!(miri) { char::from_u32(0xD800 - 10).unwrap() } else { '\0' }; let to = if cfg!(miri) { char::from_u32(0xDFFF + 10).unwrap() } else { char::MAX }; diff --git a/library/core/tests/iter/traits/iterator.rs b/library/core/tests/iter/traits/iterator.rs index 37345c1d3..62566a950 100644 --- a/library/core/tests/iter/traits/iterator.rs +++ b/library/core/tests/iter/traits/iterator.rs @@ -582,6 +582,9 @@ fn test_next_chunk() { assert_eq!(it.next_chunk().unwrap(), []); assert_eq!(it.next_chunk().unwrap(), [4, 5, 6, 7, 8, 9]); assert_eq!(it.next_chunk::<4>().unwrap_err().as_slice(), &[10, 11]); + + let mut it = std::iter::repeat_with(|| panic!()); + assert_eq!(it.next_chunk::<0>().unwrap(), []); } // just tests by whether or not this compiles diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs index 42a26ae16..ccb7be68e 100644 --- a/library/core/tests/lib.rs +++ b/library/core/tests/lib.rs @@ -66,6 +66,8 @@ #![feature(try_trait_v2)] #![feature(slice_internals)] #![feature(slice_partition_dedup)] +#![feature(ip)] +#![feature(ip_in_core)] #![feature(iter_advance_by)] #![feature(iter_array_chunks)] #![feature(iter_collect_into)] @@ -77,6 +79,9 @@ #![feature(iter_repeat_n)] #![feature(iterator_try_collect)] #![feature(iterator_try_reduce)] +#![feature(const_ip)] +#![feature(const_ipv4)] +#![feature(const_ipv6)] #![feature(const_mut_refs)] #![feature(const_pin)] #![feature(const_waker)] @@ -135,6 +140,7 @@ mod lazy; mod macros; mod 
manually_drop; mod mem; +mod net; mod nonzero; mod num; mod ops; diff --git a/library/core/tests/net/ip_addr.rs b/library/core/tests/net/ip_addr.rs new file mode 100644 index 000000000..5a6ac08c0 --- /dev/null +++ b/library/core/tests/net/ip_addr.rs @@ -0,0 +1,1035 @@ +use super::{sa4, sa6}; +use core::net::{ + IpAddr, Ipv4Addr, Ipv6Addr, Ipv6MulticastScope, SocketAddr, SocketAddrV4, SocketAddrV6, +}; +use core::str::FromStr; + +#[test] +fn test_from_str_ipv4() { + assert_eq!(Ok(Ipv4Addr::new(127, 0, 0, 1)), "127.0.0.1".parse()); + assert_eq!(Ok(Ipv4Addr::new(255, 255, 255, 255)), "255.255.255.255".parse()); + assert_eq!(Ok(Ipv4Addr::new(0, 0, 0, 0)), "0.0.0.0".parse()); + + // out of range + let none: Option<Ipv4Addr> = "256.0.0.1".parse().ok(); + assert_eq!(None, none); + // too short + let none: Option<Ipv4Addr> = "255.0.0".parse().ok(); + assert_eq!(None, none); + // too long + let none: Option<Ipv4Addr> = "255.0.0.1.2".parse().ok(); + assert_eq!(None, none); + // no number between dots + let none: Option<Ipv4Addr> = "255.0..1".parse().ok(); + assert_eq!(None, none); + // octal + let none: Option<Ipv4Addr> = "255.0.0.01".parse().ok(); + assert_eq!(None, none); + // octal zero + let none: Option<Ipv4Addr> = "255.0.0.00".parse().ok(); + assert_eq!(None, none); + let none: Option<Ipv4Addr> = "255.0.00.0".parse().ok(); + assert_eq!(None, none); +} + +#[test] +fn test_from_str_ipv6() { + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "0:0:0:0:0:0:0:0".parse()); + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "0:0:0:0:0:0:0:1".parse()); + + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "::1".parse()); + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "::".parse()); + + assert_eq!(Ok(Ipv6Addr::new(0x2a02, 0x6b8, 0, 0, 0, 0, 0x11, 0x11)), "2a02:6b8::11:11".parse()); + + // too long group + let none: Option<Ipv6Addr> = "::00000".parse().ok(); + assert_eq!(None, none); + // too short + let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7".parse().ok(); + assert_eq!(None, none); + // too long + let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:8:9".parse().ok(); + assert_eq!(None, none); + // triple colon + let none: Option<Ipv6Addr> = "1:2:::6:7:8".parse().ok(); + assert_eq!(None, none); + // two double colons + let none: Option<Ipv6Addr> = "1:2::6::8".parse().ok(); + assert_eq!(None, none); + // `::` indicating zero groups of zeros + let none: Option<Ipv6Addr> = "1:2:3:4::5:6:7:8".parse().ok(); + assert_eq!(None, none); +} + +#[test] +fn test_from_str_ipv4_in_ipv6() { + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 49152, 545)), "::192.0.2.33".parse()); + assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0xFFFF, 49152, 545)), "::FFFF:192.0.2.33".parse()); + assert_eq!( + Ok(Ipv6Addr::new(0x64, 0xff9b, 0, 0, 0, 0, 49152, 545)), + "64:ff9b::192.0.2.33".parse() + ); + assert_eq!( + Ok(Ipv6Addr::new(0x2001, 0xdb8, 0x122, 0xc000, 0x2, 0x2100, 49152, 545)), + "2001:db8:122:c000:2:2100:192.0.2.33".parse() + ); + + // colon after v4 + let none: Option<Ipv4Addr> = "::127.0.0.1:".parse().ok(); + assert_eq!(None, none); + // not enough groups + let none: Option<Ipv6Addr> = "1:2:3:4:5:127.0.0.1".parse().ok(); + assert_eq!(None, none); + // too many groups + let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:127.0.0.1".parse().ok(); + assert_eq!(None, none); +} + +#[test] +fn test_from_str_socket_addr() { + assert_eq!(Ok(sa4(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse()); + assert_eq!(Ok(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse()); + assert_eq!( + 
Ok(sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53)), + "[2a02:6b8:0:1::1]:53".parse() + ); + assert_eq!( + Ok(SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0)), + "[2a02:6b8:0:1::1]:53".parse() + ); + assert_eq!(Ok(sa6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22)), "[::127.0.0.1]:22".parse()); + assert_eq!( + Ok(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22, 0, 0)), + "[::127.0.0.1]:22".parse() + ); + + // without port + let none: Option<SocketAddr> = "127.0.0.1".parse().ok(); + assert_eq!(None, none); + // without port + let none: Option<SocketAddr> = "127.0.0.1:".parse().ok(); + assert_eq!(None, none); + // wrong brackets around v4 + let none: Option<SocketAddr> = "[127.0.0.1]:22".parse().ok(); + assert_eq!(None, none); + // port out of range + let none: Option<SocketAddr> = "127.0.0.1:123456".parse().ok(); + assert_eq!(None, none); +} + +#[test] +fn ipv4_addr_to_string() { + assert_eq!(Ipv4Addr::new(127, 0, 0, 1).to_string(), "127.0.0.1"); + // Short address + assert_eq!(Ipv4Addr::new(1, 1, 1, 1).to_string(), "1.1.1.1"); + // Long address + assert_eq!(Ipv4Addr::new(127, 127, 127, 127).to_string(), "127.127.127.127"); + + // Test padding + assert_eq!(format!("{:16}", Ipv4Addr::new(1, 1, 1, 1)), "1.1.1.1 "); + assert_eq!(format!("{:>16}", Ipv4Addr::new(1, 1, 1, 1)), " 1.1.1.1"); +} + +#[test] +fn ipv6_addr_to_string() { + // ipv4-mapped address + let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280); + assert_eq!(a1.to_string(), "::ffff:192.0.2.128"); + + // ipv4-compatible address + let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280); + assert_eq!(a1.to_string(), "::192.0.2.128"); + + // v6 address with no zero segments + assert_eq!(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15).to_string(), "8:9:a:b:c:d:e:f"); + + // longest possible IPv6 length + assert_eq!( + Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888).to_string(), + "1111:2222:3333:4444:5555:6666:7777:8888" + ); + // padding + assert_eq!(format!("{:20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), "1:2:3:4:5:6:7:8 "); + assert_eq!(format!("{:>20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), " 1:2:3:4:5:6:7:8"); + + // reduce a single run of zeros + assert_eq!( + "ae::ffff:102:304", + Ipv6Addr::new(0xae, 0, 0, 0, 0, 0xffff, 0x0102, 0x0304).to_string() + ); + + // don't reduce just a single zero segment + assert_eq!("1:2:3:4:5:6:0:8", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 0, 8).to_string()); + + // 'any' address + assert_eq!("::", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).to_string()); + + // loopback address + assert_eq!("::1", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_string()); + + // ends in zeros + assert_eq!("1::", Ipv6Addr::new(1, 0, 0, 0, 0, 0, 0, 0).to_string()); + + // two runs of zeros, second one is longer + assert_eq!("1:0:0:4::8", Ipv6Addr::new(1, 0, 0, 4, 0, 0, 0, 8).to_string()); + + // two runs of zeros, equal length + assert_eq!("1::4:5:0:0:8", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8).to_string()); + + // don't prefix `0x` to each segment in `dbg!`. 
+ assert_eq!("1::4:5:0:0:8", &format!("{:#?}", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8))); +} + +#[test] +fn ipv4_to_ipv6() { + assert_eq!( + Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678), + Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_mapped() + ); + assert_eq!( + Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678), + Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_compatible() + ); +} + +#[test] +fn ipv6_to_ipv4_mapped() { + assert_eq!( + Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4_mapped(), + Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78)) + ); + assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4_mapped(), None); +} + +#[test] +fn ipv6_to_ipv4() { + assert_eq!( + Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4(), + Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78)) + ); + assert_eq!( + Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4(), + Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78)) + ); + assert_eq!(Ipv6Addr::new(0, 0, 1, 0, 0, 0, 0x1234, 0x5678).to_ipv4(), None); +} + +#[test] +fn ip_properties() { + macro_rules! ip { + ($s:expr) => { + IpAddr::from_str($s).unwrap() + }; + } + + macro_rules! check { + ($s:expr) => { + check!($s, 0); + }; + + ($s:expr, $mask:expr) => {{ + let unspec: u8 = 1 << 0; + let loopback: u8 = 1 << 1; + let global: u8 = 1 << 2; + let multicast: u8 = 1 << 3; + let doc: u8 = 1 << 4; + let benchmarking: u8 = 1 << 5; + + if ($mask & unspec) == unspec { + assert!(ip!($s).is_unspecified()); + } else { + assert!(!ip!($s).is_unspecified()); + } + + if ($mask & loopback) == loopback { + assert!(ip!($s).is_loopback()); + } else { + assert!(!ip!($s).is_loopback()); + } + + if ($mask & global) == global { + assert!(ip!($s).is_global()); + } else { + assert!(!ip!($s).is_global()); + } + + if ($mask & multicast) == multicast { + assert!(ip!($s).is_multicast()); + } else { + assert!(!ip!($s).is_multicast()); + } + + if ($mask & doc) == doc { + assert!(ip!($s).is_documentation()); + } else { + assert!(!ip!($s).is_documentation()); + } + + if ($mask & benchmarking) == benchmarking { + assert!(ip!($s).is_benchmarking()); + } else { + assert!(!ip!($s).is_benchmarking()); + } + }}; + } + + let unspec: u8 = 1 << 0; + let loopback: u8 = 1 << 1; + let global: u8 = 1 << 2; + let multicast: u8 = 1 << 3; + let doc: u8 = 1 << 4; + let benchmarking: u8 = 1 << 5; + + check!("0.0.0.0", unspec); + check!("0.0.0.1"); + check!("0.1.0.0"); + check!("10.9.8.7"); + check!("127.1.2.3", loopback); + check!("172.31.254.253"); + check!("169.254.253.242"); + check!("192.0.2.183", doc); + check!("192.1.2.183", global); + check!("192.168.254.253"); + check!("198.51.100.0", doc); + check!("203.0.113.0", doc); + check!("203.2.113.0", global); + check!("224.0.0.0", global | multicast); + check!("239.255.255.255", global | multicast); + check!("255.255.255.255"); + // make sure benchmarking addresses are not global + check!("198.18.0.0", benchmarking); + check!("198.18.54.2", benchmarking); + check!("198.19.255.255", benchmarking); + // make sure addresses reserved for protocol assignment are not global + check!("192.0.0.0"); + check!("192.0.0.255"); + check!("192.0.0.100"); + // make sure reserved addresses are not global + check!("240.0.0.0"); + check!("251.54.1.76"); + check!("254.255.255.255"); + // make sure shared addresses are not global + check!("100.64.0.0"); + check!("100.127.255.255"); + check!("100.100.100.0"); + + check!("::", unspec); + check!("::1", loopback); + check!("::0.0.0.2", global); + check!("1::", global); + check!("fc00::"); + 
check!("fdff:ffff::"); + check!("fe80:ffff::"); + check!("febf:ffff::"); + check!("fec0::", global); + check!("ff01::", global | multicast); + check!("ff02::", global | multicast); + check!("ff03::", global | multicast); + check!("ff04::", global | multicast); + check!("ff05::", global | multicast); + check!("ff08::", global | multicast); + check!("ff0e::", global | multicast); + check!("2001:db8:85a3::8a2e:370:7334", doc); + check!("2001:2::ac32:23ff:21", benchmarking); + check!("102:304:506:708:90a:b0c:d0e:f10", global); +} + +#[test] +fn ipv4_properties() { + macro_rules! ip { + ($s:expr) => { + Ipv4Addr::from_str($s).unwrap() + }; + } + + macro_rules! check { + ($s:expr) => { + check!($s, 0); + }; + + ($s:expr, $mask:expr) => {{ + let unspec: u16 = 1 << 0; + let loopback: u16 = 1 << 1; + let private: u16 = 1 << 2; + let link_local: u16 = 1 << 3; + let global: u16 = 1 << 4; + let multicast: u16 = 1 << 5; + let broadcast: u16 = 1 << 6; + let documentation: u16 = 1 << 7; + let benchmarking: u16 = 1 << 8; + let reserved: u16 = 1 << 10; + let shared: u16 = 1 << 11; + + if ($mask & unspec) == unspec { + assert!(ip!($s).is_unspecified()); + } else { + assert!(!ip!($s).is_unspecified()); + } + + if ($mask & loopback) == loopback { + assert!(ip!($s).is_loopback()); + } else { + assert!(!ip!($s).is_loopback()); + } + + if ($mask & private) == private { + assert!(ip!($s).is_private()); + } else { + assert!(!ip!($s).is_private()); + } + + if ($mask & link_local) == link_local { + assert!(ip!($s).is_link_local()); + } else { + assert!(!ip!($s).is_link_local()); + } + + if ($mask & global) == global { + assert!(ip!($s).is_global()); + } else { + assert!(!ip!($s).is_global()); + } + + if ($mask & multicast) == multicast { + assert!(ip!($s).is_multicast()); + } else { + assert!(!ip!($s).is_multicast()); + } + + if ($mask & broadcast) == broadcast { + assert!(ip!($s).is_broadcast()); + } else { + assert!(!ip!($s).is_broadcast()); + } + + if ($mask & documentation) == documentation { + assert!(ip!($s).is_documentation()); + } else { + assert!(!ip!($s).is_documentation()); + } + + if ($mask & benchmarking) == benchmarking { + assert!(ip!($s).is_benchmarking()); + } else { + assert!(!ip!($s).is_benchmarking()); + } + + if ($mask & reserved) == reserved { + assert!(ip!($s).is_reserved()); + } else { + assert!(!ip!($s).is_reserved()); + } + + if ($mask & shared) == shared { + assert!(ip!($s).is_shared()); + } else { + assert!(!ip!($s).is_shared()); + } + }}; + } + + let unspec: u16 = 1 << 0; + let loopback: u16 = 1 << 1; + let private: u16 = 1 << 2; + let link_local: u16 = 1 << 3; + let global: u16 = 1 << 4; + let multicast: u16 = 1 << 5; + let broadcast: u16 = 1 << 6; + let documentation: u16 = 1 << 7; + let benchmarking: u16 = 1 << 8; + let reserved: u16 = 1 << 10; + let shared: u16 = 1 << 11; + + check!("0.0.0.0", unspec); + check!("0.0.0.1"); + check!("0.1.0.0"); + check!("10.9.8.7", private); + check!("127.1.2.3", loopback); + check!("172.31.254.253", private); + check!("169.254.253.242", link_local); + check!("192.0.2.183", documentation); + check!("192.1.2.183", global); + check!("192.168.254.253", private); + check!("198.51.100.0", documentation); + check!("203.0.113.0", documentation); + check!("203.2.113.0", global); + check!("224.0.0.0", global | multicast); + check!("239.255.255.255", global | multicast); + check!("255.255.255.255", broadcast); + check!("198.18.0.0", benchmarking); + check!("198.18.54.2", benchmarking); + check!("198.19.255.255", benchmarking); + check!("192.0.0.0"); + 
check!("192.0.0.255"); + check!("192.0.0.100"); + check!("240.0.0.0", reserved); + check!("251.54.1.76", reserved); + check!("254.255.255.255", reserved); + check!("100.64.0.0", shared); + check!("100.127.255.255", shared); + check!("100.100.100.0", shared); +} + +#[test] +fn ipv6_properties() { + macro_rules! ip { + ($s:expr) => { + Ipv6Addr::from_str($s).unwrap() + }; + } + + macro_rules! check { + ($s:expr, &[$($octet:expr),*], $mask:expr) => { + assert_eq!($s, ip!($s).to_string()); + let octets = &[$($octet),*]; + assert_eq!(&ip!($s).octets(), octets); + assert_eq!(Ipv6Addr::from(*octets), ip!($s)); + + let unspecified: u32 = 1 << 0; + let loopback: u32 = 1 << 1; + let unique_local: u32 = 1 << 2; + let global: u32 = 1 << 3; + let unicast_link_local: u32 = 1 << 4; + let unicast_global: u32 = 1 << 7; + let documentation: u32 = 1 << 8; + let benchmarking: u32 = 1 << 16; + let multicast_interface_local: u32 = 1 << 9; + let multicast_link_local: u32 = 1 << 10; + let multicast_realm_local: u32 = 1 << 11; + let multicast_admin_local: u32 = 1 << 12; + let multicast_site_local: u32 = 1 << 13; + let multicast_organization_local: u32 = 1 << 14; + let multicast_global: u32 = 1 << 15; + let multicast: u32 = multicast_interface_local + | multicast_admin_local + | multicast_global + | multicast_link_local + | multicast_realm_local + | multicast_site_local + | multicast_organization_local; + + if ($mask & unspecified) == unspecified { + assert!(ip!($s).is_unspecified()); + } else { + assert!(!ip!($s).is_unspecified()); + } + if ($mask & loopback) == loopback { + assert!(ip!($s).is_loopback()); + } else { + assert!(!ip!($s).is_loopback()); + } + if ($mask & unique_local) == unique_local { + assert!(ip!($s).is_unique_local()); + } else { + assert!(!ip!($s).is_unique_local()); + } + if ($mask & global) == global { + assert!(ip!($s).is_global()); + } else { + assert!(!ip!($s).is_global()); + } + if ($mask & unicast_link_local) == unicast_link_local { + assert!(ip!($s).is_unicast_link_local()); + } else { + assert!(!ip!($s).is_unicast_link_local()); + } + if ($mask & unicast_global) == unicast_global { + assert!(ip!($s).is_unicast_global()); + } else { + assert!(!ip!($s).is_unicast_global()); + } + if ($mask & documentation) == documentation { + assert!(ip!($s).is_documentation()); + } else { + assert!(!ip!($s).is_documentation()); + } + if ($mask & benchmarking) == benchmarking { + assert!(ip!($s).is_benchmarking()); + } else { + assert!(!ip!($s).is_benchmarking()); + } + if ($mask & multicast) != 0 { + assert!(ip!($s).multicast_scope().is_some()); + assert!(ip!($s).is_multicast()); + } else { + assert!(ip!($s).multicast_scope().is_none()); + assert!(!ip!($s).is_multicast()); + } + if ($mask & multicast_interface_local) == multicast_interface_local { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::InterfaceLocal); + } + if ($mask & multicast_link_local) == multicast_link_local { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::LinkLocal); + } + if ($mask & multicast_realm_local) == multicast_realm_local { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::RealmLocal); + } + if ($mask & multicast_admin_local) == multicast_admin_local { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::AdminLocal); + } + if ($mask & multicast_site_local) == multicast_site_local { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::SiteLocal); + } + if ($mask & multicast_organization_local) == multicast_organization_local 
{ + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::OrganizationLocal); + } + if ($mask & multicast_global) == multicast_global { + assert_eq!(ip!($s).multicast_scope().unwrap(), + Ipv6MulticastScope::Global); + } + } + } + + let unspecified: u32 = 1 << 0; + let loopback: u32 = 1 << 1; + let unique_local: u32 = 1 << 2; + let global: u32 = 1 << 3; + let unicast_link_local: u32 = 1 << 4; + let unicast_global: u32 = 1 << 7; + let documentation: u32 = 1 << 8; + let benchmarking: u32 = 1 << 16; + let multicast_interface_local: u32 = 1 << 9; + let multicast_link_local: u32 = 1 << 10; + let multicast_realm_local: u32 = 1 << 11; + let multicast_admin_local: u32 = 1 << 12; + let multicast_site_local: u32 = 1 << 13; + let multicast_organization_local: u32 = 1 << 14; + let multicast_global: u32 = 1 << 15; + + check!("::", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unspecified); + + check!("::1", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], loopback); + + check!("::0.0.0.2", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], global | unicast_global); + + check!("1::", &[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], global | unicast_global); + + check!( + "::ffff:127.0.0.1", + &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x7f, 0, 0, 1], + unicast_global + ); + + check!( + "64:ff9b:1::", + &[0, 0x64, 0xff, 0x9b, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + unicast_global + ); + + check!("100::", &[0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); + + check!("2001::", &[0x20, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); + + check!( + "2001:1::1", + &[0x20, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], + global | unicast_global + ); + + check!( + "2001:1::2", + &[0x20, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], + global | unicast_global + ); + + check!( + "2001:3::", + &[0x20, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + + check!( + "2001:4:112::", + &[0x20, 1, 0, 4, 1, 0x12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + + check!( + "2001:20::", + &[0x20, 1, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + + check!("2001:30::", &[0x20, 1, 0, 0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); + + check!( + "2001:200::", + &[0x20, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + global | unicast_global + ); + + check!("fc00::", &[0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unique_local); + + check!( + "fdff:ffff::", + &[0xfd, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + unique_local + ); + + check!( + "fe80:ffff::", + &[0xfe, 0x80, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + unicast_link_local + ); + + check!("fe80::", &[0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local); + + check!( + "febf:ffff::", + &[0xfe, 0xbf, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + unicast_link_local + ); + + check!("febf::", &[0xfe, 0xbf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local); + + check!( + "febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + &[ + 0xfe, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff + ], + unicast_link_local + ); + + check!( + "fe80::ffff:ffff:ffff:ffff", + &[ + 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff + ], + unicast_link_local + ); + + check!( + "fe80:0:0:1::", + &[0xfe, 0x80, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + unicast_link_local + ); + + check!( + "fec0::", + &[0xfe, 
0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + unicast_global | global + ); + + check!( + "ff01::", + &[0xff, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_interface_local | global + ); + + check!( + "ff02::", + &[0xff, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_link_local | global + ); + + check!( + "ff03::", + &[0xff, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_realm_local | global + ); + + check!( + "ff04::", + &[0xff, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_admin_local | global + ); + + check!( + "ff05::", + &[0xff, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_site_local | global + ); + + check!( + "ff08::", + &[0xff, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_organization_local | global + ); + + check!( + "ff0e::", + &[0xff, 0xe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + multicast_global | global + ); + + check!( + "2001:db8:85a3::8a2e:370:7334", + &[0x20, 1, 0xd, 0xb8, 0x85, 0xa3, 0, 0, 0, 0, 0x8a, 0x2e, 3, 0x70, 0x73, 0x34], + documentation + ); + + check!( + "2001:2::ac32:23ff:21", + &[0x20, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0xac, 0x32, 0x23, 0xff, 0, 0x21], + benchmarking + ); + + check!( + "102:304:506:708:90a:b0c:d0e:f10", + &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + global | unicast_global + ); +} + +#[test] +fn test_ipv4_to_int() { + let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44); + assert_eq!(u32::from(a), 0x11223344); +} + +#[test] +fn test_int_to_ipv4() { + let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44); + assert_eq!(Ipv4Addr::from(0x11223344), a); +} + +#[test] +fn test_ipv6_to_int() { + let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11); + assert_eq!(u128::from(a), 0x112233445566778899aabbccddeeff11u128); +} + +#[test] +fn test_int_to_ipv6() { + let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11); + assert_eq!(Ipv6Addr::from(0x112233445566778899aabbccddeeff11u128), a); +} + +#[test] +fn ipv4_from_constructors() { + assert_eq!(Ipv4Addr::LOCALHOST, Ipv4Addr::new(127, 0, 0, 1)); + assert!(Ipv4Addr::LOCALHOST.is_loopback()); + assert_eq!(Ipv4Addr::UNSPECIFIED, Ipv4Addr::new(0, 0, 0, 0)); + assert!(Ipv4Addr::UNSPECIFIED.is_unspecified()); + assert_eq!(Ipv4Addr::BROADCAST, Ipv4Addr::new(255, 255, 255, 255)); + assert!(Ipv4Addr::BROADCAST.is_broadcast()); +} + +#[test] +fn ipv6_from_constructors() { + assert_eq!(Ipv6Addr::LOCALHOST, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); + assert!(Ipv6Addr::LOCALHOST.is_loopback()); + assert_eq!(Ipv6Addr::UNSPECIFIED, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)); + assert!(Ipv6Addr::UNSPECIFIED.is_unspecified()); +} + +#[test] +fn ipv4_from_octets() { + assert_eq!(Ipv4Addr::from([127, 0, 0, 1]), Ipv4Addr::new(127, 0, 0, 1)) +} + +#[test] +fn ipv6_from_segments() { + let from_u16s = + Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]); + let new = Ipv6Addr::new(0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff); + assert_eq!(new, from_u16s); +} + +#[test] +fn ipv6_from_octets() { + let from_u16s = + Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]); + let from_u8s = Ipv6Addr::from([ + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, + 0xff, + ]); + assert_eq!(from_u16s, from_u8s); +} + +#[test] +fn cmp() { + let v41 = Ipv4Addr::new(100, 64, 3, 3); + let v42 = Ipv4Addr::new(192, 0, 2, 2); + let v61 = "2001:db8:f00::1002".parse::<Ipv6Addr>().unwrap(); + let v62 = 
"2001:db8:f00::2001".parse::<Ipv6Addr>().unwrap(); + assert!(v41 < v42); + assert!(v61 < v62); + + assert_eq!(v41, IpAddr::V4(v41)); + assert_eq!(v61, IpAddr::V6(v61)); + assert!(v41 != IpAddr::V4(v42)); + assert!(v61 != IpAddr::V6(v62)); + + assert!(v41 < IpAddr::V4(v42)); + assert!(v61 < IpAddr::V6(v62)); + assert!(IpAddr::V4(v41) < v42); + assert!(IpAddr::V6(v61) < v62); + + assert!(v41 < IpAddr::V6(v61)); + assert!(IpAddr::V4(v41) < v61); +} + +#[test] +fn is_v4() { + let ip = IpAddr::V4(Ipv4Addr::new(100, 64, 3, 3)); + assert!(ip.is_ipv4()); + assert!(!ip.is_ipv6()); +} + +#[test] +fn is_v6() { + let ip = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678)); + assert!(!ip.is_ipv4()); + assert!(ip.is_ipv6()); +} + +#[test] +fn ipv4_const() { + // test that the methods of `Ipv4Addr` are usable in a const context + + const IP_ADDRESS: Ipv4Addr = Ipv4Addr::new(127, 0, 0, 1); + assert_eq!(IP_ADDRESS, Ipv4Addr::LOCALHOST); + + const OCTETS: [u8; 4] = IP_ADDRESS.octets(); + assert_eq!(OCTETS, [127, 0, 0, 1]); + + const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified(); + assert!(!IS_UNSPECIFIED); + + const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback(); + assert!(IS_LOOPBACK); + + const IS_PRIVATE: bool = IP_ADDRESS.is_private(); + assert!(!IS_PRIVATE); + + const IS_LINK_LOCAL: bool = IP_ADDRESS.is_link_local(); + assert!(!IS_LINK_LOCAL); + + const IS_GLOBAL: bool = IP_ADDRESS.is_global(); + assert!(!IS_GLOBAL); + + const IS_SHARED: bool = IP_ADDRESS.is_shared(); + assert!(!IS_SHARED); + + const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking(); + assert!(!IS_BENCHMARKING); + + const IS_RESERVED: bool = IP_ADDRESS.is_reserved(); + assert!(!IS_RESERVED); + + const IS_MULTICAST: bool = IP_ADDRESS.is_multicast(); + assert!(!IS_MULTICAST); + + const IS_BROADCAST: bool = IP_ADDRESS.is_broadcast(); + assert!(!IS_BROADCAST); + + const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation(); + assert!(!IS_DOCUMENTATION); + + const IP_V6_COMPATIBLE: Ipv6Addr = IP_ADDRESS.to_ipv6_compatible(); + assert_eq!( + IP_V6_COMPATIBLE, + Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 1]) + ); + + const IP_V6_MAPPED: Ipv6Addr = IP_ADDRESS.to_ipv6_mapped(); + assert_eq!( + IP_V6_MAPPED, + Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 127, 0, 0, 1]) + ); +} + +#[test] +fn ipv6_const() { + // test that the methods of `Ipv6Addr` are usable in a const context + + const IP_ADDRESS: Ipv6Addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + assert_eq!(IP_ADDRESS, Ipv6Addr::LOCALHOST); + + const SEGMENTS: [u16; 8] = IP_ADDRESS.segments(); + assert_eq!(SEGMENTS, [0, 0, 0, 0, 0, 0, 0, 1]); + + const OCTETS: [u8; 16] = IP_ADDRESS.octets(); + assert_eq!(OCTETS, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]); + + const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified(); + assert!(!IS_UNSPECIFIED); + + const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback(); + assert!(IS_LOOPBACK); + + const IS_GLOBAL: bool = IP_ADDRESS.is_global(); + assert!(!IS_GLOBAL); + + const IS_UNIQUE_LOCAL: bool = IP_ADDRESS.is_unique_local(); + assert!(!IS_UNIQUE_LOCAL); + + const IS_UNICAST_LINK_LOCAL: bool = IP_ADDRESS.is_unicast_link_local(); + assert!(!IS_UNICAST_LINK_LOCAL); + + const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation(); + assert!(!IS_DOCUMENTATION); + + const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking(); + assert!(!IS_BENCHMARKING); + + const IS_UNICAST_GLOBAL: bool = IP_ADDRESS.is_unicast_global(); + assert!(!IS_UNICAST_GLOBAL); + + const MULTICAST_SCOPE: Option<Ipv6MulticastScope> = 
IP_ADDRESS.multicast_scope(); + assert_eq!(MULTICAST_SCOPE, None); + + const IS_MULTICAST: bool = IP_ADDRESS.is_multicast(); + assert!(!IS_MULTICAST); + + const IP_V4: Option<Ipv4Addr> = IP_ADDRESS.to_ipv4(); + assert_eq!(IP_V4.unwrap(), Ipv4Addr::new(0, 0, 0, 1)); +} + +#[test] +fn ip_const() { + // test that the methods of `IpAddr` are usable in a const context + + const IP_ADDRESS: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); + + const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified(); + assert!(!IS_UNSPECIFIED); + + const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback(); + assert!(IS_LOOPBACK); + + const IS_GLOBAL: bool = IP_ADDRESS.is_global(); + assert!(!IS_GLOBAL); + + const IS_MULTICAST: bool = IP_ADDRESS.is_multicast(); + assert!(!IS_MULTICAST); + + const IS_IP_V4: bool = IP_ADDRESS.is_ipv4(); + assert!(IS_IP_V4); + + const IS_IP_V6: bool = IP_ADDRESS.is_ipv6(); + assert!(!IS_IP_V6); +} + +#[test] +fn structural_match() { + // test that all IP types can be structurally matched upon + + const IPV4: Ipv4Addr = Ipv4Addr::LOCALHOST; + match IPV4 { + Ipv4Addr::LOCALHOST => {} + _ => unreachable!(), + } + + const IPV6: Ipv6Addr = Ipv6Addr::LOCALHOST; + match IPV6 { + Ipv6Addr::LOCALHOST => {} + _ => unreachable!(), + } + + const IP: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); + match IP { + IpAddr::V4(Ipv4Addr::LOCALHOST) => {} + _ => unreachable!(), + } +} diff --git a/library/core/tests/net/mod.rs b/library/core/tests/net/mod.rs new file mode 100644 index 000000000..8f17bbe55 --- /dev/null +++ b/library/core/tests/net/mod.rs @@ -0,0 +1,13 @@ +use core::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; + +mod ip_addr; +mod parser; +mod socket_addr; + +pub fn sa4(a: Ipv4Addr, p: u16) -> SocketAddr { + SocketAddr::V4(SocketAddrV4::new(a, p)) +} + +pub fn sa6(a: Ipv6Addr, p: u16) -> SocketAddr { + SocketAddr::V6(SocketAddrV6::new(a, p, 0, 0)) +} diff --git a/library/std/src/net/parser/tests.rs b/library/core/tests/net/parser.rs index 6d2d48eca..36b87d7c1 100644 --- a/library/std/src/net/parser/tests.rs +++ b/library/core/tests/net/parser.rs @@ -1,6 +1,6 @@ // FIXME: These tests are all excellent candidates for AFL fuzz testing -use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; -use crate::str::FromStr; +use core::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use core::str::FromStr; const PORT: u16 = 8080; const SCOPE_ID: u32 = 1337; diff --git a/library/core/tests/net/socket_addr.rs b/library/core/tests/net/socket_addr.rs new file mode 100644 index 000000000..68c7cd94d --- /dev/null +++ b/library/core/tests/net/socket_addr.rs @@ -0,0 +1,233 @@ +use core::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; + +#[test] +fn ipv4_socket_addr_to_string() { + // Shortest possible IPv4 length. + assert_eq!(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 0).to_string(), "0.0.0.0:0"); + + // Longest possible IPv4 length. + assert_eq!( + SocketAddrV4::new(Ipv4Addr::new(255, 255, 255, 255), u16::MAX).to_string(), + "255.255.255.255:65535" + ); + + // Test padding. + assert_eq!( + &format!("{:16}", SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), 53)), + "1.1.1.1:53 " + ); + assert_eq!( + &format!("{:>16}", SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), 53)), + " 1.1.1.1:53" + ); +} + +#[test] +fn ipv6_socket_addr_to_string() { + // IPv4-mapped address. 
+ assert_eq!( + SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280), 8080, 0, 0) + .to_string(), + "[::ffff:192.0.2.128]:8080" + ); + + // IPv4-compatible address. + assert_eq!( + SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280), 8080, 0, 0).to_string(), + "[::192.0.2.128]:8080" + ); + + // IPv6 address with no zero segments. + assert_eq!( + SocketAddrV6::new(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15), 80, 0, 0).to_string(), + "[8:9:a:b:c:d:e:f]:80" + ); + + // Shortest possible IPv6 length. + assert_eq!(SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, 0, 0, 0).to_string(), "[::]:0"); + + // Longest possible IPv6 length. + assert_eq!( + SocketAddrV6::new( + Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888), + u16::MAX, + u32::MAX, + u32::MAX, + ) + .to_string(), + "[1111:2222:3333:4444:5555:6666:7777:8888%4294967295]:65535" + ); + + // Test padding. + assert_eq!( + &format!("{:22}", SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9, 0, 0)), + "[1:2:3:4:5:6:7:8]:9 " + ); + assert_eq!( + &format!("{:>22}", SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9, 0, 0)), + " [1:2:3:4:5:6:7:8]:9" + ); +} + +#[test] +fn set_ip() { + fn ip4(low: u8) -> Ipv4Addr { + Ipv4Addr::new(77, 88, 21, low) + } + fn ip6(low: u16) -> Ipv6Addr { + Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, low) + } + + let mut v4 = SocketAddrV4::new(ip4(11), 80); + assert_eq!(v4.ip(), &ip4(11)); + v4.set_ip(ip4(12)); + assert_eq!(v4.ip(), &ip4(12)); + + let mut addr = SocketAddr::V4(v4); + assert_eq!(addr.ip(), IpAddr::V4(ip4(12))); + addr.set_ip(IpAddr::V4(ip4(13))); + assert_eq!(addr.ip(), IpAddr::V4(ip4(13))); + addr.set_ip(IpAddr::V6(ip6(14))); + assert_eq!(addr.ip(), IpAddr::V6(ip6(14))); + + let mut v6 = SocketAddrV6::new(ip6(1), 80, 0, 0); + assert_eq!(v6.ip(), &ip6(1)); + v6.set_ip(ip6(2)); + assert_eq!(v6.ip(), &ip6(2)); + + let mut addr = SocketAddr::V6(v6); + assert_eq!(addr.ip(), IpAddr::V6(ip6(2))); + addr.set_ip(IpAddr::V6(ip6(3))); + assert_eq!(addr.ip(), IpAddr::V6(ip6(3))); + addr.set_ip(IpAddr::V4(ip4(4))); + assert_eq!(addr.ip(), IpAddr::V4(ip4(4))); +} + +#[test] +fn set_port() { + let mut v4 = SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80); + assert_eq!(v4.port(), 80); + v4.set_port(443); + assert_eq!(v4.port(), 443); + + let mut addr = SocketAddr::V4(v4); + assert_eq!(addr.port(), 443); + addr.set_port(8080); + assert_eq!(addr.port(), 8080); + + let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 0, 0); + assert_eq!(v6.port(), 80); + v6.set_port(443); + assert_eq!(v6.port(), 443); + + let mut addr = SocketAddr::V6(v6); + assert_eq!(addr.port(), 443); + addr.set_port(8080); + assert_eq!(addr.port(), 8080); +} + +#[test] +fn set_flowinfo() { + let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 10, 0); + assert_eq!(v6.flowinfo(), 10); + v6.set_flowinfo(20); + assert_eq!(v6.flowinfo(), 20); +} + +#[test] +fn set_scope_id() { + let mut v6 = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 80, 0, 10); + assert_eq!(v6.scope_id(), 10); + v6.set_scope_id(20); + assert_eq!(v6.scope_id(), 20); +} + +#[test] +fn is_v4() { + let v4 = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80)); + assert!(v4.is_ipv4()); + assert!(!v4.is_ipv6()); +} + +#[test] +fn is_v6() { + let v6 = SocketAddr::V6(SocketAddrV6::new( + Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), + 80, + 10, + 0, + )); + assert!(!v6.is_ipv4()); + assert!(v6.is_ipv6()); +} + +#[test] +fn 
socket_v4_to_str() { + let socket = SocketAddrV4::new(Ipv4Addr::new(192, 168, 0, 1), 8080); + + assert_eq!(format!("{socket}"), "192.168.0.1:8080"); + assert_eq!(format!("{socket:<20}"), "192.168.0.1:8080 "); + assert_eq!(format!("{socket:>20}"), " 192.168.0.1:8080"); + assert_eq!(format!("{socket:^20}"), " 192.168.0.1:8080 "); + assert_eq!(format!("{socket:.10}"), "192.168.0."); +} + +#[test] +fn socket_v6_to_str() { + let mut socket = SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0); + + assert_eq!(format!("{socket}"), "[2a02:6b8:0:1::1]:53"); + assert_eq!(format!("{socket:<24}"), "[2a02:6b8:0:1::1]:53 "); + assert_eq!(format!("{socket:>24}"), " [2a02:6b8:0:1::1]:53"); + assert_eq!(format!("{socket:^24}"), " [2a02:6b8:0:1::1]:53 "); + assert_eq!(format!("{socket:.15}"), "[2a02:6b8:0:1::"); + + socket.set_scope_id(5); + + assert_eq!(format!("{socket}"), "[2a02:6b8:0:1::1%5]:53"); + assert_eq!(format!("{socket:<24}"), "[2a02:6b8:0:1::1%5]:53 "); + assert_eq!(format!("{socket:>24}"), " [2a02:6b8:0:1::1%5]:53"); + assert_eq!(format!("{socket:^24}"), " [2a02:6b8:0:1::1%5]:53 "); + assert_eq!(format!("{socket:.18}"), "[2a02:6b8:0:1::1%5"); +} + +#[test] +fn compare() { + let v4_1 = "224.120.45.1:23456".parse::<SocketAddrV4>().unwrap(); + let v4_2 = "224.210.103.5:12345".parse::<SocketAddrV4>().unwrap(); + let v4_3 = "224.210.103.5:23456".parse::<SocketAddrV4>().unwrap(); + let v6_1 = "[2001:db8:f00::1002]:23456".parse::<SocketAddrV6>().unwrap(); + let v6_2 = "[2001:db8:f00::2001]:12345".parse::<SocketAddrV6>().unwrap(); + let v6_3 = "[2001:db8:f00::2001]:23456".parse::<SocketAddrV6>().unwrap(); + + // equality + assert_eq!(v4_1, v4_1); + assert_eq!(v6_1, v6_1); + assert_eq!(SocketAddr::V4(v4_1), SocketAddr::V4(v4_1)); + assert_eq!(SocketAddr::V6(v6_1), SocketAddr::V6(v6_1)); + assert!(v4_1 != v4_2); + assert!(v6_1 != v6_2); + + // compare different addresses + assert!(v4_1 < v4_2); + assert!(v6_1 < v6_2); + assert!(v4_2 > v4_1); + assert!(v6_2 > v6_1); + + // compare the same address with different ports + assert!(v4_2 < v4_3); + assert!(v6_2 < v6_3); + assert!(v4_3 > v4_2); + assert!(v6_3 > v6_2); + + // compare different addresses with the same port + assert!(v4_1 < v4_3); + assert!(v6_1 < v6_3); + assert!(v4_3 > v4_1); + assert!(v6_3 > v6_1); + + // compare with an inferred right-hand side + assert_eq!(v4_1, "224.120.45.1:23456".parse().unwrap()); + assert_eq!(v6_1, "[2001:db8:f00::1002]:23456".parse().unwrap()); + assert_eq!(SocketAddr::V4(v4_1), "224.120.45.1:23456".parse().unwrap()); +} diff --git a/library/core/tests/num/dec2flt/mod.rs b/library/core/tests/num/dec2flt/mod.rs index c4e105cba..a2b9bb551 100644 --- a/library/core/tests/num/dec2flt/mod.rs +++ b/library/core/tests/num/dec2flt/mod.rs @@ -15,7 +15,7 @@ macro_rules! 
test_literal { for input in inputs { assert_eq!(input.parse(), Ok(x64)); assert_eq!(input.parse(), Ok(x32)); - let neg_input = &format!("-{input}"); + let neg_input = format!("-{input}"); assert_eq!(neg_input.parse(), Ok(-x64)); assert_eq!(neg_input.parse(), Ok(-x32)); } diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs index 80d30f14c..c02cd99cc 100644 --- a/library/core/tests/ptr.rs +++ b/library/core/tests/ptr.rs @@ -25,7 +25,7 @@ fn test() { snd: isize, } let mut p = Pair { fst: 10, snd: 20 }; - let pptr: *mut Pair = &mut p; + let pptr: *mut Pair = addr_of_mut!(p); let iptr: *mut isize = pptr as *mut isize; assert_eq!(*iptr, 10); *iptr = 30; @@ -1070,8 +1070,8 @@ fn swap_copy_untyped() { let mut x = 5u8; let mut y = 6u8; - let ptr1 = &mut x as *mut u8 as *mut bool; - let ptr2 = &mut y as *mut u8 as *mut bool; + let ptr1 = addr_of_mut!(x).cast::<bool>(); + let ptr2 = addr_of_mut!(y).cast::<bool>(); unsafe { ptr::swap(ptr1, ptr2); diff --git a/library/portable-simd/crates/core_simd/examples/spectral_norm.rs b/library/portable-simd/crates/core_simd/examples/spectral_norm.rs index 012182e09..d576bd0cc 100644 --- a/library/portable-simd/crates/core_simd/examples/spectral_norm.rs +++ b/library/portable-simd/crates/core_simd/examples/spectral_norm.rs @@ -69,7 +69,7 @@ fn dot(x: &[f64], y: &[f64]) -> f64 { #[cfg(test)] #[test] fn test() { - assert_eq!(&format!("{:.9}", spectral_norm(100)), "1.274219991"); + assert_eq!(format!("{:.9}", spectral_norm(100)), "1.274219991"); } fn main() { diff --git a/library/portable-simd/crates/core_simd/src/vector.rs b/library/portable-simd/crates/core_simd/src/vector.rs index 78f56402e..d52d1ac4d 100644 --- a/library/portable-simd/crates/core_simd/src/vector.rs +++ b/library/portable-simd/crates/core_simd/src/vector.rs @@ -28,7 +28,7 @@ use crate::simd::{ /// let zm_add = a0.zip(a1).map(|(lhs, rhs)| lhs + rhs); /// let zm_mul = a0.zip(a1).map(|(lhs, rhs)| lhs * rhs); /// -/// // `Simd<T, N>` implements `From<[T; N]> +/// // `Simd<T, N>` implements `From<[T; N]>` /// let (v0, v1) = (Simd::from(a0), Simd::from(a1)); /// // Which means arrays implement `Into<Simd<T, N>>`. /// assert_eq!(v0 + v1, zm_add.into()); diff --git a/library/proc_macro/src/bridge/rpc.rs b/library/proc_macro/src/bridge/rpc.rs index e9d7a46c0..5b1bfb309 100644 --- a/library/proc_macro/src/bridge/rpc.rs +++ b/library/proc_macro/src/bridge/rpc.rs @@ -1,7 +1,6 @@ //! Serialization for client-server communication. use std::any::Any; -use std::char; use std::io::Write; use std::num::NonZeroU32; use std::str; diff --git a/library/proc_macro/src/lib.rs b/library/proc_macro/src/lib.rs index f0e4f5d8a..938935771 100644 --- a/library/proc_macro/src/lib.rs +++ b/library/proc_macro/src/lib.rs @@ -74,6 +74,7 @@ pub fn is_available() -> bool { /// /// This is both the input and output of `#[proc_macro]`, `#[proc_macro_attribute]` /// and `#[proc_macro_derive]` definitions. +#[rustc_diagnostic_item = "TokenStream"] #[stable(feature = "proc_macro_lib", since = "1.15.0")] #[derive(Clone)] pub struct TokenStream(Option<bridge::client::TokenStream>); @@ -581,7 +582,7 @@ impl fmt::Debug for Span { /// A line-column pair representing the start or end of a `Span`. #[unstable(feature = "proc_macro_span", issue = "54725")] -#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct LineColumn { /// The 1-indexed line in the source file on which the span starts or ends (inclusive). 
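The `addr_of_mut!` rewrites in `ptr.rs` above are more than style: `&mut x as *mut _` materializes an intermediate `&mut`, asserting unique access at that instant, while the macro produces the raw pointer directly from the place expression. A minimal sketch of the difference:

```rust
use std::ptr::{self, addr_of_mut};

fn main() {
    let mut x = 5u8;
    let mut y = 6u8;
    // No `&mut u8` is ever created here, so no uniqueness claims are made
    // beyond what the raw pointers themselves carry.
    let ptr1 = addr_of_mut!(x);
    let ptr2 = addr_of_mut!(y);
    unsafe { ptr::swap(ptr1, ptr2) };
    assert_eq!((x, y), (6, 5));
}
```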
#[unstable(feature = "proc_macro_span", issue = "54725")] diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index adf521d9b..598a4bf92 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -15,8 +15,8 @@ cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] } panic_unwind = { path = "../panic_unwind", optional = true } panic_abort = { path = "../panic_abort" } core = { path = "../core" } -libc = { version = "0.2.138", default-features = false, features = ['rustc-dep-of-std'] } -compiler_builtins = { version = "0.1.85" } +libc = { version = "0.2.139", default-features = false, features = ['rustc-dep-of-std'] } +compiler_builtins = { version = "0.1.87" } profiler_builtins = { path = "../profiler_builtins", optional = true } unwind = { path = "../unwind" } hashbrown = { version = "0.12", default-features = false, features = ['rustc-dep-of-std'] } @@ -43,7 +43,7 @@ dlmalloc = { version = "0.2.3", features = ['rustc-dep-of-std'] } fortanix-sgx-abi = { version = "0.5.0", features = ['rustc-dep-of-std'] } [target.'cfg(target_os = "hermit")'.dependencies] -hermit-abi = { version = "0.2.6", features = ['rustc-dep-of-std'] } +hermit-abi = { version = "0.3.0", features = ['rustc-dep-of-std'] } [target.wasm32-wasi.dependencies] wasi = { version = "0.11.0", features = ['rustc-dep-of-std'], default-features = false } diff --git a/library/std/build.rs b/library/std/build.rs index 8b1a06ee7..ea8796675 100644 --- a/library/std/build.rs +++ b/library/std/build.rs @@ -31,6 +31,7 @@ fn main() { || target.contains("espidf") || target.contains("solid") || target.contains("nintendo-3ds") + || target.contains("nto") { // These platforms don't have any special requirements. } else { diff --git a/library/std/src/collections/hash/map.rs b/library/std/src/collections/hash/map.rs index df4903588..742c4cc7c 100644 --- a/library/std/src/collections/hash/map.rs +++ b/library/std/src/collections/hash/map.rs @@ -238,7 +238,7 @@ impl<K, V> HashMap<K, V, RandomState> { /// /// The hash map will be able to hold at least `capacity` elements without /// reallocating. This method is allowed to allocate for more elements than - /// `capacity`. If `capacity` is 0, the hash set will not allocate. + /// `capacity`. If `capacity` is 0, the hash map will not allocate. /// /// # Examples /// diff --git a/library/std/src/f32.rs b/library/std/src/f32.rs index 4e3007624..6b1f0cba8 100644 --- a/library/std/src/f32.rs +++ b/library/std/src/f32.rs @@ -69,8 +69,8 @@ impl f32 { unsafe { intrinsics::ceilf32(self) } } - /// Returns the nearest integer to `self`. Round half-way cases away from - /// `0.0`. + /// Returns the nearest integer to `self`. If a value is half-way between two + /// integers, round away from `0.0`. /// /// # Examples /// diff --git a/library/std/src/f64.rs b/library/std/src/f64.rs index ec67fdad4..16359766b 100644 --- a/library/std/src/f64.rs +++ b/library/std/src/f64.rs @@ -69,8 +69,8 @@ impl f64 { unsafe { intrinsics::ceilf64(self) } } - /// Returns the nearest integer to `self`. Round half-way cases away from - /// `0.0`. + /// Returns the nearest integer to `self`. If a value is half-way between two + /// integers, round away from `0.0`. /// /// # Examples /// diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs index 286ad68fd..c550378e7 100644 --- a/library/std/src/fs.rs +++ b/library/std/src/fs.rs @@ -334,6 +334,10 @@ impl File { /// /// See the [`OpenOptions::open`] method for more details. 
/// + /// If you only need to read the entire file contents, + /// consider [`std::fs::read()`][self::read] or + /// [`std::fs::read_to_string()`][self::read_to_string] instead. + /// /// # Errors /// /// This function will return an error if `path` does not already exist. @@ -343,9 +347,12 @@ impl File { /// /// ```no_run /// use std::fs::File; + /// use std::io::Read; /// /// fn main() -> std::io::Result<()> { /// let mut f = File::open("foo.txt")?; + /// let mut data = vec![]; + /// f.read_to_end(&mut data)?; /// Ok(()) /// } /// ``` @@ -361,16 +368,20 @@ impl File { /// /// Depending on the platform, this function may fail if the /// full directory path does not exist. - /// /// See the [`OpenOptions::open`] function for more details. /// + /// See also [`std::fs::write()`][self::write] for a simple function to + /// create a file with given data. + /// /// # Examples /// /// ```no_run /// use std::fs::File; + /// use std::io::Write; /// /// fn main() -> std::io::Result<()> { /// let mut f = File::create("foo.txt")?; + /// f.write_all(&1234_u32.to_be_bytes())?; /// Ok(()) /// } /// ``` @@ -397,9 +408,11 @@ impl File { /// #![feature(file_create_new)] /// /// use std::fs::File; + /// use std::io::Write; /// /// fn main() -> std::io::Result<()> { /// let mut f = File::create_new("foo.txt")?; + /// f.write_all("Hello, world!".as_bytes())?; /// Ok(()) /// } /// ``` @@ -426,9 +439,11 @@ impl File { /// /// ```no_run /// use std::fs::File; + /// use std::io::Write; /// /// fn main() -> std::io::Result<()> { /// let mut f = File::options().append(true).open("example.log")?; + /// writeln!(&mut f, "new line")?; /// Ok(()) /// } /// ``` @@ -966,6 +981,9 @@ impl OpenOptions { /// In order for the file to be created, [`OpenOptions::write`] or /// [`OpenOptions::append`] access must be used. /// + /// See also [`std::fs::write()`][self::write] for a simple function to + /// create a file with given data. + /// /// # Examples /// /// ```no_run diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs index eb582be01..909d9bf40 100644 --- a/library/std/src/fs/tests.rs +++ b/library/std/src/fs/tests.rs @@ -12,6 +12,8 @@ use crate::time::{Duration, Instant}; use rand::RngCore; +#[cfg(target_os = "macos")] +use crate::ffi::{c_char, c_int}; #[cfg(unix)] use crate::os::unix::fs::symlink as symlink_dir; #[cfg(unix)] @@ -24,8 +26,6 @@ use crate::os::windows::fs::{symlink_dir, symlink_file}; use crate::sys::fs::symlink_junction; #[cfg(target_os = "macos")] use crate::sys::weak::weak; -#[cfg(target_os = "macos")] -use libc::{c_char, c_int}; macro_rules! check { ($e:expr) => { @@ -1595,3 +1595,19 @@ fn test_read_dir_infinite_loop() { // Check for duplicate errors assert!(dir.filter(|e| e.is_err()).take(2).count() < 2); } + +#[test] +fn rename_directory() { + let tmpdir = tmpdir(); + let old_path = tmpdir.join("foo/bar/baz"); + fs::create_dir_all(&old_path).unwrap(); + let test_file = &old_path.join("temp.txt"); + + File::create(test_file).unwrap(); + + let new_path = tmpdir.join("quux/blat"); + fs::create_dir_all(&new_path).unwrap(); + fs::rename(&old_path, &new_path.join("newdir")).unwrap(); + assert!(new_path.join("newdir").is_dir()); + assert!(new_path.join("newdir/temp.txt").exists()); +} diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs index 3cabf2449..7f07e4fdd 100644 --- a/library/std/src/io/error.rs +++ b/library/std/src/io/error.rs @@ -88,12 +88,23 @@ impl From<alloc::ffi::NulError> for Error { // doesn't accidentally get printed.
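The cross-references added above point at the one-call convenience functions; a small sketch of using them instead of opening a `File` by hand:

```
use std::{fs, io};

fn main() -> io::Result<()> {
    // `fs::write` creates (or truncates) the file and writes the bytes;
    // `fs::read_to_string` opens it, reads to EOF, and closes it again.
    fs::write("foo.txt", "Hello, world!")?;
    assert_eq!(fs::read_to_string("foo.txt")?, "Hello, world!");
    Ok(())
}
```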
#[cfg_attr(test, derive(Debug))] enum ErrorData<C> { - Os(i32), + Os(RawOsError), Simple(ErrorKind), SimpleMessage(&'static SimpleMessage), Custom(C), } +/// The type of raw OS error codes returned by [`Error::raw_os_error`]. +/// +/// This is an [`i32`] on all currently supported platforms, but platforms +/// added in the future (such as UEFI) may use a different primitive type like +/// [`usize`]. Use `as` or [`into`] conversions where applicable to ensure maximum +/// portability. +/// +/// [`into`]: Into::into +#[unstable(feature = "raw_os_error_ty", issue = "107792")] +pub type RawOsError = i32; + // `#[repr(align(4))]` is probably redundant, it should have that value or // higher already. We include it just because repr_bitpacked.rs's encoding // requires an alignment >= 4 (note that `#[repr(align)]` will not reduce the @@ -579,7 +590,7 @@ impl Error { #[must_use] #[inline] pub fn last_os_error() -> Error { - Error::from_raw_os_error(sys::os::errno() as i32) + Error::from_raw_os_error(sys::os::errno()) } /// Creates a new instance of an [`Error`] from a particular OS error code. @@ -610,7 +621,7 @@ impl Error { #[stable(feature = "rust1", since = "1.0.0")] #[must_use] #[inline] - pub fn from_raw_os_error(code: i32) -> Error { + pub fn from_raw_os_error(code: RawOsError) -> Error { Error { repr: Repr::new_os(code) } } @@ -646,7 +657,7 @@ impl Error { #[stable(feature = "rust1", since = "1.0.0")] #[must_use] #[inline] - pub fn raw_os_error(&self) -> Option<i32> { + pub fn raw_os_error(&self) -> Option<RawOsError> { match self.repr.data() { ErrorData::Os(i) => Some(i), ErrorData::Custom(..) => None, diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs index 358148405..f94f88bac 100644 --- a/library/std/src/io/error/repr_bitpacked.rs +++ b/library/std/src/io/error/repr_bitpacked.rs @@ -102,7 +102,7 @@ //! to use a pointer type to store something that may hold an integer, some of //! the time. -use super::{Custom, ErrorData, ErrorKind, SimpleMessage}; +use super::{Custom, ErrorData, ErrorKind, RawOsError, SimpleMessage}; use alloc::boxed::Box; use core::marker::PhantomData; use core::mem::{align_of, size_of}; @@ -172,7 +172,7 @@ impl Repr { } #[inline] - pub(super) fn new_os(code: i32) -> Self { + pub(super) fn new_os(code: RawOsError) -> Self { let utagged = ((code as usize) << 32) | TAG_OS; // Safety: `TAG_OS` is not zero, so the result of the `|` is not 0. let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData); @@ -250,7 +250,7 @@ where let bits = ptr.as_ptr().addr(); match bits & TAG_MASK { TAG_OS => { - let code = ((bits as i64) >> 32) as i32; + let code = ((bits as i64) >> 32) as RawOsError; ErrorData::Os(code) } TAG_SIMPLE => { @@ -374,6 +374,9 @@ static_assert!((TAG_MASK + 1).is_power_of_two()); static_assert!(align_of::<SimpleMessage>() >= TAG_MASK + 1); static_assert!(align_of::<Custom>() >= TAG_MASK + 1); +// `RawOsError` must be an alias for `i32`. +const _: fn(RawOsError) -> i32 = |os| os; + static_assert!(@usize_eq: TAG_MASK & TAG_SIMPLE_MESSAGE, TAG_SIMPLE_MESSAGE); static_assert!(@usize_eq: TAG_MASK & TAG_CUSTOM, TAG_CUSTOM); static_assert!(@usize_eq: TAG_MASK & TAG_OS, TAG_OS); diff --git a/library/std/src/io/error/repr_unpacked.rs b/library/std/src/io/error/repr_unpacked.rs index d6ad55b99..093fde337 100644 --- a/library/std/src/io/error/repr_unpacked.rs +++ b/library/std/src/io/error/repr_unpacked.rs @@ -2,7 +2,7 @@ //!
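A sketch of how the new alias is meant to be used; note this assumes a nightly toolchain, since `RawOsError` is gated behind `feature(raw_os_error_ty)` (tracking issue #107792):

```
#![feature(raw_os_error_ty)]

use std::io::{Error, RawOsError};

fn main() {
    // Spelling the code as `RawOsError` rather than `i32` keeps the
    // round-trip portable if a future platform widens the type.
    let code: RawOsError = 2; // ENOENT on most Unix platforms
    let err = Error::from_raw_os_error(code);
    assert_eq!(err.raw_os_error(), Some(code));
}
```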
non-64bit targets, where the packed 64 bit representation wouldn't work, and //! would have no benefit. -use super::{Custom, ErrorData, ErrorKind, SimpleMessage}; +use super::{Custom, ErrorData, ErrorKind, RawOsError, SimpleMessage}; use alloc::boxed::Box; type Inner = ErrorData<Box<Custom>>; @@ -18,7 +18,7 @@ impl Repr { Self(Inner::Custom(b)) } #[inline] - pub(super) fn new_os(code: i32) -> Self { + pub(super) fn new_os(code: RawOsError) -> Self { Self(Inner::Os(code)) } #[inline] diff --git a/library/std/src/io/error/tests.rs b/library/std/src/io/error/tests.rs index 16c634e9a..36d52aef0 100644 --- a/library/std/src/io/error/tests.rs +++ b/library/std/src/io/error/tests.rs @@ -71,7 +71,7 @@ fn test_const() { #[test] fn test_os_packing() { - for code in -20i32..20i32 { + for code in -20..20 { let e = Error::from_raw_os_error(code); assert_eq!(e.raw_os_error(), Some(code)); assert_matches!( @@ -190,5 +190,5 @@ fn test_std_io_error_downcast() { let io_error = io_error.downcast::<E>().unwrap_err(); assert_eq!(SIMPLE_MESSAGE.kind, io_error.kind()); - assert_eq!(SIMPLE_MESSAGE.message, &*format!("{io_error}")); + assert_eq!(SIMPLE_MESSAGE.message, format!("{io_error}")); } diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs index de528e853..b2b6d8613 100644 --- a/library/std/src/io/mod.rs +++ b/library/std/src/io/mod.rs @@ -262,6 +262,8 @@ use crate::sys_common::memchr; #[stable(feature = "bufwriter_into_parts", since = "1.56.0")] pub use self::buffered::WriterPanicked; +#[unstable(feature = "raw_os_error_ty", issue = "107792")] +pub use self::error::RawOsError; pub(crate) use self::stdio::attempt_print_to_stderr; #[unstable(feature = "internal_output_capture", issue = "none")] #[doc(no_inline, hidden)] diff --git a/library/std/src/keyword_docs.rs b/library/std/src/keyword_docs.rs index e35145c4a..203c490fa 100644 --- a/library/std/src/keyword_docs.rs +++ b/library/std/src/keyword_docs.rs @@ -1568,7 +1568,7 @@ mod static_keyword {} /// /// # Style conventions /// -/// Structs are always written in CamelCase, with few exceptions. While the trailing comma on a +/// Structs are always written in UpperCamelCase, with few exceptions. While the trailing comma on a /// struct's list of fields can be omitted, it's usually kept for convenience in adding and /// removing fields down the line. 
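A one-struct illustration of the conventions the keyword docs describe, an UpperCamelCase name and a kept trailing comma:

```
struct GroupConfig {
    name: String,
    max_members: u32, // trailing comma makes later additions cheaper
}

fn main() {
    let g = GroupConfig { name: String::from("std"), max_members: 8 };
    assert_eq!((g.name.as_str(), g.max_members), ("std", 8));
}
```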
/// diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index a7e13f5b8..b62f3ad29 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -232,13 +232,13 @@ all(target_vendor = "fortanix", target_env = "sgx"), feature(slice_index_methods, coerce_unsized, sgx_platform) )] +#![cfg_attr(windows, feature(round_char_boundary))] // // Language features: #![feature(alloc_error_handler)] #![feature(allocator_internals)] #![feature(allow_internal_unsafe)] #![feature(allow_internal_unstable)] -#![feature(box_syntax)] #![feature(c_unwind)] #![feature(cfg_target_thread_local)] #![feature(concat_idents)] @@ -274,13 +274,9 @@ #![feature(utf8_chunks)] // // Library features (core): -#![feature(array_error_internals)] #![feature(atomic_mut_ptr)] -#![feature(char_error_internals)] #![feature(char_internals)] #![feature(core_intrinsics)] -#![feature(cstr_from_bytes_until_nul)] -#![feature(cstr_internals)] #![feature(duration_constants)] #![feature(error_generic_member_access)] #![feature(error_in_core)] @@ -292,7 +288,8 @@ #![feature(float_next_up_down)] #![feature(hasher_prefixfree_extras)] #![feature(hashmap_internals)] -#![feature(int_error_internals)] +#![feature(ip)] +#![feature(ip_in_core)] #![feature(is_some_and)] #![feature(maybe_uninit_slice)] #![feature(maybe_uninit_write_slice)] @@ -359,7 +356,6 @@ #![feature(const_ip)] #![feature(const_ipv4)] #![feature(const_ipv6)] -#![feature(const_socketaddr)] #![feature(thread_local_internals)] // #![default_lib_allocator] diff --git a/library/std/src/net/ip_addr.rs b/library/std/src/net/ip_addr.rs index 07f08c1b5..e167fbd1b 100644 --- a/library/std/src/net/ip_addr.rs +++ b/library/std/src/net/ip_addr.rs @@ -2,2101 +2,40 @@ #[cfg(all(test, not(target_os = "emscripten")))] mod tests; -use crate::cmp::Ordering; -use crate::fmt::{self, Write}; -use crate::mem::transmute; use crate::sys::net::netc as c; use crate::sys_common::{FromInner, IntoInner}; -use super::display_buffer::DisplayBuffer; - -/// An IP address, either IPv4 or IPv6. -/// -/// This enum can contain either an [`Ipv4Addr`] or an [`Ipv6Addr`], see their -/// respective documentation for more details. -/// -/// # Examples -/// -/// ``` -/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; -/// -/// let localhost_v4 = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); -/// let localhost_v6 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); -/// -/// assert_eq!("127.0.0.1".parse(), Ok(localhost_v4)); -/// assert_eq!("::1".parse(), Ok(localhost_v6)); -/// -/// assert_eq!(localhost_v4.is_ipv6(), false); -/// assert_eq!(localhost_v4.is_ipv4(), true); -/// ``` -#[cfg_attr(not(test), rustc_diagnostic_item = "IpAddr")] #[stable(feature = "ip_addr", since = "1.7.0")] -#[derive(Copy, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] -pub enum IpAddr { - /// An IPv4 address. - #[stable(feature = "ip_addr", since = "1.7.0")] - V4(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv4Addr), - /// An IPv6 address. - #[stable(feature = "ip_addr", since = "1.7.0")] - V6(#[stable(feature = "ip_addr", since = "1.7.0")] Ipv6Addr), -} +pub use core::net::IpAddr; -/// An IPv4 address. -/// -/// IPv4 addresses are defined as 32-bit integers in [IETF RFC 791]. -/// They are usually represented as four octets. -/// -/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses. -/// -/// [IETF RFC 791]: https://tools.ietf.org/html/rfc791 -/// -/// # Textual representation -/// -/// `Ipv4Addr` provides a [`FromStr`] implementation. 
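That `FromStr` impl is what the dot-decimal rules described next constrain; a quick stable-Rust check:

```
use std::net::Ipv4Addr;

fn main() {
    // Plain dot-decimal notation parses...
    assert_eq!("192.0.2.1".parse(), Ok(Ipv4Addr::new(192, 0, 2, 1)));
    // ...while leading-zero (octal-looking) and hex octets are rejected.
    assert!("192.0.02.1".parse::<Ipv4Addr>().is_err());
    assert!("0xc0.0.2.1".parse::<Ipv4Addr>().is_err());
}
```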
The four octets are in decimal -/// notation, divided by `.` (this is called "dot-decimal notation"). -/// Notably, octal numbers (which are indicated with a leading `0`) and hexadecimal numbers (which -/// are indicated with a leading `0x`) are not allowed per [IETF RFC 6943]. -/// -/// [IETF RFC 6943]: https://tools.ietf.org/html/rfc6943#section-3.1.1 -/// [`FromStr`]: crate::str::FromStr -/// -/// # Examples -/// -/// ``` -/// use std::net::Ipv4Addr; -/// -/// let localhost = Ipv4Addr::new(127, 0, 0, 1); -/// assert_eq!("127.0.0.1".parse(), Ok(localhost)); -/// assert_eq!(localhost.is_loopback(), true); -/// assert!("012.004.002.000".parse::<Ipv4Addr>().is_err()); // all octets are in octal -/// assert!("0000000.0.0.0".parse::<Ipv4Addr>().is_err()); // first octet is a zero in octal -/// assert!("0xcb.0x0.0x71.0x00".parse::<Ipv4Addr>().is_err()); // all octets are in hex -/// ``` -#[derive(Copy, Clone, PartialEq, Eq, Hash)] #[stable(feature = "rust1", since = "1.0.0")] -pub struct Ipv4Addr { - octets: [u8; 4], -} +pub use core::net::{Ipv4Addr, Ipv6Addr}; -/// An IPv6 address. -/// -/// IPv6 addresses are defined as 128-bit integers in [IETF RFC 4291]. -/// They are usually represented as eight 16-bit segments. -/// -/// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291 -/// -/// # Embedding IPv4 Addresses -/// -/// See [`IpAddr`] for a type encompassing both IPv4 and IPv6 addresses. -/// -/// To assist in the transition from IPv4 to IPv6 two types of IPv6 addresses that embed an IPv4 address were defined: -/// IPv4-compatible and IPv4-mapped addresses. Of these IPv4-compatible addresses have been officially deprecated. -/// -/// Both types of addresses are not assigned any special meaning by this implementation, -/// other than what the relevant standards prescribe. This means that an address like `::ffff:127.0.0.1`, -/// while representing an IPv4 loopback address, is not itself an IPv6 loopback address; only `::1` is. -/// To handle these so called "IPv4-in-IPv6" addresses, they have to first be converted to their canonical IPv4 address. -/// -/// ### IPv4-Compatible IPv6 Addresses -/// -/// IPv4-compatible IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.1], and have been officially deprecated. -/// The RFC describes the format of an "IPv4-Compatible IPv6 address" as follows: -/// -/// ```text -/// | 80 bits | 16 | 32 bits | -/// +--------------------------------------+--------------------------+ -/// |0000..............................0000|0000| IPv4 address | -/// +--------------------------------------+----+---------------------+ -/// ``` -/// So `::a.b.c.d` would be an IPv4-compatible IPv6 address representing the IPv4 address `a.b.c.d`. -/// -/// To convert from an IPv4 address to an IPv4-compatible IPv6 address, use [`Ipv4Addr::to_ipv6_compatible`]. -/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-compatible IPv6 address to the canonical IPv4 address. -/// -/// [IETF RFC 4291 Section 2.5.5.1]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.1 -/// -/// ### IPv4-Mapped IPv6 Addresses -/// -/// IPv4-mapped IPv6 addresses are defined in [IETF RFC 4291 Section 2.5.5.2]. 
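None of the documentation deleted in this file is lost: the types and their docs moved to `core::net`, and `std::net` now re-exports them. A hedged sketch of what that enables on nightly, where `feature(ip_in_core)` exposes the same types without `std` (compiled as a library):

```
#![feature(ip_in_core)]
#![no_std]

use core::net::{IpAddr, Ipv4Addr};

// The very same types that `std::net` re-exports.
pub const LOCALHOST: IpAddr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
```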
-/// The RFC describes the format of an "IPv4-Mapped IPv6 address" as follows: -/// -/// ```text -/// | 80 bits | 16 | 32 bits | -/// +--------------------------------------+--------------------------+ -/// |0000..............................0000|FFFF| IPv4 address | -/// +--------------------------------------+----+---------------------+ -/// ``` -/// So `::ffff:a.b.c.d` would be an IPv4-mapped IPv6 address representing the IPv4 address `a.b.c.d`. -/// -/// To convert from an IPv4 address to an IPv4-mapped IPv6 address, use [`Ipv4Addr::to_ipv6_mapped`]. -/// Use [`Ipv6Addr::to_ipv4`] to convert an IPv4-mapped IPv6 address to the canonical IPv4 address. -/// Note that this will also convert the IPv6 loopback address `::1` to `0.0.0.1`. Use -/// [`Ipv6Addr::to_ipv4_mapped`] to avoid this. -/// -/// [IETF RFC 4291 Section 2.5.5.2]: https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.2 -/// -/// # Textual representation -/// -/// `Ipv6Addr` provides a [`FromStr`] implementation. There are many ways to represent -/// an IPv6 address in text, but in general, each segments is written in hexadecimal -/// notation, and segments are separated by `:`. For more information, see -/// [IETF RFC 5952]. -/// -/// [`FromStr`]: crate::str::FromStr -/// [IETF RFC 5952]: https://tools.ietf.org/html/rfc5952 -/// -/// # Examples -/// -/// ``` -/// use std::net::Ipv6Addr; -/// -/// let localhost = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); -/// assert_eq!("::1".parse(), Ok(localhost)); -/// assert_eq!(localhost.is_loopback(), true); -/// ``` -#[derive(Copy, Clone, PartialEq, Eq, Hash)] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Ipv6Addr { - octets: [u8; 16], -} - -/// Scope of an [IPv6 multicast address] as defined in [IETF RFC 7346 section 2]. -/// -/// # Stability Guarantees -/// -/// Not all possible values for a multicast scope have been assigned. -/// Future RFCs may introduce new scopes, which will be added as variants to this enum; -/// because of this the enum is marked as `#[non_exhaustive]`. -/// -/// # Examples -/// ``` -/// #![feature(ip)] -/// -/// use std::net::Ipv6Addr; -/// use std::net::Ipv6MulticastScope::*; -/// -/// // An IPv6 multicast address with global scope (`ff0e::`). -/// let address = Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0); -/// -/// // Will print "Global scope". -/// match address.multicast_scope() { -/// Some(InterfaceLocal) => println!("Interface-Local scope"), -/// Some(LinkLocal) => println!("Link-Local scope"), -/// Some(RealmLocal) => println!("Realm-Local scope"), -/// Some(AdminLocal) => println!("Admin-Local scope"), -/// Some(SiteLocal) => println!("Site-Local scope"), -/// Some(OrganizationLocal) => println!("Organization-Local scope"), -/// Some(Global) => println!("Global scope"), -/// Some(_) => println!("Unknown scope"), -/// None => println!("Not a multicast address!") -/// } -/// -/// ``` -/// -/// [IPv6 multicast address]: Ipv6Addr -/// [IETF RFC 7346 section 2]: https://tools.ietf.org/html/rfc7346#section-2 -#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)] #[unstable(feature = "ip", issue = "27709")] -#[non_exhaustive] -pub enum Ipv6MulticastScope { - /// Interface-Local scope. - InterfaceLocal, - /// Link-Local scope. - LinkLocal, - /// Realm-Local scope. - RealmLocal, - /// Admin-Local scope. - AdminLocal, - /// Site-Local scope. - SiteLocal, - /// Organization-Local scope. - OrganizationLocal, - /// Global scope. - Global, -} - -impl IpAddr { - /// Returns [`true`] for the special 'unspecified' address. 
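The mapped form described above round-trips through the stable conversion methods:

```
use std::net::{Ipv4Addr, Ipv6Addr};

fn main() {
    let v4 = Ipv4Addr::new(192, 0, 2, 255);
    // `a.b.c.d` becomes `::ffff:a.b.c.d`.
    let mapped = v4.to_ipv6_mapped();
    assert_eq!(mapped, Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x2ff));
    // `to_ipv4_mapped` only accepts `::ffff:0:0/96`, so `::1` is untouched.
    assert_eq!(mapped.to_ipv4_mapped(), Some(v4));
    assert_eq!(Ipv6Addr::LOCALHOST.to_ipv4_mapped(), None);
}
```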
- /// - /// See the documentation for [`Ipv4Addr::is_unspecified()`] and - /// [`Ipv6Addr::is_unspecified()`] for more details. - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - /// - /// assert_eq!(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)).is_unspecified(), true); - /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)).is_unspecified(), true); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(feature = "ip_shared", since = "1.12.0")] - #[must_use] - #[inline] - pub const fn is_unspecified(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_unspecified(), - IpAddr::V6(ip) => ip.is_unspecified(), - } - } - - /// Returns [`true`] if this is a loopback address. - /// - /// See the documentation for [`Ipv4Addr::is_loopback()`] and - /// [`Ipv6Addr::is_loopback()`] for more details. - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - /// - /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).is_loopback(), true); - /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1)).is_loopback(), true); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(feature = "ip_shared", since = "1.12.0")] - #[must_use] - #[inline] - pub const fn is_loopback(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_loopback(), - IpAddr::V6(ip) => ip.is_loopback(), - } - } - - /// Returns [`true`] if the address appears to be globally routable. - /// - /// See the documentation for [`Ipv4Addr::is_global()`] and - /// [`Ipv6Addr::is_global()`] for more details. - /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// - /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - /// - /// assert_eq!(IpAddr::V4(Ipv4Addr::new(80, 9, 12, 3)).is_global(), true); - /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0x1c9, 0, 0, 0xafc8, 0, 0x1)).is_global(), true); - /// ``` - #[rustc_const_unstable(feature = "const_ip", issue = "76205")] - #[unstable(feature = "ip", issue = "27709")] - #[must_use] - #[inline] - pub const fn is_global(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_global(), - IpAddr::V6(ip) => ip.is_global(), - } - } - - /// Returns [`true`] if this is a multicast address. - /// - /// See the documentation for [`Ipv4Addr::is_multicast()`] and - /// [`Ipv6Addr::is_multicast()`] for more details. - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - /// - /// assert_eq!(IpAddr::V4(Ipv4Addr::new(224, 254, 0, 0)).is_multicast(), true); - /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0)).is_multicast(), true); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(feature = "ip_shared", since = "1.12.0")] - #[must_use] - #[inline] - pub const fn is_multicast(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_multicast(), - IpAddr::V6(ip) => ip.is_multicast(), - } - } - - /// Returns [`true`] if this address is in a range designated for documentation. - /// - /// See the documentation for [`Ipv4Addr::is_documentation()`] and - /// [`Ipv6Addr::is_documentation()`] for more details. 
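As the deleted bodies show, each `IpAddr` predicate simply dispatches to the matching `Ipv4Addr`/`Ipv6Addr` method; a compact classifier built from the stable subset:

```
use std::net::IpAddr;

fn describe(ip: IpAddr) -> &'static str {
    if ip.is_loopback() {
        "loopback"
    } else if ip.is_multicast() {
        "multicast"
    } else if ip.is_unspecified() {
        "unspecified"
    } else {
        "other"
    }
}

fn main() {
    assert_eq!(describe("127.0.0.1".parse().unwrap()), "loopback");
    assert_eq!(describe("::1".parse().unwrap()), "loopback");
    assert_eq!(describe("224.254.0.0".parse().unwrap()), "multicast");
}
```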
- /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// - /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - /// - /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_documentation(), true); - /// assert_eq!( - /// IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_documentation(), - /// true - /// ); - /// ``` - #[rustc_const_unstable(feature = "const_ip", issue = "76205")] - #[unstable(feature = "ip", issue = "27709")] - #[must_use] - #[inline] - pub const fn is_documentation(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_documentation(), - IpAddr::V6(ip) => ip.is_documentation(), - } - } - - /// Returns [`true`] if this address is in a range designated for benchmarking. - /// - /// See the documentation for [`Ipv4Addr::is_benchmarking()`] and - /// [`Ipv6Addr::is_benchmarking()`] for more details. - /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// - /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - /// - /// assert_eq!(IpAddr::V4(Ipv4Addr::new(198, 19, 255, 255)).is_benchmarking(), true); - /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0)).is_benchmarking(), true); - /// ``` - #[unstable(feature = "ip", issue = "27709")] - #[must_use] - #[inline] - pub const fn is_benchmarking(&self) -> bool { - match self { - IpAddr::V4(ip) => ip.is_benchmarking(), - IpAddr::V6(ip) => ip.is_benchmarking(), - } - } - - /// Returns [`true`] if this address is an [`IPv4` address], and [`false`] - /// otherwise. - /// - /// [`IPv4` address]: IpAddr::V4 - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - /// - /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv4(), true); - /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv4(), false); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(feature = "ipaddr_checker", since = "1.16.0")] - #[must_use] - #[inline] - pub const fn is_ipv4(&self) -> bool { - matches!(self, IpAddr::V4(_)) - } - - /// Returns [`true`] if this address is an [`IPv6` address], and [`false`] - /// otherwise. - /// - /// [`IPv6` address]: IpAddr::V6 - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - /// - /// assert_eq!(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 6)).is_ipv6(), false); - /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0)).is_ipv6(), true); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(feature = "ipaddr_checker", since = "1.16.0")] - #[must_use] - #[inline] - pub const fn is_ipv6(&self) -> bool { - matches!(self, IpAddr::V6(_)) - } - - /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped IPv6 addresses, otherwise it - /// return `self` as-is. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - /// - /// assert_eq!(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).to_canonical().is_loopback(), true); - /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).is_loopback(), false); - /// assert_eq!(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1)).to_canonical().is_loopback(), true); - /// ``` - #[inline] - #[must_use = "this returns the result of the operation, \ - without modifying the original"] - #[rustc_const_unstable(feature = "const_ip", issue = "76205")] - #[unstable(feature = "ip", issue = "27709")] - pub const fn to_canonical(&self) -> IpAddr { - match self { - &v4 @ IpAddr::V4(_) => v4, - IpAddr::V6(v6) => v6.to_canonical(), - } - } -} - -impl Ipv4Addr { - /// Creates a new IPv4 address from four eight-bit octets. - /// - /// The result will represent the IP address `a`.`b`.`c`.`d`. - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// let addr = Ipv4Addr::new(127, 0, 0, 1); - /// ``` - #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] - #[stable(feature = "rust1", since = "1.0.0")] - #[must_use] - #[inline] - pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Ipv4Addr { - Ipv4Addr { octets: [a, b, c, d] } - } - - /// An IPv4 address with the address pointing to localhost: `127.0.0.1` - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// let addr = Ipv4Addr::LOCALHOST; - /// assert_eq!(addr, Ipv4Addr::new(127, 0, 0, 1)); - /// ``` - #[stable(feature = "ip_constructors", since = "1.30.0")] - pub const LOCALHOST: Self = Ipv4Addr::new(127, 0, 0, 1); - - /// An IPv4 address representing an unspecified address: `0.0.0.0` - /// - /// This corresponds to the constant `INADDR_ANY` in other languages. - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// let addr = Ipv4Addr::UNSPECIFIED; - /// assert_eq!(addr, Ipv4Addr::new(0, 0, 0, 0)); - /// ``` - #[doc(alias = "INADDR_ANY")] - #[stable(feature = "ip_constructors", since = "1.30.0")] - pub const UNSPECIFIED: Self = Ipv4Addr::new(0, 0, 0, 0); - - /// An IPv4 address representing the broadcast address: `255.255.255.255` - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// let addr = Ipv4Addr::BROADCAST; - /// assert_eq!(addr, Ipv4Addr::new(255, 255, 255, 255)); - /// ``` - #[stable(feature = "ip_constructors", since = "1.30.0")] - pub const BROADCAST: Self = Ipv4Addr::new(255, 255, 255, 255); - - /// Returns the four eight-bit integers that make up this address. - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// let addr = Ipv4Addr::new(127, 0, 0, 1); - /// assert_eq!(addr.octets(), [127, 0, 0, 1]); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(feature = "rust1", since = "1.0.0")] - #[must_use] - #[inline] - pub const fn octets(&self) -> [u8; 4] { - self.octets - } - - /// Returns [`true`] for the special 'unspecified' address (`0.0.0.0`). - /// - /// This property is defined in _UNIX Network Programming, Second Edition_, - /// W. Richard Stevens, p. 891; see also [ip7]. 
- /// - /// [ip7]: https://man7.org/linux/man-pages/man7/ip.7.html - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// assert_eq!(Ipv4Addr::new(0, 0, 0, 0).is_unspecified(), true); - /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_unspecified(), false); - /// ``` - #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] - #[stable(feature = "ip_shared", since = "1.12.0")] - #[must_use] - #[inline] - pub const fn is_unspecified(&self) -> bool { - u32::from_be_bytes(self.octets) == 0 - } - - /// Returns [`true`] if this is a loopback address (`127.0.0.0/8`). - /// - /// This property is defined by [IETF RFC 1122]. - /// - /// [IETF RFC 1122]: https://tools.ietf.org/html/rfc1122 - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// assert_eq!(Ipv4Addr::new(127, 0, 0, 1).is_loopback(), true); - /// assert_eq!(Ipv4Addr::new(45, 22, 13, 197).is_loopback(), false); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(since = "1.7.0", feature = "ip_17")] - #[must_use] - #[inline] - pub const fn is_loopback(&self) -> bool { - self.octets()[0] == 127 - } - - /// Returns [`true`] if this is a private address. - /// - /// The private address ranges are defined in [IETF RFC 1918] and include: - /// - /// - `10.0.0.0/8` - /// - `172.16.0.0/12` - /// - `192.168.0.0/16` - /// - /// [IETF RFC 1918]: https://tools.ietf.org/html/rfc1918 - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// assert_eq!(Ipv4Addr::new(10, 0, 0, 1).is_private(), true); - /// assert_eq!(Ipv4Addr::new(10, 10, 10, 10).is_private(), true); - /// assert_eq!(Ipv4Addr::new(172, 16, 10, 10).is_private(), true); - /// assert_eq!(Ipv4Addr::new(172, 29, 45, 14).is_private(), true); - /// assert_eq!(Ipv4Addr::new(172, 32, 0, 2).is_private(), false); - /// assert_eq!(Ipv4Addr::new(192, 168, 0, 2).is_private(), true); - /// assert_eq!(Ipv4Addr::new(192, 169, 0, 2).is_private(), false); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(since = "1.7.0", feature = "ip_17")] - #[must_use] - #[inline] - pub const fn is_private(&self) -> bool { - match self.octets() { - [10, ..] => true, - [172, b, ..] if b >= 16 && b <= 31 => true, - [192, 168, ..] => true, - _ => false, - } - } - - /// Returns [`true`] if the address is link-local (`169.254.0.0/16`). - /// - /// This property is defined by [IETF RFC 3927]. - /// - /// [IETF RFC 3927]: https://tools.ietf.org/html/rfc3927 - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// assert_eq!(Ipv4Addr::new(169, 254, 0, 0).is_link_local(), true); - /// assert_eq!(Ipv4Addr::new(169, 254, 10, 65).is_link_local(), true); - /// assert_eq!(Ipv4Addr::new(16, 89, 10, 65).is_link_local(), false); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(since = "1.7.0", feature = "ip_17")] - #[must_use] - #[inline] - pub const fn is_link_local(&self) -> bool { - matches!(self.octets(), [169, 254, ..]) - } - - /// Returns [`true`] if the address appears to be globally reachable - /// as specified by the [IANA IPv4 Special-Purpose Address Registry]. - /// Whether or not an address is practically reachable will depend on your network configuration. - /// - /// Most IPv4 addresses are globally reachable; - /// unless they are specifically defined as *not* globally reachable. 
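The documented range boundaries are worth pinning down with assertions:

```
use std::net::Ipv4Addr;

fn main() {
    // RFC 1918 private ranges: 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16.
    assert!(Ipv4Addr::new(10, 1, 2, 3).is_private());
    assert!(Ipv4Addr::new(172, 31, 255, 255).is_private());
    assert!(!Ipv4Addr::new(172, 32, 0, 1).is_private()); // just past the /12
    // RFC 3927 link-local range: 169.254.0.0/16.
    assert!(Ipv4Addr::new(169, 254, 1, 1).is_link_local());
}
```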
- /// - /// Non-exhaustive list of notable addresses that are not globally reachable: - /// - /// - The [unspecified address] ([`is_unspecified`](Ipv4Addr::is_unspecified)) - /// - Addresses reserved for private use ([`is_private`](Ipv4Addr::is_private)) - /// - Addresses in the shared address space ([`is_shared`](Ipv4Addr::is_shared)) - /// - Loopback addresses ([`is_loopback`](Ipv4Addr::is_loopback)) - /// - Link-local addresses ([`is_link_local`](Ipv4Addr::is_link_local)) - /// - Addresses reserved for documentation ([`is_documentation`](Ipv4Addr::is_documentation)) - /// - Addresses reserved for benchmarking ([`is_benchmarking`](Ipv4Addr::is_benchmarking)) - /// - Reserved addresses ([`is_reserved`](Ipv4Addr::is_reserved)) - /// - The [broadcast address] ([`is_broadcast`](Ipv4Addr::is_broadcast)) - /// - /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv4 Special-Purpose Address Registry]. - /// - /// [IANA IPv4 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml - /// [unspecified address]: Ipv4Addr::UNSPECIFIED - /// [broadcast address]: Ipv4Addr::BROADCAST - - /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// - /// use std::net::Ipv4Addr; - /// - /// // Most IPv4 addresses are globally reachable: - /// assert_eq!(Ipv4Addr::new(80, 9, 12, 3).is_global(), true); - /// - /// // However some addresses have been assigned a special meaning - /// // that makes them not globally reachable. Some examples are: - /// - /// // The unspecified address (`0.0.0.0`) - /// assert_eq!(Ipv4Addr::UNSPECIFIED.is_global(), false); - /// - /// // Addresses reserved for private use (`10.0.0.0/8`, `172.16.0.0/12`, 192.168.0.0/16) - /// assert_eq!(Ipv4Addr::new(10, 254, 0, 0).is_global(), false); - /// assert_eq!(Ipv4Addr::new(192, 168, 10, 65).is_global(), false); - /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_global(), false); - /// - /// // Addresses in the shared address space (`100.64.0.0/10`) - /// assert_eq!(Ipv4Addr::new(100, 100, 0, 0).is_global(), false); - /// - /// // The loopback addresses (`127.0.0.0/8`) - /// assert_eq!(Ipv4Addr::LOCALHOST.is_global(), false); - /// - /// // Link-local addresses (`169.254.0.0/16`) - /// assert_eq!(Ipv4Addr::new(169, 254, 45, 1).is_global(), false); - /// - /// // Addresses reserved for documentation (`192.0.2.0/24`, `198.51.100.0/24`, `203.0.113.0/24`) - /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_global(), false); - /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_global(), false); - /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_global(), false); - /// - /// // Addresses reserved for benchmarking (`198.18.0.0/15`) - /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_global(), false); - /// - /// // Reserved addresses (`240.0.0.0/4`) - /// assert_eq!(Ipv4Addr::new(250, 10, 20, 30).is_global(), false); - /// - /// // The broadcast address (`255.255.255.255`) - /// assert_eq!(Ipv4Addr::BROADCAST.is_global(), false); - /// - /// // For a complete overview see the IANA IPv4 Special-Purpose Address Registry. 
- /// ``` - #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] - #[unstable(feature = "ip", issue = "27709")] - #[must_use] - #[inline] - pub const fn is_global(&self) -> bool { - !(self.octets()[0] == 0 // "This network" - || self.is_private() - || self.is_shared() - || self.is_loopback() - || self.is_link_local() - // addresses reserved for future protocols (`192.0.0.0/24`) - ||(self.octets()[0] == 192 && self.octets()[1] == 0 && self.octets()[2] == 0) - || self.is_documentation() - || self.is_benchmarking() - || self.is_reserved() - || self.is_broadcast()) - } - - /// Returns [`true`] if this address is part of the Shared Address Space defined in - /// [IETF RFC 6598] (`100.64.0.0/10`). - /// - /// [IETF RFC 6598]: https://tools.ietf.org/html/rfc6598 - /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// use std::net::Ipv4Addr; - /// - /// assert_eq!(Ipv4Addr::new(100, 64, 0, 0).is_shared(), true); - /// assert_eq!(Ipv4Addr::new(100, 127, 255, 255).is_shared(), true); - /// assert_eq!(Ipv4Addr::new(100, 128, 0, 0).is_shared(), false); - /// ``` - #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] - #[unstable(feature = "ip", issue = "27709")] - #[must_use] - #[inline] - pub const fn is_shared(&self) -> bool { - self.octets()[0] == 100 && (self.octets()[1] & 0b1100_0000 == 0b0100_0000) - } - - /// Returns [`true`] if this address part of the `198.18.0.0/15` range, which is reserved for - /// network devices benchmarking. This range is defined in [IETF RFC 2544] as `192.18.0.0` - /// through `198.19.255.255` but [errata 423] corrects it to `198.18.0.0/15`. - /// - /// [IETF RFC 2544]: https://tools.ietf.org/html/rfc2544 - /// [errata 423]: https://www.rfc-editor.org/errata/eid423 - /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// use std::net::Ipv4Addr; - /// - /// assert_eq!(Ipv4Addr::new(198, 17, 255, 255).is_benchmarking(), false); - /// assert_eq!(Ipv4Addr::new(198, 18, 0, 0).is_benchmarking(), true); - /// assert_eq!(Ipv4Addr::new(198, 19, 255, 255).is_benchmarking(), true); - /// assert_eq!(Ipv4Addr::new(198, 20, 0, 0).is_benchmarking(), false); - /// ``` - #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] - #[unstable(feature = "ip", issue = "27709")] - #[must_use] - #[inline] - pub const fn is_benchmarking(&self) -> bool { - self.octets()[0] == 198 && (self.octets()[1] & 0xfe) == 18 - } - - /// Returns [`true`] if this address is reserved by IANA for future use. [IETF RFC 1112] - /// defines the block of reserved addresses as `240.0.0.0/4`. This range normally includes the - /// broadcast address `255.255.255.255`, but this implementation explicitly excludes it, since - /// it is obviously not reserved for future use. - /// - /// [IETF RFC 1112]: https://tools.ietf.org/html/rfc1112 - /// - /// # Warning - /// - /// As IANA assigns new addresses, this method will be - /// updated. This may result in non-reserved addresses being - /// treated as reserved in code that relies on an outdated version - /// of this method. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// use std::net::Ipv4Addr; - /// - /// assert_eq!(Ipv4Addr::new(240, 0, 0, 0).is_reserved(), true); - /// assert_eq!(Ipv4Addr::new(255, 255, 255, 254).is_reserved(), true); - /// - /// assert_eq!(Ipv4Addr::new(239, 255, 255, 255).is_reserved(), false); - /// // The broadcast address is not considered as reserved for future use by this implementation - /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_reserved(), false); - /// ``` - #[rustc_const_unstable(feature = "const_ipv4", issue = "76205")] - #[unstable(feature = "ip", issue = "27709")] - #[must_use] - #[inline] - pub const fn is_reserved(&self) -> bool { - self.octets()[0] & 240 == 240 && !self.is_broadcast() - } - - /// Returns [`true`] if this is a multicast address (`224.0.0.0/4`). - /// - /// Multicast addresses have a most significant octet between `224` and `239`, - /// and is defined by [IETF RFC 5771]. - /// - /// [IETF RFC 5771]: https://tools.ietf.org/html/rfc5771 - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// assert_eq!(Ipv4Addr::new(224, 254, 0, 0).is_multicast(), true); - /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_multicast(), true); - /// assert_eq!(Ipv4Addr::new(172, 16, 10, 65).is_multicast(), false); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(since = "1.7.0", feature = "ip_17")] - #[must_use] - #[inline] - pub const fn is_multicast(&self) -> bool { - self.octets()[0] >= 224 && self.octets()[0] <= 239 - } - - /// Returns [`true`] if this is a broadcast address (`255.255.255.255`). - /// - /// A broadcast address has all octets set to `255` as defined in [IETF RFC 919]. - /// - /// [IETF RFC 919]: https://tools.ietf.org/html/rfc919 - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// assert_eq!(Ipv4Addr::new(255, 255, 255, 255).is_broadcast(), true); - /// assert_eq!(Ipv4Addr::new(236, 168, 10, 65).is_broadcast(), false); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(since = "1.7.0", feature = "ip_17")] - #[must_use] - #[inline] - pub const fn is_broadcast(&self) -> bool { - u32::from_be_bytes(self.octets()) == u32::from_be_bytes(Self::BROADCAST.octets()) - } - - /// Returns [`true`] if this address is in a range designated for documentation. - /// - /// This is defined in [IETF RFC 5737]: - /// - /// - `192.0.2.0/24` (TEST-NET-1) - /// - `198.51.100.0/24` (TEST-NET-2) - /// - `203.0.113.0/24` (TEST-NET-3) - /// - /// [IETF RFC 5737]: https://tools.ietf.org/html/rfc5737 - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).is_documentation(), true); - /// assert_eq!(Ipv4Addr::new(198, 51, 100, 65).is_documentation(), true); - /// assert_eq!(Ipv4Addr::new(203, 0, 113, 6).is_documentation(), true); - /// assert_eq!(Ipv4Addr::new(193, 34, 17, 19).is_documentation(), false); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(since = "1.7.0", feature = "ip_17")] - #[must_use] - #[inline] - pub const fn is_documentation(&self) -> bool { - matches!(self.octets(), [192, 0, 2, _] | [198, 51, 100, _] | [203, 0, 113, _]) - } - - /// Converts this address to an [IPv4-compatible] [`IPv6` address]. - /// - /// `a.b.c.d` becomes `::a.b.c.d` - /// - /// Note that IPv4-compatible addresses have been officially deprecated. 
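The multicast, broadcast, and documentation predicates are likewise fixed-range checks; the stable ones can be confirmed directly:

```
use std::net::Ipv4Addr;

fn main() {
    // 224.0.0.0/4 is multicast, all-ones is broadcast, and the three
    // TEST-NET blocks are reserved for documentation.
    assert!(Ipv4Addr::new(224, 0, 0, 251).is_multicast());
    assert!(Ipv4Addr::BROADCAST.is_broadcast());
    assert!(Ipv4Addr::new(203, 0, 113, 6).is_documentation());
    assert!(!Ipv4Addr::new(193, 34, 17, 19).is_documentation());
}
```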
- /// If you don't explicitly need an IPv4-compatible address for legacy reasons, consider using `to_ipv6_mapped` instead. - /// - /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses - /// [`IPv6` address]: Ipv6Addr - /// - /// # Examples - /// - /// ``` - /// use std::net::{Ipv4Addr, Ipv6Addr}; - /// - /// assert_eq!( - /// Ipv4Addr::new(192, 0, 2, 255).to_ipv6_compatible(), - /// Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x2ff) - /// ); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(feature = "rust1", since = "1.0.0")] - #[must_use = "this returns the result of the operation, \ - without modifying the original"] - #[inline] - pub const fn to_ipv6_compatible(&self) -> Ipv6Addr { - let [a, b, c, d] = self.octets(); - Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, a, b, c, d] } - } - - /// Converts this address to an [IPv4-mapped] [`IPv6` address]. - /// - /// `a.b.c.d` becomes `::ffff:a.b.c.d` - /// - /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses - /// [`IPv6` address]: Ipv6Addr - /// - /// # Examples - /// - /// ``` - /// use std::net::{Ipv4Addr, Ipv6Addr}; - /// - /// assert_eq!(Ipv4Addr::new(192, 0, 2, 255).to_ipv6_mapped(), - /// Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x2ff)); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(feature = "rust1", since = "1.0.0")] - #[must_use = "this returns the result of the operation, \ - without modifying the original"] - #[inline] - pub const fn to_ipv6_mapped(&self) -> Ipv6Addr { - let [a, b, c, d] = self.octets(); - Ipv6Addr { octets: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, a, b, c, d] } - } -} - -#[stable(feature = "ip_addr", since = "1.7.0")] -impl fmt::Display for IpAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - IpAddr::V4(ip) => ip.fmt(fmt), - IpAddr::V6(ip) => ip.fmt(fmt), - } - } -} - -#[stable(feature = "ip_addr", since = "1.7.0")] -impl fmt::Debug for IpAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} - -#[stable(feature = "ip_from_ip", since = "1.16.0")] -impl From<Ipv4Addr> for IpAddr { - /// Copies this address to a new `IpAddr::V4`. - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr}; - /// - /// let addr = Ipv4Addr::new(127, 0, 0, 1); - /// - /// assert_eq!( - /// IpAddr::V4(addr), - /// IpAddr::from(addr) - /// ) - /// ``` - #[inline] - fn from(ipv4: Ipv4Addr) -> IpAddr { - IpAddr::V4(ipv4) - } -} - -#[stable(feature = "ip_from_ip", since = "1.16.0")] -impl From<Ipv6Addr> for IpAddr { - /// Copies this address to a new `IpAddr::V6`. - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv6Addr}; - /// - /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff); - /// - /// assert_eq!( - /// IpAddr::V6(addr), - /// IpAddr::from(addr) - /// ); - /// ``` - #[inline] - fn from(ipv6: Ipv6Addr) -> IpAddr { - IpAddr::V6(ipv6) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Display for Ipv4Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let octets = self.octets(); - - // If there are no alignment requirements, write the IP address directly to `f`. - // Otherwise, write it to a local buffer and then use `f.pad`. 
- if fmt.precision().is_none() && fmt.width().is_none() { - write!(fmt, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]) - } else { - const LONGEST_IPV4_ADDR: &str = "255.255.255.255"; - - let mut buf = DisplayBuffer::<{ LONGEST_IPV4_ADDR.len() }>::new(); - // Buffer is long enough for the longest possible IPv4 address, so this should never fail. - write!(buf, "{}.{}.{}.{}", octets[0], octets[1], octets[2], octets[3]).unwrap(); - - fmt.pad(buf.as_str()) - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for Ipv4Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} - -#[stable(feature = "ip_cmp", since = "1.16.0")] -impl PartialEq<Ipv4Addr> for IpAddr { - #[inline] - fn eq(&self, other: &Ipv4Addr) -> bool { - match self { - IpAddr::V4(v4) => v4 == other, - IpAddr::V6(_) => false, - } - } -} - -#[stable(feature = "ip_cmp", since = "1.16.0")] -impl PartialEq<IpAddr> for Ipv4Addr { - #[inline] - fn eq(&self, other: &IpAddr) -> bool { - match other { - IpAddr::V4(v4) => self == v4, - IpAddr::V6(_) => false, - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl PartialOrd for Ipv4Addr { - #[inline] - fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> { - Some(self.cmp(other)) - } -} - -#[stable(feature = "ip_cmp", since = "1.16.0")] -impl PartialOrd<Ipv4Addr> for IpAddr { - #[inline] - fn partial_cmp(&self, other: &Ipv4Addr) -> Option<Ordering> { - match self { - IpAddr::V4(v4) => v4.partial_cmp(other), - IpAddr::V6(_) => Some(Ordering::Greater), - } - } -} - -#[stable(feature = "ip_cmp", since = "1.16.0")] -impl PartialOrd<IpAddr> for Ipv4Addr { - #[inline] - fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> { - match other { - IpAddr::V4(v4) => self.partial_cmp(v4), - IpAddr::V6(_) => Some(Ordering::Less), - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Ord for Ipv4Addr { - #[inline] - fn cmp(&self, other: &Ipv4Addr) -> Ordering { - self.octets.cmp(&other.octets) - } -} +pub use core::net::Ipv6MulticastScope; impl IntoInner<c::in_addr> for Ipv4Addr { #[inline] fn into_inner(self) -> c::in_addr { // `s_addr` is stored as BE on all machines and the array is in BE order. // So the native endian conversion method is used so that it's never swapped. - c::in_addr { s_addr: u32::from_ne_bytes(self.octets) } + c::in_addr { s_addr: u32::from_ne_bytes(self.octets()) } } } impl FromInner<c::in_addr> for Ipv4Addr { fn from_inner(addr: c::in_addr) -> Ipv4Addr { - Ipv4Addr { octets: addr.s_addr.to_ne_bytes() } - } -} - -#[stable(feature = "ip_u32", since = "1.1.0")] -impl From<Ipv4Addr> for u32 { - /// Converts an `Ipv4Addr` into a host byte order `u32`. - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78); - /// assert_eq!(0x12345678, u32::from(addr)); - /// ``` - #[inline] - fn from(ip: Ipv4Addr) -> u32 { - u32::from_be_bytes(ip.octets) - } -} - -#[stable(feature = "ip_u32", since = "1.1.0")] -impl From<u32> for Ipv4Addr { - /// Converts a host byte order `u32` into an `Ipv4Addr`. 
- /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// let addr = Ipv4Addr::from(0x12345678); - /// assert_eq!(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78), addr); - /// ``` - #[inline] - fn from(ip: u32) -> Ipv4Addr { - Ipv4Addr { octets: ip.to_be_bytes() } - } -} - -#[stable(feature = "from_slice_v4", since = "1.9.0")] -impl From<[u8; 4]> for Ipv4Addr { - /// Creates an `Ipv4Addr` from a four element byte array. - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv4Addr; - /// - /// let addr = Ipv4Addr::from([13u8, 12u8, 11u8, 10u8]); - /// assert_eq!(Ipv4Addr::new(13, 12, 11, 10), addr); - /// ``` - #[inline] - fn from(octets: [u8; 4]) -> Ipv4Addr { - Ipv4Addr { octets } - } -} - -#[stable(feature = "ip_from_slice", since = "1.17.0")] -impl From<[u8; 4]> for IpAddr { - /// Creates an `IpAddr::V4` from a four element byte array. - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr}; - /// - /// let addr = IpAddr::from([13u8, 12u8, 11u8, 10u8]); - /// assert_eq!(IpAddr::V4(Ipv4Addr::new(13, 12, 11, 10)), addr); - /// ``` - #[inline] - fn from(octets: [u8; 4]) -> IpAddr { - IpAddr::V4(Ipv4Addr::from(octets)) - } -} - -impl Ipv6Addr { - /// Creates a new IPv6 address from eight 16-bit segments. - /// - /// The result will represent the IP address `a:b:c:d:e:f:g:h`. - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// let addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff); - /// ``` - #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] - #[stable(feature = "rust1", since = "1.0.0")] - #[must_use] - #[inline] - pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Ipv6Addr { - let addr16 = [ - a.to_be(), - b.to_be(), - c.to_be(), - d.to_be(), - e.to_be(), - f.to_be(), - g.to_be(), - h.to_be(), - ]; - Ipv6Addr { - // All elements in `addr16` are big endian. - // SAFETY: `[u16; 8]` is always safe to transmute to `[u8; 16]`. - octets: unsafe { transmute::<_, [u8; 16]>(addr16) }, - } - } - - /// An IPv6 address representing localhost: `::1`. - /// - /// This corresponds to constant `IN6ADDR_LOOPBACK_INIT` or `in6addr_loopback` in other - /// languages. - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// let addr = Ipv6Addr::LOCALHOST; - /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); - /// ``` - #[doc(alias = "IN6ADDR_LOOPBACK_INIT")] - #[doc(alias = "in6addr_loopback")] - #[stable(feature = "ip_constructors", since = "1.30.0")] - pub const LOCALHOST: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); - - /// An IPv6 address representing the unspecified address: `::` - /// - /// This corresponds to constant `IN6ADDR_ANY_INIT` or `in6addr_any` in other languages. - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// let addr = Ipv6Addr::UNSPECIFIED; - /// assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)); - /// ``` - #[doc(alias = "IN6ADDR_ANY_INIT")] - #[doc(alias = "in6addr_any")] - #[stable(feature = "ip_constructors", since = "1.30.0")] - pub const UNSPECIFIED: Self = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0); - - /// Returns the eight 16-bit segments that make up this address. 
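The deleted `From` impls spell out the endianness contract, which is easy to verify:

```
use std::net::Ipv4Addr;

fn main() {
    let addr = Ipv4Addr::new(0x12, 0x34, 0x56, 0x78);
    // Host byte order on the `u32` side, network order inside the address.
    assert_eq!(u32::from(addr), 0x12345678);
    assert_eq!(Ipv4Addr::from(0x12345678u32), addr);
    assert_eq!(Ipv4Addr::from([0x12, 0x34, 0x56, 0x78]), addr);
}
```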
- /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).segments(), - /// [0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff]); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(feature = "rust1", since = "1.0.0")] - #[must_use] - #[inline] - pub const fn segments(&self) -> [u16; 8] { - // All elements in `self.octets` must be big endian. - // SAFETY: `[u8; 16]` is always safe to transmute to `[u16; 8]`. - let [a, b, c, d, e, f, g, h] = unsafe { transmute::<_, [u16; 8]>(self.octets) }; - // We want native endian u16 - [ - u16::from_be(a), - u16::from_be(b), - u16::from_be(c), - u16::from_be(d), - u16::from_be(e), - u16::from_be(f), - u16::from_be(g), - u16::from_be(h), - ] - } - - /// Returns [`true`] for the special 'unspecified' address (`::`). - /// - /// This property is defined in [IETF RFC 4291]. - /// - /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291 - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unspecified(), false); - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).is_unspecified(), true); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(since = "1.7.0", feature = "ip_17")] - #[must_use] - #[inline] - pub const fn is_unspecified(&self) -> bool { - u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::UNSPECIFIED.octets()) - } - - /// Returns [`true`] if this is the [loopback address] (`::1`), - /// as defined in [IETF RFC 4291 section 2.5.3]. - /// - /// Contrary to IPv4, in IPv6 there is only one loopback address. - /// - /// [loopback address]: Ipv6Addr::LOCALHOST - /// [IETF RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3 - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_loopback(), false); - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0x1).is_loopback(), true); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(since = "1.7.0", feature = "ip_17")] - #[must_use] - #[inline] - pub const fn is_loopback(&self) -> bool { - u128::from_be_bytes(self.octets()) == u128::from_be_bytes(Ipv6Addr::LOCALHOST.octets()) - } - - /// Returns [`true`] if the address appears to be globally reachable - /// as specified by the [IANA IPv6 Special-Purpose Address Registry]. - /// Whether or not an address is practically reachable will depend on your network configuration. - /// - /// Most IPv6 addresses are globally reachable; - /// unless they are specifically defined as *not* globally reachable. - /// - /// Non-exhaustive list of notable addresses that are not globally reachable: - /// - The [unspecified address] ([`is_unspecified`](Ipv6Addr::is_unspecified)) - /// - The [loopback address] ([`is_loopback`](Ipv6Addr::is_loopback)) - /// - IPv4-mapped addresses - /// - Addresses reserved for benchmarking - /// - Addresses reserved for documentation ([`is_documentation`](Ipv6Addr::is_documentation)) - /// - Unique local addresses ([`is_unique_local`](Ipv6Addr::is_unique_local)) - /// - Unicast addresses with link-local scope ([`is_unicast_link_local`](Ipv6Addr::is_unicast_link_local)) - /// - /// For the complete overview of which addresses are globally reachable, see the table at the [IANA IPv6 Special-Purpose Address Registry]. 
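`segments` hands back native-endian `u16`s while the backing bytes stay in network order, as the deleted transmute-based implementation makes explicit:

```
use std::net::Ipv6Addr;

fn main() {
    let addr = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1);
    assert_eq!(addr.segments()[0], 0x2001);
    // The first segment is stored big-endian in the raw octets.
    assert_eq!(addr.octets()[..4], [0x20, 0x01, 0x0d, 0xb8]);
    assert_eq!(addr.octets()[15], 1);
}
```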
- /// - /// Note that an address having global scope is not the same as being globally reachable, - /// and there is no direct relation between the two concepts: There exist addresses with global scope - /// that are not globally reachable (for example unique local addresses), - /// and addresses that are globally reachable without having global scope - /// (multicast addresses with non-global scope). - /// - /// [IANA IPv6 Special-Purpose Address Registry]: https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml - /// [unspecified address]: Ipv6Addr::UNSPECIFIED - /// [loopback address]: Ipv6Addr::LOCALHOST - /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// - /// use std::net::Ipv6Addr; - /// - /// // Most IPv6 addresses are globally reachable: - /// assert_eq!(Ipv6Addr::new(0x26, 0, 0x1c9, 0, 0, 0xafc8, 0x10, 0x1).is_global(), true); - /// - /// // However some addresses have been assigned a special meaning - /// // that makes them not globally reachable. Some examples are: - /// - /// // The unspecified address (`::`) - /// assert_eq!(Ipv6Addr::UNSPECIFIED.is_global(), false); - /// - /// // The loopback address (`::1`) - /// assert_eq!(Ipv6Addr::LOCALHOST.is_global(), false); - /// - /// // IPv4-mapped addresses (`::ffff:0:0/96`) - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_global(), false); - /// - /// // Addresses reserved for benchmarking (`2001:2::/48`) - /// assert_eq!(Ipv6Addr::new(0x2001, 2, 0, 0, 0, 0, 0, 1,).is_global(), false); - /// - /// // Addresses reserved for documentation (`2001:db8::/32`) - /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1).is_global(), false); - /// - /// // Unique local addresses (`fc00::/7`) - /// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 1).is_global(), false); - /// - /// // Unicast addresses with link-local scope (`fe80::/10`) - /// assert_eq!(Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 1).is_global(), false); - /// - /// // For a complete overview see the IANA IPv6 Special-Purpose Address Registry. - /// ``` - #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] - #[unstable(feature = "ip", issue = "27709")] - #[must_use] - #[inline] - pub const fn is_global(&self) -> bool { - !(self.is_unspecified() - || self.is_loopback() - // IPv4-mapped Address (`::ffff:0:0/96`) - || matches!(self.segments(), [0, 0, 0, 0, 0, 0xffff, _, _]) - // IPv4-IPv6 Translat. (`64:ff9b:1::/48`) - || matches!(self.segments(), [0x64, 0xff9b, 1, _, _, _, _, _]) - // Discard-Only Address Block (`100::/64`) - || matches!(self.segments(), [0x100, 0, 0, 0, _, _, _, _]) - // IETF Protocol Assignments (`2001::/23`) - || (matches!(self.segments(), [0x2001, b, _, _, _, _, _, _] if b < 0x200) - && !( - // Port Control Protocol Anycast (`2001:1::1`) - u128::from_be_bytes(self.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0001 - // Traversal Using Relays around NAT Anycast (`2001:1::2`) - || u128::from_be_bytes(self.octets()) == 0x2001_0001_0000_0000_0000_0000_0000_0002 - // AMT (`2001:3::/32`) - || matches!(self.segments(), [0x2001, 3, _, _, _, _, _, _]) - // AS112-v6 (`2001:4:112::/48`) - || matches!(self.segments(), [0x2001, 4, 0x112, _, _, _, _, _]) - // ORCHIDv2 (`2001:20::/28`) - || matches!(self.segments(), [0x2001, b, _, _, _, _, _, _] if b >= 0x20 && b <= 0x2F) - )) - || self.is_documentation() - || self.is_unique_local() - || self.is_unicast_link_local()) - } - - /// Returns [`true`] if this is a unique local address (`fc00::/7`). 
- ///
- /// This property is defined in [IETF RFC 4193].
- ///
- /// [IETF RFC 4193]: https://tools.ietf.org/html/rfc4193
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unique_local(), false);
- /// assert_eq!(Ipv6Addr::new(0xfc02, 0, 0, 0, 0, 0, 0, 0).is_unique_local(), true);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_unique_local(&self) -> bool {
- (self.segments()[0] & 0xfe00) == 0xfc00
- }
-
- /// Returns [`true`] if this is a unicast address, as defined by [IETF RFC 4291].
- /// Any address that is not a [multicast address] (`ff00::/8`) is unicast.
- ///
- /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291
- /// [multicast address]: Ipv6Addr::is_multicast
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// // The unspecified and loopback addresses are unicast.
- /// assert_eq!(Ipv6Addr::UNSPECIFIED.is_unicast(), true);
- /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast(), true);
- ///
- /// // Any address that is not a multicast address (`ff00::/8`) is unicast.
- /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast(), true);
- /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_unicast(), false);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_unicast(&self) -> bool {
- !self.is_multicast()
- }
-
- /// Returns `true` if the address is a unicast address with link-local scope,
- /// as defined in [RFC 4291].
- ///
- /// A unicast address has link-local scope if it has the prefix `fe80::/10`, as per [RFC 4291 section 2.4].
- /// Note that this encompasses more addresses than those defined in [RFC 4291 section 2.5.6],
- /// which describes "Link-Local IPv6 Unicast Addresses" as having the following stricter format:
- ///
- /// ```text
- /// | 10 bits | 54 bits | 64 bits |
- /// +----------+-------------------------+----------------------------+
- /// |1111111010| 0 | interface ID |
- /// +----------+-------------------------+----------------------------+
- /// ```
- /// So while currently the only addresses with link-local scope an application will encounter are all in `fe80::/64`,
- /// this might change in the future with the publication of new standards. More addresses in `fe80::/10` could be allocated,
- /// and those addresses will have link-local scope.
- ///
- /// Also note that while [RFC 4291 section 2.5.3] says of the [loopback address] (`::1`) that "it is treated as having Link-Local scope",
- /// this does not mean that the loopback address actually has link-local scope and this method will return `false` on it.
- ///
- /// [RFC 4291]: https://tools.ietf.org/html/rfc4291
- /// [RFC 4291 section 2.4]: https://tools.ietf.org/html/rfc4291#section-2.4
- /// [RFC 4291 section 2.5.3]: https://tools.ietf.org/html/rfc4291#section-2.5.3
- /// [RFC 4291 section 2.5.6]: https://tools.ietf.org/html/rfc4291#section-2.5.6
- /// [loopback address]: Ipv6Addr::LOCALHOST
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// // The loopback address (`::1`) does not actually have link-local scope.
- /// assert_eq!(Ipv6Addr::LOCALHOST.is_unicast_link_local(), false);
- ///
- /// // Only addresses in `fe80::/10` have link-local scope.
- /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), false);
- /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true);
- ///
- /// // Addresses outside the stricter `fe80::/64` also have link-local scope.
- /// assert_eq!(Ipv6Addr::new(0xfe80, 0, 0, 1, 0, 0, 0, 0).is_unicast_link_local(), true);
- /// assert_eq!(Ipv6Addr::new(0xfe81, 0, 0, 0, 0, 0, 0, 0).is_unicast_link_local(), true);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_unicast_link_local(&self) -> bool {
- (self.segments()[0] & 0xffc0) == 0xfe80
- }
-
- /// Returns [`true`] if this is an address reserved for documentation
- /// (`2001:db8::/32`).
- ///
- /// This property is defined in [IETF RFC 3849].
- ///
- /// [IETF RFC 3849]: https://tools.ietf.org/html/rfc3849
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_documentation(), false);
- /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_documentation(), true);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_documentation(&self) -> bool {
- (self.segments()[0] == 0x2001) && (self.segments()[1] == 0xdb8)
- }
-
- /// Returns [`true`] if this is an address reserved for benchmarking (`2001:2::/48`).
- ///
- /// This property is defined in [IETF RFC 5180], where it is mistakenly specified as covering the range `2001:0200::/48`.
- /// This is corrected in [IETF RFC Errata 1752] to `2001:0002::/48`.
- ///
- /// [IETF RFC 5180]: https://tools.ietf.org/html/rfc5180
- /// [IETF RFC Errata 1752]: https://www.rfc-editor.org/errata_search.php?eid=1752
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(ip)]
- ///
- /// use std::net::Ipv6Addr;
- ///
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc613, 0x0).is_benchmarking(), false);
- /// assert_eq!(Ipv6Addr::new(0x2001, 0x2, 0, 0, 0, 0, 0, 0).is_benchmarking(), true);
- /// ```
- #[unstable(feature = "ip", issue = "27709")]
- #[must_use]
- #[inline]
- pub const fn is_benchmarking(&self) -> bool {
- (self.segments()[0] == 0x2001) && (self.segments()[1] == 0x2) && (self.segments()[2] == 0)
- }
-
- /// Returns [`true`] if the address is a globally routable unicast address.
- ///
- /// The following return false:
- ///
- /// - the loopback address
- /// - the link-local addresses
- /// - unique local addresses
- /// - the unspecified address
- /// - the address range reserved for documentation
- ///
- /// This method returns [`true`] for site-local addresses as per [RFC 4291 section 2.5.7].
- ///
- /// ```no_rust
- /// The special behavior of [the site-local unicast] prefix defined in [RFC3513] must no longer
- /// be supported in new implementations (i.e., new implementations must treat this prefix as
- /// Global Unicast).
- /// ``` - /// - /// [RFC 4291 section 2.5.7]: https://tools.ietf.org/html/rfc4291#section-2.5.7 - /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// - /// use std::net::Ipv6Addr; - /// - /// assert_eq!(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0).is_unicast_global(), false); - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_unicast_global(), true); - /// ``` - #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] - #[unstable(feature = "ip", issue = "27709")] - #[must_use] - #[inline] - pub const fn is_unicast_global(&self) -> bool { - self.is_unicast() - && !self.is_loopback() - && !self.is_unicast_link_local() - && !self.is_unique_local() - && !self.is_unspecified() - && !self.is_documentation() - && !self.is_benchmarking() - } - - /// Returns the address's multicast scope if the address is multicast. - /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// - /// use std::net::{Ipv6Addr, Ipv6MulticastScope}; - /// - /// assert_eq!( - /// Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0).multicast_scope(), - /// Some(Ipv6MulticastScope::Global) - /// ); - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).multicast_scope(), None); - /// ``` - #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] - #[unstable(feature = "ip", issue = "27709")] - #[must_use] - #[inline] - pub const fn multicast_scope(&self) -> Option<Ipv6MulticastScope> { - if self.is_multicast() { - match self.segments()[0] & 0x000f { - 1 => Some(Ipv6MulticastScope::InterfaceLocal), - 2 => Some(Ipv6MulticastScope::LinkLocal), - 3 => Some(Ipv6MulticastScope::RealmLocal), - 4 => Some(Ipv6MulticastScope::AdminLocal), - 5 => Some(Ipv6MulticastScope::SiteLocal), - 8 => Some(Ipv6MulticastScope::OrganizationLocal), - 14 => Some(Ipv6MulticastScope::Global), - _ => None, - } - } else { - None - } - } - - /// Returns [`true`] if this is a multicast address (`ff00::/8`). - /// - /// This property is defined by [IETF RFC 4291]. - /// - /// [IETF RFC 4291]: https://tools.ietf.org/html/rfc4291 - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).is_multicast(), true); - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).is_multicast(), false); - /// ``` - #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")] - #[stable(since = "1.7.0", feature = "ip_17")] - #[must_use] - #[inline] - pub const fn is_multicast(&self) -> bool { - (self.segments()[0] & 0xff00) == 0xff00 - } - - /// Converts this address to an [`IPv4` address] if it's an [IPv4-mapped] address, - /// as defined in [IETF RFC 4291 section 2.5.5.2], otherwise returns [`None`]. - /// - /// `::ffff:a.b.c.d` becomes `a.b.c.d`. - /// All addresses *not* starting with `::ffff` will return `None`. 
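For the `multicast_scope` match above: a multicast address begins with the byte `0xff`, followed by a 4-bit flags field and a 4-bit scope field, which is why the method masks the first segment with `0x000f`. A standalone sketch of that extraction (the helper name is illustrative; only stable APIs are used):

use std::net::Ipv6Addr;

// Extract the raw 4-bit scope field from a multicast address
// (layout of the first segment: 0xff, 4 flag bits, 4 scope bits).
fn scope_nibble(addr: Ipv6Addr) -> Option<u8> {
    let seg0 = addr.segments()[0];
    if (seg0 & 0xff00) == 0xff00 { Some((seg0 & 0x000f) as u8) } else { None }
}

fn main() {
    assert_eq!(scope_nibble(Ipv6Addr::new(0xff0e, 0, 0, 0, 0, 0, 0, 0)), Some(0xe)); // Global
    assert_eq!(scope_nibble(Ipv6Addr::new(0xff02, 0, 0, 0, 0, 0, 0, 1)), Some(0x2)); // LinkLocal
    assert_eq!(scope_nibble(Ipv6Addr::LOCALHOST), None); // not multicast
}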
- ///
- /// [`IPv4` address]: Ipv4Addr
- /// [IPv4-mapped]: Ipv6Addr
- /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4_mapped(), None);
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4_mapped(),
- /// Some(Ipv4Addr::new(192, 10, 2, 255)));
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4_mapped(), None);
- /// ```
- #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")]
- #[stable(feature = "ipv6_to_ipv4_mapped", since = "1.63.0")]
- #[must_use = "this returns the result of the operation, \
- without modifying the original"]
- #[inline]
- pub const fn to_ipv4_mapped(&self) -> Option<Ipv4Addr> {
- match self.octets() {
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => {
- Some(Ipv4Addr::new(a, b, c, d))
- }
- _ => None,
- }
- }
-
- /// Converts this address to an [`IPv4` address] if it is either
- /// an [IPv4-compatible] address as defined in [IETF RFC 4291 section 2.5.5.1],
- /// or an [IPv4-mapped] address as defined in [IETF RFC 4291 section 2.5.5.2],
- /// otherwise returns [`None`].
- ///
- /// Note that this will return an [`IPv4` address] for the IPv6 loopback address `::1`. Use
- /// [`Ipv6Addr::to_ipv4_mapped`] to avoid this.
- ///
- /// `::a.b.c.d` and `::ffff:a.b.c.d` become `a.b.c.d`. `::1` becomes `0.0.0.1`.
- /// All addresses *not* starting with either all zeroes or `::ffff` will return `None`.
- ///
- /// [`IPv4` address]: Ipv4Addr
- /// [IPv4-compatible]: Ipv6Addr#ipv4-compatible-ipv6-addresses
- /// [IPv4-mapped]: Ipv6Addr#ipv4-mapped-ipv6-addresses
- /// [IETF RFC 4291 section 2.5.5.1]: https://tools.ietf.org/html/rfc4291#section-2.5.5.1
- /// [IETF RFC 4291 section 2.5.5.2]: https://tools.ietf.org/html/rfc4291#section-2.5.5.2
- ///
- /// # Examples
- ///
- /// ```
- /// use std::net::{Ipv4Addr, Ipv6Addr};
- ///
- /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).to_ipv4(), None);
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff).to_ipv4(),
- /// Some(Ipv4Addr::new(192, 10, 2, 255)));
- /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_ipv4(),
- /// Some(Ipv4Addr::new(0, 0, 0, 1)));
- /// ```
- #[rustc_const_stable(feature = "const_ip_50", since = "1.50.0")]
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use = "this returns the result of the operation, \
- without modifying the original"]
- #[inline]
- pub const fn to_ipv4(&self) -> Option<Ipv4Addr> {
- if let [0, 0, 0, 0, 0, 0 | 0xffff, ab, cd] = self.segments() {
- let [a, b] = ab.to_be_bytes();
- let [c, d] = cd.to_be_bytes();
- Some(Ipv4Addr::new(a, b, c, d))
- } else {
- None
- }
- }
-
- /// Converts this address to an `IpAddr::V4` if it is an IPv4-mapped address, otherwise it
- /// returns self wrapped in an `IpAddr::V6`.
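The distinction driving `to_canonical` is easiest to see on the loopback address: `to_ipv4` also accepts the deprecated IPv4-compatible form and therefore maps `::1` to `0.0.0.1`, while `to_ipv4_mapped` rejects it. A short contrast, using only stable APIs:

use std::net::{Ipv4Addr, Ipv6Addr};

fn main() {
    let loopback = Ipv6Addr::LOCALHOST; // ::1
    // `to_ipv4` treats `::1` as the IPv4-compatible address 0.0.0.1 ...
    assert_eq!(loopback.to_ipv4(), Some(Ipv4Addr::new(0, 0, 0, 1)));
    // ... while `to_ipv4_mapped` only accepts `::ffff:a.b.c.d`.
    assert_eq!(loopback.to_ipv4_mapped(), None);
}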
- /// - /// # Examples - /// - /// ``` - /// #![feature(ip)] - /// use std::net::Ipv6Addr; - /// - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).is_loopback(), false); - /// assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x7f00, 0x1).to_canonical().is_loopback(), true); - /// ``` - #[rustc_const_unstable(feature = "const_ipv6", issue = "76205")] - #[unstable(feature = "ip", issue = "27709")] - #[must_use = "this returns the result of the operation, \ - without modifying the original"] - #[inline] - pub const fn to_canonical(&self) -> IpAddr { - if let Some(mapped) = self.to_ipv4_mapped() { - return IpAddr::V4(mapped); - } - IpAddr::V6(*self) - } - - /// Returns the sixteen eight-bit integers the IPv6 address consists of. - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// assert_eq!(Ipv6Addr::new(0xff00, 0, 0, 0, 0, 0, 0, 0).octets(), - /// [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); - /// ``` - #[rustc_const_stable(feature = "const_ip_32", since = "1.32.0")] - #[stable(feature = "ipv6_to_octets", since = "1.12.0")] - #[must_use] - #[inline] - pub const fn octets(&self) -> [u8; 16] { - self.octets - } -} - -/// Write an Ipv6Addr, conforming to the canonical style described by -/// [RFC 5952](https://tools.ietf.org/html/rfc5952). -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Display for Ipv6Addr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // If there are no alignment requirements, write the IP address directly to `f`. - // Otherwise, write it to a local buffer and then use `f.pad`. - if f.precision().is_none() && f.width().is_none() { - let segments = self.segments(); - - // Special case for :: and ::1; otherwise they get written with the - // IPv4 formatter - if self.is_unspecified() { - f.write_str("::") - } else if self.is_loopback() { - f.write_str("::1") - } else if let Some(ipv4) = self.to_ipv4() { - match segments[5] { - // IPv4 Compatible address - 0 => write!(f, "::{}", ipv4), - // IPv4 Mapped address - 0xffff => write!(f, "::ffff:{}", ipv4), - _ => unreachable!(), - } - } else { - #[derive(Copy, Clone, Default)] - struct Span { - start: usize, - len: usize, - } - - // Find the inner 0 span - let zeroes = { - let mut longest = Span::default(); - let mut current = Span::default(); - - for (i, &segment) in segments.iter().enumerate() { - if segment == 0 { - if current.len == 0 { - current.start = i; - } - - current.len += 1; - - if current.len > longest.len { - longest = current; - } - } else { - current = Span::default(); - } - } - - longest - }; - - /// Write a colon-separated part of the address - #[inline] - fn fmt_subslice(f: &mut fmt::Formatter<'_>, chunk: &[u16]) -> fmt::Result { - if let Some((first, tail)) = chunk.split_first() { - write!(f, "{:x}", first)?; - for segment in tail { - f.write_char(':')?; - write!(f, "{:x}", segment)?; - } - } - Ok(()) - } - - if zeroes.len > 1 { - fmt_subslice(f, &segments[..zeroes.start])?; - f.write_str("::")?; - fmt_subslice(f, &segments[zeroes.start + zeroes.len..]) - } else { - fmt_subslice(f, &segments) - } - } - } else { - const LONGEST_IPV6_ADDR: &str = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"; - - let mut buf = DisplayBuffer::<{ LONGEST_IPV6_ADDR.len() }>::new(); - // Buffer is long enough for the longest possible IPv6 address, so this should never fail. 
- write!(buf, "{}", self).unwrap(); - - f.pad(buf.as_str()) - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for Ipv6Addr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} - -#[stable(feature = "ip_cmp", since = "1.16.0")] -impl PartialEq<IpAddr> for Ipv6Addr { - #[inline] - fn eq(&self, other: &IpAddr) -> bool { - match other { - IpAddr::V4(_) => false, - IpAddr::V6(v6) => self == v6, - } - } -} - -#[stable(feature = "ip_cmp", since = "1.16.0")] -impl PartialEq<Ipv6Addr> for IpAddr { - #[inline] - fn eq(&self, other: &Ipv6Addr) -> bool { - match self { - IpAddr::V4(_) => false, - IpAddr::V6(v6) => v6 == other, - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl PartialOrd for Ipv6Addr { - #[inline] - fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> { - Some(self.cmp(other)) - } -} - -#[stable(feature = "ip_cmp", since = "1.16.0")] -impl PartialOrd<Ipv6Addr> for IpAddr { - #[inline] - fn partial_cmp(&self, other: &Ipv6Addr) -> Option<Ordering> { - match self { - IpAddr::V4(_) => Some(Ordering::Less), - IpAddr::V6(v6) => v6.partial_cmp(other), - } - } -} - -#[stable(feature = "ip_cmp", since = "1.16.0")] -impl PartialOrd<IpAddr> for Ipv6Addr { - #[inline] - fn partial_cmp(&self, other: &IpAddr) -> Option<Ordering> { - match other { - IpAddr::V4(_) => Some(Ordering::Greater), - IpAddr::V6(v6) => self.partial_cmp(v6), - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Ord for Ipv6Addr { - #[inline] - fn cmp(&self, other: &Ipv6Addr) -> Ordering { - self.segments().cmp(&other.segments()) + Ipv4Addr::from(addr.s_addr.to_ne_bytes()) } } impl IntoInner<c::in6_addr> for Ipv6Addr { fn into_inner(self) -> c::in6_addr { - c::in6_addr { s6_addr: self.octets } + c::in6_addr { s6_addr: self.octets() } } } impl FromInner<c::in6_addr> for Ipv6Addr { #[inline] fn from_inner(addr: c::in6_addr) -> Ipv6Addr { - Ipv6Addr { octets: addr.s6_addr } - } -} - -#[stable(feature = "i128", since = "1.26.0")] -impl From<Ipv6Addr> for u128 { - /// Convert an `Ipv6Addr` into a host byte order `u128`. - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// let addr = Ipv6Addr::new( - /// 0x1020, 0x3040, 0x5060, 0x7080, - /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D, - /// ); - /// assert_eq!(0x102030405060708090A0B0C0D0E0F00D_u128, u128::from(addr)); - /// ``` - #[inline] - fn from(ip: Ipv6Addr) -> u128 { - u128::from_be_bytes(ip.octets) - } -} -#[stable(feature = "i128", since = "1.26.0")] -impl From<u128> for Ipv6Addr { - /// Convert a host byte order `u128` into an `Ipv6Addr`. - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// let addr = Ipv6Addr::from(0x102030405060708090A0B0C0D0E0F00D_u128); - /// assert_eq!( - /// Ipv6Addr::new( - /// 0x1020, 0x3040, 0x5060, 0x7080, - /// 0x90A0, 0xB0C0, 0xD0E0, 0xF00D, - /// ), - /// addr); - /// ``` - #[inline] - fn from(ip: u128) -> Ipv6Addr { - Ipv6Addr::from(ip.to_be_bytes()) - } -} - -#[stable(feature = "ipv6_from_octets", since = "1.9.0")] -impl From<[u8; 16]> for Ipv6Addr { - /// Creates an `Ipv6Addr` from a sixteen element byte array. 
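The `u128` conversions above are defined through big-endian (network-order) bytes, so the integer form and the octet form always agree; a quick consistency sketch using stable APIs:

use std::net::Ipv6Addr;

fn main() {
    let addr = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1);
    // The host-order u128 is the address read as one big-endian integer,
    // so its big-endian bytes are exactly the sixteen octets.
    let n = u128::from(addr);
    assert_eq!(n.to_be_bytes(), addr.octets());
    // The round trip through u128 is lossless.
    assert_eq!(Ipv6Addr::from(n), addr);
}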
- /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// let addr = Ipv6Addr::from([ - /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8, - /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8, - /// ]); - /// assert_eq!( - /// Ipv6Addr::new( - /// 0x1918, 0x1716, - /// 0x1514, 0x1312, - /// 0x1110, 0x0f0e, - /// 0x0d0c, 0x0b0a - /// ), - /// addr - /// ); - /// ``` - #[inline] - fn from(octets: [u8; 16]) -> Ipv6Addr { - Ipv6Addr { octets } - } -} - -#[stable(feature = "ipv6_from_segments", since = "1.16.0")] -impl From<[u16; 8]> for Ipv6Addr { - /// Creates an `Ipv6Addr` from an eight element 16-bit array. - /// - /// # Examples - /// - /// ``` - /// use std::net::Ipv6Addr; - /// - /// let addr = Ipv6Addr::from([ - /// 525u16, 524u16, 523u16, 522u16, - /// 521u16, 520u16, 519u16, 518u16, - /// ]); - /// assert_eq!( - /// Ipv6Addr::new( - /// 0x20d, 0x20c, - /// 0x20b, 0x20a, - /// 0x209, 0x208, - /// 0x207, 0x206 - /// ), - /// addr - /// ); - /// ``` - #[inline] - fn from(segments: [u16; 8]) -> Ipv6Addr { - let [a, b, c, d, e, f, g, h] = segments; - Ipv6Addr::new(a, b, c, d, e, f, g, h) - } -} - -#[stable(feature = "ip_from_slice", since = "1.17.0")] -impl From<[u8; 16]> for IpAddr { - /// Creates an `IpAddr::V6` from a sixteen element byte array. - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv6Addr}; - /// - /// let addr = IpAddr::from([ - /// 25u8, 24u8, 23u8, 22u8, 21u8, 20u8, 19u8, 18u8, - /// 17u8, 16u8, 15u8, 14u8, 13u8, 12u8, 11u8, 10u8, - /// ]); - /// assert_eq!( - /// IpAddr::V6(Ipv6Addr::new( - /// 0x1918, 0x1716, - /// 0x1514, 0x1312, - /// 0x1110, 0x0f0e, - /// 0x0d0c, 0x0b0a - /// )), - /// addr - /// ); - /// ``` - #[inline] - fn from(octets: [u8; 16]) -> IpAddr { - IpAddr::V6(Ipv6Addr::from(octets)) - } -} - -#[stable(feature = "ip_from_slice", since = "1.17.0")] -impl From<[u16; 8]> for IpAddr { - /// Creates an `IpAddr::V6` from an eight element 16-bit array. 
- /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv6Addr}; - /// - /// let addr = IpAddr::from([ - /// 525u16, 524u16, 523u16, 522u16, - /// 521u16, 520u16, 519u16, 518u16, - /// ]); - /// assert_eq!( - /// IpAddr::V6(Ipv6Addr::new( - /// 0x20d, 0x20c, - /// 0x20b, 0x20a, - /// 0x209, 0x208, - /// 0x207, 0x206 - /// )), - /// addr - /// ); - /// ``` - #[inline] - fn from(segments: [u16; 8]) -> IpAddr { - IpAddr::V6(Ipv6Addr::from(segments)) + Ipv6Addr::from(addr.s6_addr) } } diff --git a/library/std/src/net/ip_addr/tests.rs b/library/std/src/net/ip_addr/tests.rs index 7c3430b2b..ab99c0c2f 100644 --- a/library/std/src/net/ip_addr/tests.rs +++ b/library/std/src/net/ip_addr/tests.rs @@ -1,1039 +1,8 @@ -use crate::net::test::{sa4, sa6, tsa}; -use crate::net::*; -use crate::str::FromStr; - -#[test] -fn test_from_str_ipv4() { - assert_eq!(Ok(Ipv4Addr::new(127, 0, 0, 1)), "127.0.0.1".parse()); - assert_eq!(Ok(Ipv4Addr::new(255, 255, 255, 255)), "255.255.255.255".parse()); - assert_eq!(Ok(Ipv4Addr::new(0, 0, 0, 0)), "0.0.0.0".parse()); - - // out of range - let none: Option<Ipv4Addr> = "256.0.0.1".parse().ok(); - assert_eq!(None, none); - // too short - let none: Option<Ipv4Addr> = "255.0.0".parse().ok(); - assert_eq!(None, none); - // too long - let none: Option<Ipv4Addr> = "255.0.0.1.2".parse().ok(); - assert_eq!(None, none); - // no number between dots - let none: Option<Ipv4Addr> = "255.0..1".parse().ok(); - assert_eq!(None, none); - // octal - let none: Option<Ipv4Addr> = "255.0.0.01".parse().ok(); - assert_eq!(None, none); - // octal zero - let none: Option<Ipv4Addr> = "255.0.0.00".parse().ok(); - assert_eq!(None, none); - let none: Option<Ipv4Addr> = "255.0.00.0".parse().ok(); - assert_eq!(None, none); -} - -#[test] -fn test_from_str_ipv6() { - assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "0:0:0:0:0:0:0:0".parse()); - assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "0:0:0:0:0:0:0:1".parse()); - - assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), "::1".parse()); - assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), "::".parse()); - - assert_eq!(Ok(Ipv6Addr::new(0x2a02, 0x6b8, 0, 0, 0, 0, 0x11, 0x11)), "2a02:6b8::11:11".parse()); - - // too long group - let none: Option<Ipv6Addr> = "::00000".parse().ok(); - assert_eq!(None, none); - // too short - let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7".parse().ok(); - assert_eq!(None, none); - // too long - let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:8:9".parse().ok(); - assert_eq!(None, none); - // triple colon - let none: Option<Ipv6Addr> = "1:2:::6:7:8".parse().ok(); - assert_eq!(None, none); - // two double colons - let none: Option<Ipv6Addr> = "1:2::6::8".parse().ok(); - assert_eq!(None, none); - // `::` indicating zero groups of zeros - let none: Option<Ipv6Addr> = "1:2:3:4::5:6:7:8".parse().ok(); - assert_eq!(None, none); -} - -#[test] -fn test_from_str_ipv4_in_ipv6() { - assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 49152, 545)), "::192.0.2.33".parse()); - assert_eq!(Ok(Ipv6Addr::new(0, 0, 0, 0, 0, 0xFFFF, 49152, 545)), "::FFFF:192.0.2.33".parse()); - assert_eq!( - Ok(Ipv6Addr::new(0x64, 0xff9b, 0, 0, 0, 0, 49152, 545)), - "64:ff9b::192.0.2.33".parse() - ); - assert_eq!( - Ok(Ipv6Addr::new(0x2001, 0xdb8, 0x122, 0xc000, 0x2, 0x2100, 49152, 545)), - "2001:db8:122:c000:2:2100:192.0.2.33".parse() - ); - - // colon after v4 - let none: Option<Ipv4Addr> = "::127.0.0.1:".parse().ok(); - assert_eq!(None, none); - // not enough groups - let none: Option<Ipv6Addr> = "1:2:3:4:5:127.0.0.1".parse().ok(); - 
assert_eq!(None, none); - // too many groups - let none: Option<Ipv6Addr> = "1:2:3:4:5:6:7:127.0.0.1".parse().ok(); - assert_eq!(None, none); -} - -#[test] -fn test_from_str_socket_addr() { - assert_eq!(Ok(sa4(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse()); - assert_eq!(Ok(SocketAddrV4::new(Ipv4Addr::new(77, 88, 21, 11), 80)), "77.88.21.11:80".parse()); - assert_eq!( - Ok(sa6(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53)), - "[2a02:6b8:0:1::1]:53".parse() - ); - assert_eq!( - Ok(SocketAddrV6::new(Ipv6Addr::new(0x2a02, 0x6b8, 0, 1, 0, 0, 0, 1), 53, 0, 0)), - "[2a02:6b8:0:1::1]:53".parse() - ); - assert_eq!(Ok(sa6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22)), "[::127.0.0.1]:22".parse()); - assert_eq!( - Ok(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7F00, 1), 22, 0, 0)), - "[::127.0.0.1]:22".parse() - ); - - // without port - let none: Option<SocketAddr> = "127.0.0.1".parse().ok(); - assert_eq!(None, none); - // without port - let none: Option<SocketAddr> = "127.0.0.1:".parse().ok(); - assert_eq!(None, none); - // wrong brackets around v4 - let none: Option<SocketAddr> = "[127.0.0.1]:22".parse().ok(); - assert_eq!(None, none); - // port out of range - let none: Option<SocketAddr> = "127.0.0.1:123456".parse().ok(); - assert_eq!(None, none); -} - -#[test] -fn ipv4_addr_to_string() { - assert_eq!(Ipv4Addr::new(127, 0, 0, 1).to_string(), "127.0.0.1"); - // Short address - assert_eq!(Ipv4Addr::new(1, 1, 1, 1).to_string(), "1.1.1.1"); - // Long address - assert_eq!(Ipv4Addr::new(127, 127, 127, 127).to_string(), "127.127.127.127"); - - // Test padding - assert_eq!(&format!("{:16}", Ipv4Addr::new(1, 1, 1, 1)), "1.1.1.1 "); - assert_eq!(&format!("{:>16}", Ipv4Addr::new(1, 1, 1, 1)), " 1.1.1.1"); -} - -#[test] -fn ipv6_addr_to_string() { - // ipv4-mapped address - let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x280); - assert_eq!(a1.to_string(), "::ffff:192.0.2.128"); - - // ipv4-compatible address - let a1 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0xc000, 0x280); - assert_eq!(a1.to_string(), "::192.0.2.128"); - - // v6 address with no zero segments - assert_eq!(Ipv6Addr::new(8, 9, 10, 11, 12, 13, 14, 15).to_string(), "8:9:a:b:c:d:e:f"); - - // longest possible IPv6 length - assert_eq!( - Ipv6Addr::new(0x1111, 0x2222, 0x3333, 0x4444, 0x5555, 0x6666, 0x7777, 0x8888).to_string(), - "1111:2222:3333:4444:5555:6666:7777:8888" - ); - // padding - assert_eq!(&format!("{:20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), "1:2:3:4:5:6:7:8 "); - assert_eq!(&format!("{:>20}", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8)), " 1:2:3:4:5:6:7:8"); - - // reduce a single run of zeros - assert_eq!( - "ae::ffff:102:304", - Ipv6Addr::new(0xae, 0, 0, 0, 0, 0xffff, 0x0102, 0x0304).to_string() - ); - - // don't reduce just a single zero segment - assert_eq!("1:2:3:4:5:6:0:8", Ipv6Addr::new(1, 2, 3, 4, 5, 6, 0, 8).to_string()); - - // 'any' address - assert_eq!("::", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).to_string()); - - // loopback address - assert_eq!("::1", Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).to_string()); - - // ends in zeros - assert_eq!("1::", Ipv6Addr::new(1, 0, 0, 0, 0, 0, 0, 0).to_string()); - - // two runs of zeros, second one is longer - assert_eq!("1:0:0:4::8", Ipv6Addr::new(1, 0, 0, 4, 0, 0, 0, 8).to_string()); - - // two runs of zeros, equal length - assert_eq!("1::4:5:0:0:8", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8).to_string()); - - // don't prefix `0x` to each segment in `dbg!`. 
- assert_eq!("1::4:5:0:0:8", &format!("{:#?}", Ipv6Addr::new(1, 0, 0, 4, 5, 0, 0, 8))); -} - -#[test] -fn ipv4_to_ipv6() { - assert_eq!( - Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678), - Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_mapped() - ); - assert_eq!( - Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678), - Ipv4Addr::new(0x12, 0x34, 0x56, 0x78).to_ipv6_compatible() - ); -} - -#[test] -fn ipv6_to_ipv4_mapped() { - assert_eq!( - Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4_mapped(), - Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78)) - ); - assert_eq!(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4_mapped(), None); -} - -#[test] -fn ipv6_to_ipv4() { - assert_eq!( - Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678).to_ipv4(), - Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78)) - ); - assert_eq!( - Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x1234, 0x5678).to_ipv4(), - Some(Ipv4Addr::new(0x12, 0x34, 0x56, 0x78)) - ); - assert_eq!(Ipv6Addr::new(0, 0, 1, 0, 0, 0, 0x1234, 0x5678).to_ipv4(), None); -} - -#[test] -fn ip_properties() { - macro_rules! ip { - ($s:expr) => { - IpAddr::from_str($s).unwrap() - }; - } - - macro_rules! check { - ($s:expr) => { - check!($s, 0); - }; - - ($s:expr, $mask:expr) => {{ - let unspec: u8 = 1 << 0; - let loopback: u8 = 1 << 1; - let global: u8 = 1 << 2; - let multicast: u8 = 1 << 3; - let doc: u8 = 1 << 4; - let benchmarking: u8 = 1 << 5; - - if ($mask & unspec) == unspec { - assert!(ip!($s).is_unspecified()); - } else { - assert!(!ip!($s).is_unspecified()); - } - - if ($mask & loopback) == loopback { - assert!(ip!($s).is_loopback()); - } else { - assert!(!ip!($s).is_loopback()); - } - - if ($mask & global) == global { - assert!(ip!($s).is_global()); - } else { - assert!(!ip!($s).is_global()); - } - - if ($mask & multicast) == multicast { - assert!(ip!($s).is_multicast()); - } else { - assert!(!ip!($s).is_multicast()); - } - - if ($mask & doc) == doc { - assert!(ip!($s).is_documentation()); - } else { - assert!(!ip!($s).is_documentation()); - } - - if ($mask & benchmarking) == benchmarking { - assert!(ip!($s).is_benchmarking()); - } else { - assert!(!ip!($s).is_benchmarking()); - } - }}; - } - - let unspec: u8 = 1 << 0; - let loopback: u8 = 1 << 1; - let global: u8 = 1 << 2; - let multicast: u8 = 1 << 3; - let doc: u8 = 1 << 4; - let benchmarking: u8 = 1 << 5; - - check!("0.0.0.0", unspec); - check!("0.0.0.1"); - check!("0.1.0.0"); - check!("10.9.8.7"); - check!("127.1.2.3", loopback); - check!("172.31.254.253"); - check!("169.254.253.242"); - check!("192.0.2.183", doc); - check!("192.1.2.183", global); - check!("192.168.254.253"); - check!("198.51.100.0", doc); - check!("203.0.113.0", doc); - check!("203.2.113.0", global); - check!("224.0.0.0", global | multicast); - check!("239.255.255.255", global | multicast); - check!("255.255.255.255"); - // make sure benchmarking addresses are not global - check!("198.18.0.0", benchmarking); - check!("198.18.54.2", benchmarking); - check!("198.19.255.255", benchmarking); - // make sure addresses reserved for protocol assignment are not global - check!("192.0.0.0"); - check!("192.0.0.255"); - check!("192.0.0.100"); - // make sure reserved addresses are not global - check!("240.0.0.0"); - check!("251.54.1.76"); - check!("254.255.255.255"); - // make sure shared addresses are not global - check!("100.64.0.0"); - check!("100.127.255.255"); - check!("100.100.100.0"); - - check!("::", unspec); - check!("::1", loopback); - check!("::0.0.0.2", global); - check!("1::", global); - check!("fc00::"); - 
check!("fdff:ffff::"); - check!("fe80:ffff::"); - check!("febf:ffff::"); - check!("fec0::", global); - check!("ff01::", global | multicast); - check!("ff02::", global | multicast); - check!("ff03::", global | multicast); - check!("ff04::", global | multicast); - check!("ff05::", global | multicast); - check!("ff08::", global | multicast); - check!("ff0e::", global | multicast); - check!("2001:db8:85a3::8a2e:370:7334", doc); - check!("2001:2::ac32:23ff:21", benchmarking); - check!("102:304:506:708:90a:b0c:d0e:f10", global); -} - -#[test] -fn ipv4_properties() { - macro_rules! ip { - ($s:expr) => { - Ipv4Addr::from_str($s).unwrap() - }; - } - - macro_rules! check { - ($s:expr) => { - check!($s, 0); - }; - - ($s:expr, $mask:expr) => {{ - let unspec: u16 = 1 << 0; - let loopback: u16 = 1 << 1; - let private: u16 = 1 << 2; - let link_local: u16 = 1 << 3; - let global: u16 = 1 << 4; - let multicast: u16 = 1 << 5; - let broadcast: u16 = 1 << 6; - let documentation: u16 = 1 << 7; - let benchmarking: u16 = 1 << 8; - let reserved: u16 = 1 << 10; - let shared: u16 = 1 << 11; - - if ($mask & unspec) == unspec { - assert!(ip!($s).is_unspecified()); - } else { - assert!(!ip!($s).is_unspecified()); - } - - if ($mask & loopback) == loopback { - assert!(ip!($s).is_loopback()); - } else { - assert!(!ip!($s).is_loopback()); - } - - if ($mask & private) == private { - assert!(ip!($s).is_private()); - } else { - assert!(!ip!($s).is_private()); - } - - if ($mask & link_local) == link_local { - assert!(ip!($s).is_link_local()); - } else { - assert!(!ip!($s).is_link_local()); - } - - if ($mask & global) == global { - assert!(ip!($s).is_global()); - } else { - assert!(!ip!($s).is_global()); - } - - if ($mask & multicast) == multicast { - assert!(ip!($s).is_multicast()); - } else { - assert!(!ip!($s).is_multicast()); - } - - if ($mask & broadcast) == broadcast { - assert!(ip!($s).is_broadcast()); - } else { - assert!(!ip!($s).is_broadcast()); - } - - if ($mask & documentation) == documentation { - assert!(ip!($s).is_documentation()); - } else { - assert!(!ip!($s).is_documentation()); - } - - if ($mask & benchmarking) == benchmarking { - assert!(ip!($s).is_benchmarking()); - } else { - assert!(!ip!($s).is_benchmarking()); - } - - if ($mask & reserved) == reserved { - assert!(ip!($s).is_reserved()); - } else { - assert!(!ip!($s).is_reserved()); - } - - if ($mask & shared) == shared { - assert!(ip!($s).is_shared()); - } else { - assert!(!ip!($s).is_shared()); - } - }}; - } - - let unspec: u16 = 1 << 0; - let loopback: u16 = 1 << 1; - let private: u16 = 1 << 2; - let link_local: u16 = 1 << 3; - let global: u16 = 1 << 4; - let multicast: u16 = 1 << 5; - let broadcast: u16 = 1 << 6; - let documentation: u16 = 1 << 7; - let benchmarking: u16 = 1 << 8; - let reserved: u16 = 1 << 10; - let shared: u16 = 1 << 11; - - check!("0.0.0.0", unspec); - check!("0.0.0.1"); - check!("0.1.0.0"); - check!("10.9.8.7", private); - check!("127.1.2.3", loopback); - check!("172.31.254.253", private); - check!("169.254.253.242", link_local); - check!("192.0.2.183", documentation); - check!("192.1.2.183", global); - check!("192.168.254.253", private); - check!("198.51.100.0", documentation); - check!("203.0.113.0", documentation); - check!("203.2.113.0", global); - check!("224.0.0.0", global | multicast); - check!("239.255.255.255", global | multicast); - check!("255.255.255.255", broadcast); - check!("198.18.0.0", benchmarking); - check!("198.18.54.2", benchmarking); - check!("198.19.255.255", benchmarking); - check!("192.0.0.0"); - 
check!("192.0.0.255"); - check!("192.0.0.100"); - check!("240.0.0.0", reserved); - check!("251.54.1.76", reserved); - check!("254.255.255.255", reserved); - check!("100.64.0.0", shared); - check!("100.127.255.255", shared); - check!("100.100.100.0", shared); -} - -#[test] -fn ipv6_properties() { - macro_rules! ip { - ($s:expr) => { - Ipv6Addr::from_str($s).unwrap() - }; - } - - macro_rules! check { - ($s:expr, &[$($octet:expr),*], $mask:expr) => { - assert_eq!($s, ip!($s).to_string()); - let octets = &[$($octet),*]; - assert_eq!(&ip!($s).octets(), octets); - assert_eq!(Ipv6Addr::from(*octets), ip!($s)); - - let unspecified: u32 = 1 << 0; - let loopback: u32 = 1 << 1; - let unique_local: u32 = 1 << 2; - let global: u32 = 1 << 3; - let unicast_link_local: u32 = 1 << 4; - let unicast_global: u32 = 1 << 7; - let documentation: u32 = 1 << 8; - let benchmarking: u32 = 1 << 16; - let multicast_interface_local: u32 = 1 << 9; - let multicast_link_local: u32 = 1 << 10; - let multicast_realm_local: u32 = 1 << 11; - let multicast_admin_local: u32 = 1 << 12; - let multicast_site_local: u32 = 1 << 13; - let multicast_organization_local: u32 = 1 << 14; - let multicast_global: u32 = 1 << 15; - let multicast: u32 = multicast_interface_local - | multicast_admin_local - | multicast_global - | multicast_link_local - | multicast_realm_local - | multicast_site_local - | multicast_organization_local; - - if ($mask & unspecified) == unspecified { - assert!(ip!($s).is_unspecified()); - } else { - assert!(!ip!($s).is_unspecified()); - } - if ($mask & loopback) == loopback { - assert!(ip!($s).is_loopback()); - } else { - assert!(!ip!($s).is_loopback()); - } - if ($mask & unique_local) == unique_local { - assert!(ip!($s).is_unique_local()); - } else { - assert!(!ip!($s).is_unique_local()); - } - if ($mask & global) == global { - assert!(ip!($s).is_global()); - } else { - assert!(!ip!($s).is_global()); - } - if ($mask & unicast_link_local) == unicast_link_local { - assert!(ip!($s).is_unicast_link_local()); - } else { - assert!(!ip!($s).is_unicast_link_local()); - } - if ($mask & unicast_global) == unicast_global { - assert!(ip!($s).is_unicast_global()); - } else { - assert!(!ip!($s).is_unicast_global()); - } - if ($mask & documentation) == documentation { - assert!(ip!($s).is_documentation()); - } else { - assert!(!ip!($s).is_documentation()); - } - if ($mask & benchmarking) == benchmarking { - assert!(ip!($s).is_benchmarking()); - } else { - assert!(!ip!($s).is_benchmarking()); - } - if ($mask & multicast) != 0 { - assert!(ip!($s).multicast_scope().is_some()); - assert!(ip!($s).is_multicast()); - } else { - assert!(ip!($s).multicast_scope().is_none()); - assert!(!ip!($s).is_multicast()); - } - if ($mask & multicast_interface_local) == multicast_interface_local { - assert_eq!(ip!($s).multicast_scope().unwrap(), - Ipv6MulticastScope::InterfaceLocal); - } - if ($mask & multicast_link_local) == multicast_link_local { - assert_eq!(ip!($s).multicast_scope().unwrap(), - Ipv6MulticastScope::LinkLocal); - } - if ($mask & multicast_realm_local) == multicast_realm_local { - assert_eq!(ip!($s).multicast_scope().unwrap(), - Ipv6MulticastScope::RealmLocal); - } - if ($mask & multicast_admin_local) == multicast_admin_local { - assert_eq!(ip!($s).multicast_scope().unwrap(), - Ipv6MulticastScope::AdminLocal); - } - if ($mask & multicast_site_local) == multicast_site_local { - assert_eq!(ip!($s).multicast_scope().unwrap(), - Ipv6MulticastScope::SiteLocal); - } - if ($mask & multicast_organization_local) == multicast_organization_local 
{ - assert_eq!(ip!($s).multicast_scope().unwrap(), - Ipv6MulticastScope::OrganizationLocal); - } - if ($mask & multicast_global) == multicast_global { - assert_eq!(ip!($s).multicast_scope().unwrap(), - Ipv6MulticastScope::Global); - } - } - } - - let unspecified: u32 = 1 << 0; - let loopback: u32 = 1 << 1; - let unique_local: u32 = 1 << 2; - let global: u32 = 1 << 3; - let unicast_link_local: u32 = 1 << 4; - let unicast_global: u32 = 1 << 7; - let documentation: u32 = 1 << 8; - let benchmarking: u32 = 1 << 16; - let multicast_interface_local: u32 = 1 << 9; - let multicast_link_local: u32 = 1 << 10; - let multicast_realm_local: u32 = 1 << 11; - let multicast_admin_local: u32 = 1 << 12; - let multicast_site_local: u32 = 1 << 13; - let multicast_organization_local: u32 = 1 << 14; - let multicast_global: u32 = 1 << 15; - - check!("::", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unspecified); - - check!("::1", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], loopback); - - check!("::0.0.0.2", &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], global | unicast_global); - - check!("1::", &[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], global | unicast_global); - - check!( - "::ffff:127.0.0.1", - &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0x7f, 0, 0, 1], - unicast_global - ); - - check!( - "64:ff9b:1::", - &[0, 0x64, 0xff, 0x9b, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - unicast_global - ); - - check!("100::", &[0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); - - check!("2001::", &[0x20, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); - - check!( - "2001:1::1", - &[0x20, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], - global | unicast_global - ); - - check!( - "2001:1::2", - &[0x20, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], - global | unicast_global - ); - - check!( - "2001:3::", - &[0x20, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - global | unicast_global - ); - - check!( - "2001:4:112::", - &[0x20, 1, 0, 4, 1, 0x12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - global | unicast_global - ); - - check!( - "2001:20::", - &[0x20, 1, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - global | unicast_global - ); - - check!("2001:30::", &[0x20, 1, 0, 0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_global); - - check!( - "2001:200::", - &[0x20, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - global | unicast_global - ); - - check!("fc00::", &[0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unique_local); - - check!( - "fdff:ffff::", - &[0xfd, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - unique_local - ); - - check!( - "fe80:ffff::", - &[0xfe, 0x80, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - unicast_link_local - ); - - check!("fe80::", &[0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local); - - check!( - "febf:ffff::", - &[0xfe, 0xbf, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - unicast_link_local - ); - - check!("febf::", &[0xfe, 0xbf, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], unicast_link_local); - - check!( - "febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff", - &[ - 0xfe, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff - ], - unicast_link_local - ); - - check!( - "fe80::ffff:ffff:ffff:ffff", - &[ - 0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff - ], - unicast_link_local - ); - - check!( - "fe80:0:0:1::", - &[0xfe, 0x80, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], - unicast_link_local - ); - - check!( - "fec0::", - &[0xfe, 
0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - unicast_global | global - ); - - check!( - "ff01::", - &[0xff, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - multicast_interface_local | global - ); - - check!( - "ff02::", - &[0xff, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - multicast_link_local | global - ); - - check!( - "ff03::", - &[0xff, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - multicast_realm_local | global - ); - - check!( - "ff04::", - &[0xff, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - multicast_admin_local | global - ); - - check!( - "ff05::", - &[0xff, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - multicast_site_local | global - ); - - check!( - "ff08::", - &[0xff, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - multicast_organization_local | global - ); - - check!( - "ff0e::", - &[0xff, 0xe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - multicast_global | global - ); - - check!( - "2001:db8:85a3::8a2e:370:7334", - &[0x20, 1, 0xd, 0xb8, 0x85, 0xa3, 0, 0, 0, 0, 0x8a, 0x2e, 3, 0x70, 0x73, 0x34], - documentation - ); - - check!( - "2001:2::ac32:23ff:21", - &[0x20, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0xac, 0x32, 0x23, 0xff, 0, 0x21], - benchmarking - ); - - check!( - "102:304:506:708:90a:b0c:d0e:f10", - &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], - global | unicast_global - ); -} +use crate::net::test::{sa4, tsa}; +use crate::net::Ipv4Addr; #[test] fn to_socket_addr_socketaddr() { let a = sa4(Ipv4Addr::new(77, 88, 21, 11), 12345); assert_eq!(Ok(vec![a]), tsa(a)); } - -#[test] -fn test_ipv4_to_int() { - let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44); - assert_eq!(u32::from(a), 0x11223344); -} - -#[test] -fn test_int_to_ipv4() { - let a = Ipv4Addr::new(0x11, 0x22, 0x33, 0x44); - assert_eq!(Ipv4Addr::from(0x11223344), a); -} - -#[test] -fn test_ipv6_to_int() { - let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11); - assert_eq!(u128::from(a), 0x112233445566778899aabbccddeeff11u128); -} - -#[test] -fn test_int_to_ipv6() { - let a = Ipv6Addr::new(0x1122, 0x3344, 0x5566, 0x7788, 0x99aa, 0xbbcc, 0xddee, 0xff11); - assert_eq!(Ipv6Addr::from(0x112233445566778899aabbccddeeff11u128), a); -} - -#[test] -fn ipv4_from_constructors() { - assert_eq!(Ipv4Addr::LOCALHOST, Ipv4Addr::new(127, 0, 0, 1)); - assert!(Ipv4Addr::LOCALHOST.is_loopback()); - assert_eq!(Ipv4Addr::UNSPECIFIED, Ipv4Addr::new(0, 0, 0, 0)); - assert!(Ipv4Addr::UNSPECIFIED.is_unspecified()); - assert_eq!(Ipv4Addr::BROADCAST, Ipv4Addr::new(255, 255, 255, 255)); - assert!(Ipv4Addr::BROADCAST.is_broadcast()); -} - -#[test] -fn ipv6_from_constructors() { - assert_eq!(Ipv6Addr::LOCALHOST, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); - assert!(Ipv6Addr::LOCALHOST.is_loopback()); - assert_eq!(Ipv6Addr::UNSPECIFIED, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)); - assert!(Ipv6Addr::UNSPECIFIED.is_unspecified()); -} - -#[test] -fn ipv4_from_octets() { - assert_eq!(Ipv4Addr::from([127, 0, 0, 1]), Ipv4Addr::new(127, 0, 0, 1)) -} - -#[test] -fn ipv6_from_segments() { - let from_u16s = - Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]); - let new = Ipv6Addr::new(0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff); - assert_eq!(new, from_u16s); -} - -#[test] -fn ipv6_from_octets() { - let from_u16s = - Ipv6Addr::from([0x0011, 0x2233, 0x4455, 0x6677, 0x8899, 0xaabb, 0xccdd, 0xeeff]); - let from_u8s = Ipv6Addr::from([ - 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, - 0xff, - ]); - assert_eq!(from_u16s, from_u8s); -} - 
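The `ipv6_addr_to_string` cases earlier in this file all follow from RFC 5952's longest-run-of-zeros rule; below is a minimal standalone sketch of that search, mirroring the `Span` scan in the `Display` impl (the function name is illustrative, and note that `Display` only compresses runs longer than one segment):

// Locate the (start, len) of the longest run of zero segments,
// preferring the earliest run on ties, as RFC 5952 requires.
fn longest_zero_run(segments: &[u16; 8]) -> (usize, usize) {
    let (mut best, mut cur) = ((0, 0), (0, 0));
    for (i, &seg) in segments.iter().enumerate() {
        if seg == 0 {
            if cur.1 == 0 {
                cur.0 = i;
            }
            cur.1 += 1;
            if cur.1 > best.1 {
                best = cur;
            }
        } else {
            cur = (0, 0);
        }
    }
    best
}

fn main() {
    // Two runs, the second is longer: rendered as `1:0:0:4::8`.
    assert_eq!(longest_zero_run(&[1, 0, 0, 4, 0, 0, 0, 8]), (4, 3));
    // Equal-length runs: the first wins, rendered as `1::4:5:0:0:8`.
    assert_eq!(longest_zero_run(&[1, 0, 0, 4, 5, 0, 0, 8]), (1, 2));
    // A length-1 run is found but not compressed: `1:2:3:4:5:6:0:8`.
    assert_eq!(longest_zero_run(&[1, 2, 3, 4, 5, 6, 0, 8]), (6, 1));
}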
-#[test] -fn cmp() { - let v41 = Ipv4Addr::new(100, 64, 3, 3); - let v42 = Ipv4Addr::new(192, 0, 2, 2); - let v61 = "2001:db8:f00::1002".parse::<Ipv6Addr>().unwrap(); - let v62 = "2001:db8:f00::2001".parse::<Ipv6Addr>().unwrap(); - assert!(v41 < v42); - assert!(v61 < v62); - - assert_eq!(v41, IpAddr::V4(v41)); - assert_eq!(v61, IpAddr::V6(v61)); - assert!(v41 != IpAddr::V4(v42)); - assert!(v61 != IpAddr::V6(v62)); - - assert!(v41 < IpAddr::V4(v42)); - assert!(v61 < IpAddr::V6(v62)); - assert!(IpAddr::V4(v41) < v42); - assert!(IpAddr::V6(v61) < v62); - - assert!(v41 < IpAddr::V6(v61)); - assert!(IpAddr::V4(v41) < v61); -} - -#[test] -fn is_v4() { - let ip = IpAddr::V4(Ipv4Addr::new(100, 64, 3, 3)); - assert!(ip.is_ipv4()); - assert!(!ip.is_ipv6()); -} - -#[test] -fn is_v6() { - let ip = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0x1234, 0x5678)); - assert!(!ip.is_ipv4()); - assert!(ip.is_ipv6()); -} - -#[test] -fn ipv4_const() { - // test that the methods of `Ipv4Addr` are usable in a const context - - const IP_ADDRESS: Ipv4Addr = Ipv4Addr::new(127, 0, 0, 1); - assert_eq!(IP_ADDRESS, Ipv4Addr::LOCALHOST); - - const OCTETS: [u8; 4] = IP_ADDRESS.octets(); - assert_eq!(OCTETS, [127, 0, 0, 1]); - - const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified(); - assert!(!IS_UNSPECIFIED); - - const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback(); - assert!(IS_LOOPBACK); - - const IS_PRIVATE: bool = IP_ADDRESS.is_private(); - assert!(!IS_PRIVATE); - - const IS_LINK_LOCAL: bool = IP_ADDRESS.is_link_local(); - assert!(!IS_LINK_LOCAL); - - const IS_GLOBAL: bool = IP_ADDRESS.is_global(); - assert!(!IS_GLOBAL); - - const IS_SHARED: bool = IP_ADDRESS.is_shared(); - assert!(!IS_SHARED); - - const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking(); - assert!(!IS_BENCHMARKING); - - const IS_RESERVED: bool = IP_ADDRESS.is_reserved(); - assert!(!IS_RESERVED); - - const IS_MULTICAST: bool = IP_ADDRESS.is_multicast(); - assert!(!IS_MULTICAST); - - const IS_BROADCAST: bool = IP_ADDRESS.is_broadcast(); - assert!(!IS_BROADCAST); - - const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation(); - assert!(!IS_DOCUMENTATION); - - const IP_V6_COMPATIBLE: Ipv6Addr = IP_ADDRESS.to_ipv6_compatible(); - assert_eq!( - IP_V6_COMPATIBLE, - Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 0, 0, 1]) - ); - - const IP_V6_MAPPED: Ipv6Addr = IP_ADDRESS.to_ipv6_mapped(); - assert_eq!( - IP_V6_MAPPED, - Ipv6Addr::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 127, 0, 0, 1]) - ); -} - -#[test] -fn ipv6_const() { - // test that the methods of `Ipv6Addr` are usable in a const context - - const IP_ADDRESS: Ipv6Addr = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); - assert_eq!(IP_ADDRESS, Ipv6Addr::LOCALHOST); - - const SEGMENTS: [u16; 8] = IP_ADDRESS.segments(); - assert_eq!(SEGMENTS, [0, 0, 0, 0, 0, 0, 0, 1]); - - const OCTETS: [u8; 16] = IP_ADDRESS.octets(); - assert_eq!(OCTETS, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]); - - const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified(); - assert!(!IS_UNSPECIFIED); - - const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback(); - assert!(IS_LOOPBACK); - - const IS_GLOBAL: bool = IP_ADDRESS.is_global(); - assert!(!IS_GLOBAL); - - const IS_UNIQUE_LOCAL: bool = IP_ADDRESS.is_unique_local(); - assert!(!IS_UNIQUE_LOCAL); - - const IS_UNICAST_LINK_LOCAL: bool = IP_ADDRESS.is_unicast_link_local(); - assert!(!IS_UNICAST_LINK_LOCAL); - - const IS_DOCUMENTATION: bool = IP_ADDRESS.is_documentation(); - assert!(!IS_DOCUMENTATION); - - const IS_BENCHMARKING: bool = IP_ADDRESS.is_benchmarking(); - 
assert!(!IS_BENCHMARKING); - - const IS_UNICAST_GLOBAL: bool = IP_ADDRESS.is_unicast_global(); - assert!(!IS_UNICAST_GLOBAL); - - const MULTICAST_SCOPE: Option<Ipv6MulticastScope> = IP_ADDRESS.multicast_scope(); - assert_eq!(MULTICAST_SCOPE, None); - - const IS_MULTICAST: bool = IP_ADDRESS.is_multicast(); - assert!(!IS_MULTICAST); - - const IP_V4: Option<Ipv4Addr> = IP_ADDRESS.to_ipv4(); - assert_eq!(IP_V4.unwrap(), Ipv4Addr::new(0, 0, 0, 1)); -} - -#[test] -fn ip_const() { - // test that the methods of `IpAddr` are usable in a const context - - const IP_ADDRESS: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); - - const IS_UNSPECIFIED: bool = IP_ADDRESS.is_unspecified(); - assert!(!IS_UNSPECIFIED); - - const IS_LOOPBACK: bool = IP_ADDRESS.is_loopback(); - assert!(IS_LOOPBACK); - - const IS_GLOBAL: bool = IP_ADDRESS.is_global(); - assert!(!IS_GLOBAL); - - const IS_MULTICAST: bool = IP_ADDRESS.is_multicast(); - assert!(!IS_MULTICAST); - - const IS_IP_V4: bool = IP_ADDRESS.is_ipv4(); - assert!(IS_IP_V4); - - const IS_IP_V6: bool = IP_ADDRESS.is_ipv6(); - assert!(!IS_IP_V6); -} - -#[test] -fn structural_match() { - // test that all IP types can be structurally matched upon - - const IPV4: Ipv4Addr = Ipv4Addr::LOCALHOST; - match IPV4 { - Ipv4Addr::LOCALHOST => {} - _ => unreachable!(), - } - - const IPV6: Ipv6Addr = Ipv6Addr::LOCALHOST; - match IPV6 { - Ipv6Addr::LOCALHOST => {} - _ => unreachable!(), - } - - const IP: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); - match IP { - IpAddr::V4(Ipv4Addr::LOCALHOST) => {} - _ => unreachable!(), - } -} diff --git a/library/std/src/net/mod.rs b/library/std/src/net/mod.rs index 19d90e7ec..bcab15db3 100644 --- a/library/std/src/net/mod.rs +++ b/library/std/src/net/mod.rs @@ -26,8 +26,6 @@ use crate::io::{self, ErrorKind}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::ip_addr::{IpAddr, Ipv4Addr, Ipv6Addr, Ipv6MulticastScope}; #[stable(feature = "rust1", since = "1.0.0")] -pub use self::parser::AddrParseError; -#[stable(feature = "rust1", since = "1.0.0")] pub use self::socket_addr::{SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs}; #[unstable(feature = "tcplistener_into_incoming", issue = "88339")] pub use self::tcp::IntoIncoming; @@ -35,10 +33,10 @@ pub use self::tcp::IntoIncoming; pub use self::tcp::{Incoming, TcpListener, TcpStream}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::udp::UdpSocket; +#[stable(feature = "rust1", since = "1.0.0")] +pub use core::net::AddrParseError; -mod display_buffer; mod ip_addr; -mod parser; mod socket_addr; mod tcp; #[cfg(test)] diff --git a/library/std/src/net/socket_addr.rs b/library/std/src/net/socket_addr.rs index 33b0dfa03..421fed907 100644 --- a/library/std/src/net/socket_addr.rs +++ b/library/std/src/net/socket_addr.rs @@ -1,9 +1,7 @@ +// Tests for this module #[cfg(all(test, not(target_os = "emscripten")))] mod tests; -use crate::cmp::Ordering; -use crate::fmt::{self, Write}; -use crate::hash; use crate::io; use crate::iter; use crate::mem; @@ -15,533 +13,23 @@ use crate::sys_common::net::LookupHost; use crate::sys_common::{FromInner, IntoInner}; use crate::vec; -use super::display_buffer::DisplayBuffer; - -/// An internet socket address, either IPv4 or IPv6. -/// -/// Internet socket addresses consist of an [IP address], a 16-bit port number, as well -/// as possibly some version-dependent additional information. See [`SocketAddrV4`]'s and -/// [`SocketAddrV6`]'s respective documentation for more details. 
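Every accessor on this enum follows the same shape: match on the variant and delegate to the inner `SocketAddrV4` or `SocketAddrV6` value. A small usage sketch of that dispatch (stable APIs; the helper name is illustrative):

use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};

// Dispatch on the variant, the same shape `ip()`/`port()` use internally.
fn describe(addr: SocketAddr) -> String {
    match addr {
        SocketAddr::V4(v4) => format!("v4 {}:{}", v4.ip(), v4.port()),
        SocketAddr::V6(v6) => format!("v6 [{}]:{}", v6.ip(), v6.port()),
    }
}

fn main() {
    assert_eq!(describe(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080)), "v4 127.0.0.1:8080");
    assert_eq!(describe(SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080)), "v6 [::1]:8080");
}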
-/// -/// The size of a `SocketAddr` instance may vary depending on the target operating -/// system. -/// -/// [IP address]: IpAddr -/// -/// # Examples -/// -/// ``` -/// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -/// -/// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); -/// -/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket)); -/// assert_eq!(socket.port(), 8080); -/// assert_eq!(socket.is_ipv4(), true); -/// ``` -#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[stable(feature = "rust1", since = "1.0.0")] -pub enum SocketAddr { - /// An IPv4 socket address. - #[stable(feature = "rust1", since = "1.0.0")] - V4(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV4), - /// An IPv6 socket address. - #[stable(feature = "rust1", since = "1.0.0")] - V6(#[stable(feature = "rust1", since = "1.0.0")] SocketAddrV6), -} - -/// An IPv4 socket address. -/// -/// IPv4 socket addresses consist of an [`IPv4` address] and a 16-bit port number, as -/// stated in [IETF RFC 793]. -/// -/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses. -/// -/// The size of a `SocketAddrV4` struct may vary depending on the target operating -/// system. Do not assume that this type has the same memory layout as the underlying -/// system representation. -/// -/// [IETF RFC 793]: https://tools.ietf.org/html/rfc793 -/// [`IPv4` address]: Ipv4Addr -/// -/// # Examples -/// -/// ``` -/// use std::net::{Ipv4Addr, SocketAddrV4}; -/// -/// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); -/// -/// assert_eq!("127.0.0.1:8080".parse(), Ok(socket)); -/// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1)); -/// assert_eq!(socket.port(), 8080); -/// ``` -#[derive(Copy, Clone, Eq, PartialEq)] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct SocketAddrV4 { - ip: Ipv4Addr, - port: u16, -} - -/// An IPv6 socket address. -/// -/// IPv6 socket addresses consist of an [`IPv6` address], a 16-bit port number, as well -/// as fields containing the traffic class, the flow label, and a scope identifier -/// (see [IETF RFC 2553, Section 3.3] for more details). -/// -/// See [`SocketAddr`] for a type encompassing both IPv4 and IPv6 socket addresses. -/// -/// The size of a `SocketAddrV6` struct may vary depending on the target operating -/// system. Do not assume that this type has the same memory layout as the underlying -/// system representation. -/// -/// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 -/// [`IPv6` address]: Ipv6Addr -/// -/// # Examples -/// -/// ``` -/// use std::net::{Ipv6Addr, SocketAddrV6}; -/// -/// let socket = SocketAddrV6::new(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1), 8080, 0, 0); -/// -/// assert_eq!("[2001:db8::1]:8080".parse(), Ok(socket)); -/// assert_eq!(socket.ip(), &Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1)); -/// assert_eq!(socket.port(), 8080); -/// ``` -#[derive(Copy, Clone, Eq, PartialEq)] #[stable(feature = "rust1", since = "1.0.0")] -pub struct SocketAddrV6 { - ip: Ipv6Addr, - port: u16, - flowinfo: u32, - scope_id: u32, -} - -impl SocketAddr { - /// Creates a new socket address from an [IP address] and a port number. 
- /// - /// [IP address]: IpAddr - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - /// - /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); - /// assert_eq!(socket.port(), 8080); - /// ``` - #[stable(feature = "ip_addr", since = "1.7.0")] - #[must_use] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn new(ip: IpAddr, port: u16) -> SocketAddr { - match ip { - IpAddr::V4(a) => SocketAddr::V4(SocketAddrV4::new(a, port)), - IpAddr::V6(a) => SocketAddr::V6(SocketAddrV6::new(a, port, 0, 0)), - } - } - - /// Returns the IP address associated with this socket address. - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - /// - /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); - /// ``` - #[must_use] - #[stable(feature = "ip_addr", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn ip(&self) -> IpAddr { - match *self { - SocketAddr::V4(ref a) => IpAddr::V4(*a.ip()), - SocketAddr::V6(ref a) => IpAddr::V6(*a.ip()), - } - } - - /// Changes the IP address associated with this socket address. - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - /// - /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - /// socket.set_ip(IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1))); - /// assert_eq!(socket.ip(), IpAddr::V4(Ipv4Addr::new(10, 10, 0, 1))); - /// ``` - #[stable(feature = "sockaddr_setters", since = "1.9.0")] - pub fn set_ip(&mut self, new_ip: IpAddr) { - // `match (*self, new_ip)` would have us mutate a copy of self only to throw it away. - match (self, new_ip) { - (&mut SocketAddr::V4(ref mut a), IpAddr::V4(new_ip)) => a.set_ip(new_ip), - (&mut SocketAddr::V6(ref mut a), IpAddr::V6(new_ip)) => a.set_ip(new_ip), - (self_, new_ip) => *self_ = Self::new(new_ip, self_.port()), - } - } - - /// Returns the port number associated with this socket address. - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - /// - /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - /// assert_eq!(socket.port(), 8080); - /// ``` - #[must_use] - #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn port(&self) -> u16 { - match *self { - SocketAddr::V4(ref a) => a.port(), - SocketAddr::V6(ref a) => a.port(), - } - } - - /// Changes the port number associated with this socket address. - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - /// - /// let mut socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - /// socket.set_port(1025); - /// assert_eq!(socket.port(), 1025); - /// ``` - #[stable(feature = "sockaddr_setters", since = "1.9.0")] - pub fn set_port(&mut self, new_port: u16) { - match *self { - SocketAddr::V4(ref mut a) => a.set_port(new_port), - SocketAddr::V6(ref mut a) => a.set_port(new_port), - } - } - - /// Returns [`true`] if the [IP address] in this `SocketAddr` is an - /// [`IPv4` address], and [`false`] otherwise. 
- /// - /// [IP address]: IpAddr - /// [`IPv4` address]: IpAddr::V4 - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - /// - /// let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - /// assert_eq!(socket.is_ipv4(), true); - /// assert_eq!(socket.is_ipv6(), false); - /// ``` - #[must_use] - #[stable(feature = "sockaddr_checker", since = "1.16.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn is_ipv4(&self) -> bool { - matches!(*self, SocketAddr::V4(_)) - } - - /// Returns [`true`] if the [IP address] in this `SocketAddr` is an - /// [`IPv6` address], and [`false`] otherwise. - /// - /// [IP address]: IpAddr - /// [`IPv6` address]: IpAddr::V6 - /// - /// # Examples - /// - /// ``` - /// use std::net::{IpAddr, Ipv6Addr, SocketAddr}; - /// - /// let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 65535, 0, 1)), 8080); - /// assert_eq!(socket.is_ipv4(), false); - /// assert_eq!(socket.is_ipv6(), true); - /// ``` - #[must_use] - #[stable(feature = "sockaddr_checker", since = "1.16.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn is_ipv6(&self) -> bool { - matches!(*self, SocketAddr::V6(_)) - } -} - -impl SocketAddrV4 { - /// Creates a new socket address from an [`IPv4` address] and a port number. - /// - /// [`IPv4` address]: Ipv4Addr - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV4, Ipv4Addr}; - /// - /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[must_use] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn new(ip: Ipv4Addr, port: u16) -> SocketAddrV4 { - SocketAddrV4 { ip, port } - } - - /// Returns the IP address associated with this socket address. - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV4, Ipv4Addr}; - /// - /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); - /// assert_eq!(socket.ip(), &Ipv4Addr::new(127, 0, 0, 1)); - /// ``` - #[must_use] - #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn ip(&self) -> &Ipv4Addr { - &self.ip - } - - /// Changes the IP address associated with this socket address. - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV4, Ipv4Addr}; - /// - /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); - /// socket.set_ip(Ipv4Addr::new(192, 168, 0, 1)); - /// assert_eq!(socket.ip(), &Ipv4Addr::new(192, 168, 0, 1)); - /// ``` - #[stable(feature = "sockaddr_setters", since = "1.9.0")] - pub fn set_ip(&mut self, new_ip: Ipv4Addr) { - self.ip = new_ip; - } - - /// Returns the port number associated with this socket address. - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV4, Ipv4Addr}; - /// - /// let socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); - /// assert_eq!(socket.port(), 8080); - /// ``` - #[must_use] - #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn port(&self) -> u16 { - self.port - } - - /// Changes the port number associated with this socket address. 
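For reference, a minimal stable-Rust sketch of how the `SocketAddr` enum removed above (and re-exported from `core::net` later in this diff) is consumed; the address and port are illustrative:

```rust
use std::net::SocketAddr;

fn main() {
    let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
    assert!(addr.is_ipv4() && !addr.is_ipv6());

    // The common accessors work across both variants; matching exposes
    // the concrete `SocketAddrV4`/`SocketAddrV6` payloads.
    match addr {
        SocketAddr::V4(v4) => println!("v4 {} on port {}", v4.ip(), v4.port()),
        SocketAddr::V6(v6) => println!("v6 {} on port {}", v6.ip(), v6.port()),
    }
}
```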
- /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV4, Ipv4Addr}; - /// - /// let mut socket = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 8080); - /// socket.set_port(4242); - /// assert_eq!(socket.port(), 4242); - /// ``` - #[stable(feature = "sockaddr_setters", since = "1.9.0")] - pub fn set_port(&mut self, new_port: u16) { - self.port = new_port; - } -} - -impl SocketAddrV6 { - /// Creates a new socket address from an [`IPv6` address], a 16-bit port number, - /// and the `flowinfo` and `scope_id` fields. - /// - /// For more information on the meaning and layout of the `flowinfo` and `scope_id` - /// parameters, see [IETF RFC 2553, Section 3.3]. - /// - /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 - /// [`IPv6` address]: Ipv6Addr - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV6, Ipv6Addr}; - /// - /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[must_use] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn new(ip: Ipv6Addr, port: u16, flowinfo: u32, scope_id: u32) -> SocketAddrV6 { - SocketAddrV6 { ip, port, flowinfo, scope_id } - } - - /// Returns the IP address associated with this socket address. - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV6, Ipv6Addr}; - /// - /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); - /// assert_eq!(socket.ip(), &Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); - /// ``` - #[must_use] - #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn ip(&self) -> &Ipv6Addr { - &self.ip - } - - /// Changes the IP address associated with this socket address. - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV6, Ipv6Addr}; - /// - /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); - /// socket.set_ip(Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0)); - /// assert_eq!(socket.ip(), &Ipv6Addr::new(76, 45, 0, 0, 0, 0, 0, 0)); - /// ``` - #[stable(feature = "sockaddr_setters", since = "1.9.0")] - pub fn set_ip(&mut self, new_ip: Ipv6Addr) { - self.ip = new_ip; - } - - /// Returns the port number associated with this socket address. - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV6, Ipv6Addr}; - /// - /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); - /// assert_eq!(socket.port(), 8080); - /// ``` - #[must_use] - #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn port(&self) -> u16 { - self.port - } - - /// Changes the port number associated with this socket address. - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV6, Ipv6Addr}; - /// - /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 0); - /// socket.set_port(4242); - /// assert_eq!(socket.port(), 4242); - /// ``` - #[stable(feature = "sockaddr_setters", since = "1.9.0")] - pub fn set_port(&mut self, new_port: u16) { - self.port = new_port; - } - - /// Returns the flow information associated with this address. - /// - /// This information corresponds to the `sin6_flowinfo` field in C's `netinet/in.h`, - /// as specified in [IETF RFC 2553, Section 3.3]. 
- /// It combines information about the flow label and the traffic class as specified - /// in [IETF RFC 2460], respectively [Section 6] and [Section 7]. - /// - /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 - /// [IETF RFC 2460]: https://tools.ietf.org/html/rfc2460 - /// [Section 6]: https://tools.ietf.org/html/rfc2460#section-6 - /// [Section 7]: https://tools.ietf.org/html/rfc2460#section-7 - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV6, Ipv6Addr}; - /// - /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0); - /// assert_eq!(socket.flowinfo(), 10); - /// ``` - #[must_use] - #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn flowinfo(&self) -> u32 { - self.flowinfo - } - - /// Changes the flow information associated with this socket address. - /// - /// See [`SocketAddrV6::flowinfo`]'s documentation for more details. - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV6, Ipv6Addr}; - /// - /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 10, 0); - /// socket.set_flowinfo(56); - /// assert_eq!(socket.flowinfo(), 56); - /// ``` - #[stable(feature = "sockaddr_setters", since = "1.9.0")] - pub fn set_flowinfo(&mut self, new_flowinfo: u32) { - self.flowinfo = new_flowinfo; - } - - /// Returns the scope ID associated with this address. - /// - /// This information corresponds to the `sin6_scope_id` field in C's `netinet/in.h`, - /// as specified in [IETF RFC 2553, Section 3.3]. - /// - /// [IETF RFC 2553, Section 3.3]: https://tools.ietf.org/html/rfc2553#section-3.3 - /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV6, Ipv6Addr}; - /// - /// let socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78); - /// assert_eq!(socket.scope_id(), 78); - /// ``` - #[must_use] - #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_socketaddr", issue = "82485")] - pub const fn scope_id(&self) -> u32 { - self.scope_id - } - - /// Changes the scope ID associated with this socket address. - /// - /// See [`SocketAddrV6::scope_id`]'s documentation for more details. 
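A short sketch of the scope-id behavior documented above; the interface index 3 is purely illustrative (a real value would come from the local interface table):

```rust
use std::net::{Ipv6Addr, SocketAddrV6};

fn main() {
    // fe80::/10 is link-local; the scope id selects which interface
    // the address is valid on.
    let ll = Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 1);
    let mut addr = SocketAddrV6::new(ll, 8080, 0, 3);
    assert_eq!(addr.scope_id(), 3);

    // Display appends the scope id after a '%' when it is non-zero.
    assert_eq!(addr.to_string(), "[fe80::1%3]:8080");

    addr.set_scope_id(0);
    assert_eq!(addr.to_string(), "[fe80::1]:8080");
}
```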
- /// - /// # Examples - /// - /// ``` - /// use std::net::{SocketAddrV6, Ipv6Addr}; - /// - /// let mut socket = SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 8080, 0, 78); - /// socket.set_scope_id(42); - /// assert_eq!(socket.scope_id(), 42); - /// ``` - #[stable(feature = "sockaddr_setters", since = "1.9.0")] - pub fn set_scope_id(&mut self, new_scope_id: u32) { - self.scope_id = new_scope_id; - } -} +pub use core::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; impl FromInner<c::sockaddr_in> for SocketAddrV4 { fn from_inner(addr: c::sockaddr_in) -> SocketAddrV4 { - SocketAddrV4 { ip: Ipv4Addr::from_inner(addr.sin_addr), port: u16::from_be(addr.sin_port) } + SocketAddrV4::new(Ipv4Addr::from_inner(addr.sin_addr), u16::from_be(addr.sin_port)) } } impl FromInner<c::sockaddr_in6> for SocketAddrV6 { fn from_inner(addr: c::sockaddr_in6) -> SocketAddrV6 { - SocketAddrV6 { - ip: Ipv6Addr::from_inner(addr.sin6_addr), - port: u16::from_be(addr.sin6_port), - flowinfo: addr.sin6_flowinfo, - scope_id: addr.sin6_scope_id, - } + SocketAddrV6::new( + Ipv6Addr::from_inner(addr.sin6_addr), + u16::from_be(addr.sin6_port), + addr.sin6_flowinfo, + addr.sin6_scope_id, + ) } } @@ -549,8 +37,8 @@ impl IntoInner<c::sockaddr_in> for SocketAddrV4 { fn into_inner(self) -> c::sockaddr_in { c::sockaddr_in { sin_family: c::AF_INET as c::sa_family_t, - sin_port: self.port.to_be(), - sin_addr: self.ip.into_inner(), + sin_port: self.port().to_be(), + sin_addr: self.ip().into_inner(), ..unsafe { mem::zeroed() } } } @@ -560,162 +48,15 @@ impl IntoInner<c::sockaddr_in6> for SocketAddrV6 { fn into_inner(self) -> c::sockaddr_in6 { c::sockaddr_in6 { sin6_family: c::AF_INET6 as c::sa_family_t, - sin6_port: self.port.to_be(), - sin6_addr: self.ip.into_inner(), - sin6_flowinfo: self.flowinfo, - sin6_scope_id: self.scope_id, + sin6_port: self.port().to_be(), + sin6_addr: self.ip().into_inner(), + sin6_flowinfo: self.flowinfo(), + sin6_scope_id: self.scope_id(), ..unsafe { mem::zeroed() } } } } -#[stable(feature = "ip_from_ip", since = "1.16.0")] -impl From<SocketAddrV4> for SocketAddr { - /// Converts a [`SocketAddrV4`] into a [`SocketAddr::V4`]. - fn from(sock4: SocketAddrV4) -> SocketAddr { - SocketAddr::V4(sock4) - } -} - -#[stable(feature = "ip_from_ip", since = "1.16.0")] -impl From<SocketAddrV6> for SocketAddr { - /// Converts a [`SocketAddrV6`] into a [`SocketAddr::V6`]. - fn from(sock6: SocketAddrV6) -> SocketAddr { - SocketAddr::V6(sock6) - } -} - -#[stable(feature = "addr_from_into_ip", since = "1.17.0")] -impl<I: Into<IpAddr>> From<(I, u16)> for SocketAddr { - /// Converts a tuple struct (Into<[`IpAddr`]>, `u16`) into a [`SocketAddr`]. - /// - /// This conversion creates a [`SocketAddr::V4`] for an [`IpAddr::V4`] - /// and creates a [`SocketAddr::V6`] for an [`IpAddr::V6`]. - /// - /// `u16` is treated as port of the newly created [`SocketAddr`]. 
- fn from(pieces: (I, u16)) -> SocketAddr { - SocketAddr::new(pieces.0.into(), pieces.1) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Display for SocketAddr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - SocketAddr::V4(ref a) => a.fmt(f), - SocketAddr::V6(ref a) => a.fmt(f), - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for SocketAddr { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Display for SocketAddrV4 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // If there are no alignment requirements, write the socket address directly to `f`. - // Otherwise, write it to a local buffer and then use `f.pad`. - if f.precision().is_none() && f.width().is_none() { - write!(f, "{}:{}", self.ip(), self.port()) - } else { - const LONGEST_IPV4_SOCKET_ADDR: &str = "255.255.255.255:65536"; - - let mut buf = DisplayBuffer::<{ LONGEST_IPV4_SOCKET_ADDR.len() }>::new(); - // Buffer is long enough for the longest possible IPv4 socket address, so this should never fail. - write!(buf, "{}:{}", self.ip(), self.port()).unwrap(); - - f.pad(buf.as_str()) - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for SocketAddrV4 { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Display for SocketAddrV6 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // If there are no alignment requirements, write the socket address directly to `f`. - // Otherwise, write it to a local buffer and then use `f.pad`. - if f.precision().is_none() && f.width().is_none() { - match self.scope_id() { - 0 => write!(f, "[{}]:{}", self.ip(), self.port()), - scope_id => write!(f, "[{}%{}]:{}", self.ip(), scope_id, self.port()), - } - } else { - const LONGEST_IPV6_SOCKET_ADDR: &str = - "[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%4294967296]:65536"; - - let mut buf = DisplayBuffer::<{ LONGEST_IPV6_SOCKET_ADDR.len() }>::new(); - match self.scope_id() { - 0 => write!(buf, "[{}]:{}", self.ip(), self.port()), - scope_id => write!(buf, "[{}%{}]:{}", self.ip(), scope_id, self.port()), - } - // Buffer is long enough for the longest possible IPv6 socket address, so this should never fail. 
- .unwrap(); - - f.pad(buf.as_str()) - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for SocketAddrV6 { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, fmt) - } -} - -#[stable(feature = "socketaddr_ordering", since = "1.45.0")] -impl PartialOrd for SocketAddrV4 { - fn partial_cmp(&self, other: &SocketAddrV4) -> Option<Ordering> { - Some(self.cmp(other)) - } -} - -#[stable(feature = "socketaddr_ordering", since = "1.45.0")] -impl PartialOrd for SocketAddrV6 { - fn partial_cmp(&self, other: &SocketAddrV6) -> Option<Ordering> { - Some(self.cmp(other)) - } -} - -#[stable(feature = "socketaddr_ordering", since = "1.45.0")] -impl Ord for SocketAddrV4 { - fn cmp(&self, other: &SocketAddrV4) -> Ordering { - self.ip().cmp(other.ip()).then(self.port().cmp(&other.port())) - } -} - -#[stable(feature = "socketaddr_ordering", since = "1.45.0")] -impl Ord for SocketAddrV6 { - fn cmp(&self, other: &SocketAddrV6) -> Ordering { - self.ip().cmp(other.ip()).then(self.port().cmp(&other.port())) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl hash::Hash for SocketAddrV4 { - fn hash<H: hash::Hasher>(&self, s: &mut H) { - (self.port, self.ip).hash(s) - } -} -#[stable(feature = "rust1", since = "1.0.0")] -impl hash::Hash for SocketAddrV6 { - fn hash<H: hash::Hasher>(&self, s: &mut H) { - (self.port, &self.ip, self.flowinfo, self.scope_id).hash(s) - } -} - /// A trait for objects which can be converted or resolved to one or more /// [`SocketAddr`] values. /// diff --git a/library/std/src/net/socket_addr/tests.rs b/library/std/src/net/socket_addr/tests.rs index 15211f819..dfc6dabbe 100644 --- a/library/std/src/net/socket_addr/tests.rs +++ b/library/std/src/net/socket_addr/tests.rs @@ -64,11 +64,11 @@ fn ipv4_socket_addr_to_string() { // Test padding. assert_eq!( - &format!("{:16}", SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), 53)), + format!("{:16}", SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), 53)), "1.1.1.1:53 " ); assert_eq!( - &format!("{:>16}", SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), 53)), + format!("{:>16}", SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), 53)), " 1.1.1.1:53" ); } @@ -111,11 +111,11 @@ fn ipv6_socket_addr_to_string() { // Test padding. assert_eq!( - &format!("{:22}", SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9, 0, 0)), + format!("{:22}", SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9, 0, 0)), "[1:2:3:4:5:6:7:8]:9 " ); assert_eq!( - &format!("{:>22}", SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9, 0, 0)), + format!("{:>22}", SocketAddrV6::new(Ipv6Addr::new(1, 2, 3, 4, 5, 6, 7, 8), 9, 0, 0)), " [1:2:3:4:5:6:7:8]:9" ); } diff --git a/library/std/src/net/tcp.rs b/library/std/src/net/tcp.rs index 69b72a81c..ac09a8059 100644 --- a/library/std/src/net/tcp.rs +++ b/library/std/src/net/tcp.rs @@ -829,7 +829,7 @@ impl TcpListener { /// } /// /// fn main() -> std::io::Result<()> { - /// let listener = TcpListener::bind("127.0.0.1:80").unwrap(); + /// let listener = TcpListener::bind("127.0.0.1:80")?; /// /// for stream in listener.incoming() { /// match stream { diff --git a/library/std/src/net/tcp/tests.rs b/library/std/src/net/tcp/tests.rs index 8c0adcfb0..e019bc0b6 100644 --- a/library/std/src/net/tcp/tests.rs +++ b/library/std/src/net/tcp/tests.rs @@ -670,7 +670,10 @@ fn debug() { // FIXME: re-enabled openbsd tests once their socket timeout code // no longer has rounding errors. // VxWorks ignores SO_SNDTIMEO. 
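The `DisplayBuffer` path above only kicks in when a width or precision is requested, which is exactly what the updated padding tests exercise. A small stable-Rust sketch of the observable behavior:

```rust
use std::net::{Ipv4Addr, SocketAddrV4};

fn main() {
    let addr = SocketAddrV4::new(Ipv4Addr::new(1, 1, 1, 1), 53);

    // Without width/precision the address is written straight through;
    // with them, it is formatted into a local buffer first so that
    // `f.pad` sees a single string to align.
    assert_eq!(format!("{addr}"), "1.1.1.1:53");
    assert_eq!(format!("{addr:16}"), "1.1.1.1:53      ");
    assert_eq!(format!("{addr:>16}"), "      1.1.1.1:53");
}
```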
-#[cfg_attr(any(target_os = "netbsd", target_os = "openbsd", target_os = "vxworks"), ignore)] +#[cfg_attr( + any(target_os = "netbsd", target_os = "openbsd", target_os = "vxworks", target_os = "nto"), + ignore +)] #[cfg_attr(target_env = "sgx", ignore)] // FIXME: https://github.com/fortanix/rust-sgx/issues/31 #[test] fn timeouts() { diff --git a/library/std/src/net/udp/tests.rs b/library/std/src/net/udp/tests.rs index f82904ffb..892fe2ba8 100644 --- a/library/std/src/net/udp/tests.rs +++ b/library/std/src/net/udp/tests.rs @@ -180,7 +180,10 @@ fn debug() { // FIXME: re-enabled openbsd/netbsd tests once their socket timeout code // no longer has rounding errors. // VxWorks ignores SO_SNDTIMEO. -#[cfg_attr(any(target_os = "netbsd", target_os = "openbsd", target_os = "vxworks"), ignore)] +#[cfg_attr( + any(target_os = "netbsd", target_os = "openbsd", target_os = "vxworks", target_os = "nto"), + ignore +)] #[test] fn timeouts() { let addr = next_test_ip4(); diff --git a/library/std/src/os/fd/owned.rs b/library/std/src/os/fd/owned.rs index c41e093a7..99a4e0b51 100644 --- a/library/std/src/os/fd/owned.rs +++ b/library/std/src/os/fd/owned.rs @@ -9,7 +9,7 @@ use crate::fs; use crate::io; use crate::marker::PhantomData; use crate::mem::forget; -#[cfg(not(any(target_arch = "wasm32", target_env = "sgx")))] +#[cfg(not(any(target_arch = "wasm32", target_env = "sgx", target_os = "hermit")))] use crate::sys::cvt; use crate::sys_common::{AsInner, FromInner, IntoInner}; @@ -89,7 +89,7 @@ impl OwnedFd { impl BorrowedFd<'_> { /// Creates a new `OwnedFd` instance that shares the same underlying file /// description as the existing `BorrowedFd` instance. - #[cfg(not(target_arch = "wasm32"))] + #[cfg(not(any(target_arch = "wasm32", target_os = "hermit")))] #[stable(feature = "io_safety", since = "1.63.0")] pub fn try_clone_to_owned(&self) -> crate::io::Result<OwnedFd> { // We want to atomically duplicate this file descriptor and set the @@ -112,7 +112,7 @@ impl BorrowedFd<'_> { /// Creates a new `OwnedFd` instance that shares the same underlying file /// description as the existing `BorrowedFd` instance. - #[cfg(target_arch = "wasm32")] + #[cfg(any(target_arch = "wasm32", target_os = "hermit"))] #[stable(feature = "io_safety", since = "1.63.0")] pub fn try_clone_to_owned(&self) -> crate::io::Result<OwnedFd> { Err(crate::io::const_io_error!( @@ -174,7 +174,10 @@ impl Drop for OwnedFd { // the file descriptor was closed or not, and if we retried (for // something like EINTR), we might close another valid file descriptor // opened after we closed ours. 
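A Unix-only usage sketch of the `try_clone_to_owned` API whose wasm/hermit fallback is adjusted above:

```rust
use std::fs::File;
use std::io;
use std::os::fd::{AsFd, OwnedFd};

fn duplicate_handle(file: &File) -> io::Result<OwnedFd> {
    // Borrow the fd without taking ownership, then duplicate it into an
    // independently owned descriptor that is closed on drop.
    file.as_fd().try_clone_to_owned()
}
```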
+ #[cfg(not(target_os = "hermit"))] let _ = libc::close(self.fd); + #[cfg(target_os = "hermit")] + let _ = hermit_abi::close(self.fd); } } } @@ -396,6 +399,14 @@ impl<T: AsFd> AsFd for crate::sync::Arc<T> { } } +#[stable(feature = "asfd_rc", since = "1.69.0")] +impl<T: AsFd> AsFd for crate::rc::Rc<T> { + #[inline] + fn as_fd(&self) -> BorrowedFd<'_> { + (**self).as_fd() + } +} + #[stable(feature = "asfd_ptrs", since = "1.64.0")] impl<T: AsFd> AsFd for Box<T> { #[inline] diff --git a/library/std/src/os/fd/raw.rs b/library/std/src/os/fd/raw.rs index f92a05066..592e072ad 100644 --- a/library/std/src/os/fd/raw.rs +++ b/library/std/src/os/fd/raw.rs @@ -4,6 +4,9 @@ use crate::fs; use crate::io; +#[cfg(target_os = "hermit")] +use crate::os::hermit::io::OwnedFd; +#[cfg(not(target_os = "hermit"))] use crate::os::raw; #[cfg(all(doc, not(target_arch = "wasm32")))] use crate::os::unix::io::AsFd; @@ -12,11 +15,18 @@ use crate::os::unix::io::OwnedFd; #[cfg(target_os = "wasi")] use crate::os::wasi::io::OwnedFd; use crate::sys_common::{AsInner, IntoInner}; +#[cfg(target_os = "hermit")] +use hermit_abi as libc; /// Raw file descriptors. #[rustc_allowed_through_unstable_modules] #[stable(feature = "rust1", since = "1.0.0")] +#[cfg(not(target_os = "hermit"))] pub type RawFd = raw::c_int; +#[rustc_allowed_through_unstable_modules] +#[stable(feature = "rust1", since = "1.0.0")] +#[cfg(target_os = "hermit")] +pub type RawFd = i32; /// A trait to extract the raw file descriptor from an underlying object. /// @@ -244,6 +254,14 @@ impl<T: AsRawFd> AsRawFd for crate::sync::Arc<T> { } } +#[stable(feature = "asfd_rc", since = "1.69.0")] +impl<T: AsRawFd> AsRawFd for crate::rc::Rc<T> { + #[inline] + fn as_raw_fd(&self) -> RawFd { + (**self).as_raw_fd() + } +} + #[stable(feature = "asrawfd_ptrs", since = "1.63.0")] impl<T: AsRawFd> AsRawFd for Box<T> { #[inline] diff --git a/library/std/src/os/fuchsia/raw.rs b/library/std/src/os/fuchsia/raw.rs index 060d6e86b..ea6b94f2f 100644 --- a/library/std/src/os/fuchsia/raw.rs +++ b/library/std/src/os/fuchsia/raw.rs @@ -24,12 +24,7 @@ pub type pthread_t = c_ulong; #[stable(feature = "raw_ext", since = "1.1.0")] pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t}; -#[cfg(any( - target_arch = "x86", - target_arch = "le32", - target_arch = "powerpc", - target_arch = "arm" -))] +#[cfg(any(target_arch = "x86", target_arch = "powerpc", target_arch = "arm"))] mod arch { use crate::os::raw::{c_long, c_short, c_uint}; diff --git a/library/std/src/os/hermit/io/mod.rs b/library/std/src/os/hermit/io/mod.rs new file mode 100644 index 000000000..524dfae0d --- /dev/null +++ b/library/std/src/os/hermit/io/mod.rs @@ -0,0 +1,13 @@ +#![stable(feature = "os_fd", since = "1.66.0")] + +mod net; +#[path = "../../fd/owned.rs"] +mod owned; +#[path = "../../fd/raw.rs"] +mod raw; + +// Export the types and traits for the public API. +#[stable(feature = "os_fd", since = "1.66.0")] +pub use owned::*; +#[stable(feature = "os_fd", since = "1.66.0")] +pub use raw::*; diff --git a/library/std/src/os/hermit/io/net.rs b/library/std/src/os/hermit/io/net.rs new file mode 100644 index 000000000..8f3802d78 --- /dev/null +++ b/library/std/src/os/hermit/io/net.rs @@ -0,0 +1,46 @@ +use crate::os::hermit::io::OwnedFd; +use crate::os::hermit::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; +use crate::sys_common::{self, AsInner, FromInner, IntoInner}; +use crate::{net, sys}; + +macro_rules! 
impl_as_raw_fd { + ($($t:ident)*) => {$( + #[stable(feature = "rust1", since = "1.0.0")] + impl AsRawFd for net::$t { + #[inline] + fn as_raw_fd(&self) -> RawFd { + self.as_inner().socket().as_raw_fd() + } + } + )*}; +} +impl_as_raw_fd! { TcpStream TcpListener UdpSocket } + +macro_rules! impl_from_raw_fd { + ($($t:ident)*) => {$( + #[stable(feature = "from_raw_os", since = "1.1.0")] + impl FromRawFd for net::$t { + #[inline] + unsafe fn from_raw_fd(fd: RawFd) -> net::$t { + unsafe { + let socket = sys::net::Socket::from_inner(FromInner::from_inner(OwnedFd::from_raw_fd(fd))); + net::$t::from_inner(sys_common::net::$t::from_inner(socket)) + } + } + } + )*}; +} +impl_from_raw_fd! { TcpStream TcpListener UdpSocket } + +macro_rules! impl_into_raw_fd { + ($($t:ident)*) => {$( + #[stable(feature = "into_raw_os", since = "1.4.0")] + impl IntoRawFd for net::$t { + #[inline] + fn into_raw_fd(self) -> RawFd { + self.into_inner().into_socket().into_inner().into_inner().into_raw_fd() + } + } + )*}; +} +impl_into_raw_fd! { TcpStream TcpListener UdpSocket } diff --git a/library/std/src/os/hermit/mod.rs b/library/std/src/os/hermit/mod.rs index 4657b545a..89b1b8319 100644 --- a/library/std/src/os/hermit/mod.rs +++ b/library/std/src/os/hermit/mod.rs @@ -1,6 +1,11 @@ #![stable(feature = "rust1", since = "1.0.0")] +#[allow(unused_extern_crates)] +#[stable(feature = "rust1", since = "1.0.0")] +pub extern crate hermit_abi as abi; + pub mod ffi; +pub mod io; /// A prelude for conveniently writing platform-specific code. /// diff --git a/library/std/src/os/l4re/raw.rs b/library/std/src/os/l4re/raw.rs index 699e8be33..b3f7439f8 100644 --- a/library/std/src/os/l4re/raw.rs +++ b/library/std/src/os/l4re/raw.rs @@ -26,7 +26,6 @@ pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t}; #[cfg(any( target_arch = "x86", - target_arch = "le32", target_arch = "m68k", target_arch = "powerpc", target_arch = "sparc", diff --git a/library/std/src/os/linux/raw.rs b/library/std/src/os/linux/raw.rs index c73791d14..f46028c3a 100644 --- a/library/std/src/os/linux/raw.rs +++ b/library/std/src/os/linux/raw.rs @@ -26,7 +26,6 @@ pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t}; #[cfg(any( target_arch = "x86", - target_arch = "le32", target_arch = "m68k", target_arch = "powerpc", target_arch = "sparc", diff --git a/library/std/src/os/mod.rs b/library/std/src/os/mod.rs index 42773805c..b148d8a00 100644 --- a/library/std/src/os/mod.rs +++ b/library/std/src/os/mod.rs @@ -60,16 +60,6 @@ pub mod windows {} all(target_vendor = "fortanix", target_env = "sgx") ) )))] -#[cfg(target_os = "hermit")] -#[path = "hermit/mod.rs"] -pub mod unix; -#[cfg(not(all( - doc, - any( - all(target_arch = "wasm32", not(target_os = "wasi")), - all(target_vendor = "fortanix", target_env = "sgx") - ) -)))] #[cfg(all(not(target_os = "hermit"), any(unix, doc)))] pub mod unix; @@ -123,6 +113,8 @@ pub mod freebsd; pub mod fuchsia; #[cfg(target_os = "haiku")] pub mod haiku; +#[cfg(target_os = "hermit")] +pub mod hermit; #[cfg(target_os = "horizon")] pub mod horizon; #[cfg(target_os = "illumos")] @@ -135,6 +127,8 @@ pub mod l4re; pub mod macos; #[cfg(target_os = "netbsd")] pub mod netbsd; +#[cfg(target_os = "nto")] +pub mod nto; #[cfg(target_os = "openbsd")] pub mod openbsd; #[cfg(target_os = "redox")] diff --git a/library/std/src/os/net/mod.rs b/library/std/src/os/net/mod.rs index 5ec267c41..b7046dd7c 100644 --- a/library/std/src/os/net/mod.rs +++ b/library/std/src/os/net/mod.rs @@ -1,4 +1,13 @@ //! 
OS-specific networking functionality. +// See cfg macros in `library/std/src/os/mod.rs` for why these platforms must +// be special-cased during rustdoc generation. +#[cfg(not(all( + doc, + any( + all(target_arch = "wasm32", not(target_os = "wasi")), + all(target_vendor = "fortanix", target_env = "sgx") + ) +)))] #[cfg(any(target_os = "linux", target_os = "android", doc))] pub(super) mod linux_ext; diff --git a/library/std/src/os/nto/fs.rs b/library/std/src/os/nto/fs.rs new file mode 100644 index 000000000..8f915b08c --- /dev/null +++ b/library/std/src/os/nto/fs.rs @@ -0,0 +1,92 @@ +#![stable(feature = "metadata_ext", since = "1.1.0")] + +use crate::fs::Metadata; +use crate::sys_common::AsInner; + +#[stable(feature = "metadata_ext", since = "1.1.0")] +pub trait MetadataExt { + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_dev(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_ino(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_mode(&self) -> u32; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_nlink(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_uid(&self) -> u32; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_gid(&self) -> u32; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_rdev(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_size(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_atime(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_atime_nsec(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_mtime(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_mtime_nsec(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_ctime(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_ctime_nsec(&self) -> i64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_blksize(&self) -> u64; + #[stable(feature = "metadata_ext2", since = "1.8.0")] + fn st_blocks(&self) -> u64; +} + +#[stable(feature = "metadata_ext", since = "1.1.0")] +impl MetadataExt for Metadata { + fn st_dev(&self) -> u64 { + self.as_inner().as_inner().st_dev as u64 + } + fn st_ino(&self) -> u64 { + self.as_inner().as_inner().st_ino as u64 + } + fn st_mode(&self) -> u32 { + self.as_inner().as_inner().st_mode as u32 + } + fn st_nlink(&self) -> u64 { + self.as_inner().as_inner().st_nlink as u64 + } + fn st_uid(&self) -> u32 { + self.as_inner().as_inner().st_uid as u32 + } + fn st_gid(&self) -> u32 { + self.as_inner().as_inner().st_gid as u32 + } + fn st_rdev(&self) -> u64 { + self.as_inner().as_inner().st_rdev as u64 + } + fn st_size(&self) -> u64 { + self.as_inner().as_inner().st_size as u64 + } + fn st_atime(&self) -> i64 { + self.as_inner().as_inner().st_atim.tv_sec as i64 + } + fn st_atime_nsec(&self) -> i64 { + self.as_inner().as_inner().st_atim.tv_nsec as i64 + } + fn st_mtime(&self) -> i64 { + self.as_inner().as_inner().st_mtim.tv_sec as i64 + } + fn st_mtime_nsec(&self) -> i64 { + self.as_inner().as_inner().st_mtim.tv_nsec as i64 + } + fn st_ctime(&self) -> i64 { + self.as_inner().as_inner().st_ctim.tv_sec as i64 + } + fn st_ctime_nsec(&self) -> i64 { + self.as_inner().as_inner().st_ctim.tv_nsec as i64 + } + fn st_blksize(&self) -> u64 { + self.as_inner().as_inner().st_blksize as u64 + } + fn st_blocks(&self) -> u64 { + self.as_inner().as_inner().st_blocks as u64 + } +} diff 
--git a/library/std/src/os/nto/mod.rs b/library/std/src/os/nto/mod.rs new file mode 100644 index 000000000..3e591dace --- /dev/null +++ b/library/std/src/os/nto/mod.rs @@ -0,0 +1,4 @@ +#![stable(feature = "raw_ext", since = "1.1.0")] + +pub mod fs; +pub(super) mod raw; diff --git a/library/std/src/os/nto/raw.rs b/library/std/src/os/nto/raw.rs new file mode 100644 index 000000000..90e9ad546 --- /dev/null +++ b/library/std/src/os/nto/raw.rs @@ -0,0 +1,40 @@ +#![stable(feature = "raw_ext", since = "1.1.0")] +#![deprecated( + since = "1.8.0", + note = "these type aliases are no longer supported by \ + the standard library, the `libc` crate on \ + crates.io should be used instead for the correct \ + definitions" +)] +#![allow(deprecated)] + +use crate::os::raw::c_int; + +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type dev_t = u32; +#[stable(feature = "raw_ext", since = "1.1.0")] +pub type mode_t = u32; + +#[stable(feature = "pthread_t", since = "1.8.0")] +pub type pthread_t = c_int; + +#[doc(inline)] +#[stable(feature = "raw_ext", since = "1.1.0")] +pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, time_t}; + +mod arch { + use crate::os::raw::c_long; + + #[stable(feature = "raw_ext", since = "1.1.0")] + pub type blkcnt_t = i64; + #[stable(feature = "raw_ext", since = "1.1.0")] + pub type blksize_t = i32; + #[stable(feature = "raw_ext", since = "1.1.0")] + pub type ino_t = u64; + #[stable(feature = "raw_ext", since = "1.1.0")] + pub type nlink_t = u32; + #[stable(feature = "raw_ext", since = "1.1.0")] + pub type off_t = i64; + #[stable(feature = "raw_ext", since = "1.1.0")] + pub type time_t = c_long; +} diff --git a/library/std/src/os/unix/fs.rs b/library/std/src/os/unix/fs.rs index 3fc6cc44c..a0e664acd 100644 --- a/library/std/src/os/unix/fs.rs +++ b/library/std/src/os/unix/fs.rs @@ -17,6 +17,10 @@ use crate::sealed::Sealed; #[allow(unused_imports)] use io::{Read, Write}; +// Tests for this module +#[cfg(test)] +mod tests; + /// Unix-specific extensions to [`fs::File`]. #[stable(feature = "file_offset", since = "1.15.0")] pub trait FileExt { @@ -54,6 +58,16 @@ pub trait FileExt { #[stable(feature = "file_offset", since = "1.15.0")] fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize>; + /// Like `read_at`, except that it reads into a slice of buffers. + /// + /// Data is copied to fill each buffer in order, with the final buffer + /// written to possibly being only partially filled. This method must behave + /// equivalently to a single call to read with concatenated buffers. + #[unstable(feature = "unix_file_vectored_at", issue = "89517")] + fn read_vectored_at(&self, bufs: &mut [io::IoSliceMut<'_>], offset: u64) -> io::Result<usize> { + io::default_read_vectored(|b| self.read_at(b, offset), bufs) + } + /// Reads the exact number of byte required to fill `buf` from the given offset. /// /// The offset is relative to the start of the file and thus independent @@ -155,6 +169,16 @@ pub trait FileExt { #[stable(feature = "file_offset", since = "1.15.0")] fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize>; + /// Like `write_at`, except that it writes from a slice of buffers. + /// + /// Data is copied from each buffer in order, with the final buffer read + /// from possibly being only partially consumed. This method must behave as + /// a call to `write_at` with the buffers concatenated would. 
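A nightly-only usage sketch of the vectored positional reads added above (gated behind `unix_file_vectored_at`, tracking issue #89517); `write_vectored_at` is the mirror image for writes:

```rust
#![feature(unix_file_vectored_at)]

use std::fs::File;
use std::io::{self, IoSliceMut};
use std::os::unix::fs::FileExt;

fn read_two(file: &File, offset: u64) -> io::Result<usize> {
    let mut head = [0u8; 4];
    let mut tail = [0u8; 4];
    let mut bufs = [IoSliceMut::new(&mut head), IoSliceMut::new(&mut tail)];
    // Fills `head` first, then `tail`; like `read_at`, a short read
    // (fewer bytes than the buffers can hold) is allowed.
    file.read_vectored_at(&mut bufs, offset)
}
```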
+ #[unstable(feature = "unix_file_vectored_at", issue = "89517")] + fn write_vectored_at(&self, bufs: &[io::IoSlice<'_>], offset: u64) -> io::Result<usize> { + io::default_write_vectored(|b| self.write_at(b, offset), bufs) + } + /// Attempts to write an entire buffer starting from a given offset. /// /// The offset is relative to the start of the file and thus independent @@ -218,9 +242,15 @@ impl FileExt for fs::File { fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<usize> { self.as_inner().read_at(buf, offset) } + fn read_vectored_at(&self, bufs: &mut [io::IoSliceMut<'_>], offset: u64) -> io::Result<usize> { + self.as_inner().read_vectored_at(bufs, offset) + } fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> { self.as_inner().write_at(buf, offset) } + fn write_vectored_at(&self, bufs: &[io::IoSlice<'_>], offset: u64) -> io::Result<usize> { + self.as_inner().write_vectored_at(bufs, offset) + } } /// Unix-specific extensions to [`fs::Permissions`]. diff --git a/library/std/src/os/unix/fs/tests.rs b/library/std/src/os/unix/fs/tests.rs new file mode 100644 index 000000000..67f607bd4 --- /dev/null +++ b/library/std/src/os/unix/fs/tests.rs @@ -0,0 +1,57 @@ +use super::*; + +#[test] +fn read_vectored_at() { + let msg = b"preadv is working!"; + let dir = crate::sys_common::io::test::tmpdir(); + + let filename = dir.join("preadv.txt"); + { + let mut file = fs::File::create(&filename).unwrap(); + file.write_all(msg).unwrap(); + } + { + let file = fs::File::open(&filename).unwrap(); + let mut buf0 = [0; 4]; + let mut buf1 = [0; 3]; + + let mut iovec = [io::IoSliceMut::new(&mut buf0), io::IoSliceMut::new(&mut buf1)]; + + let n = file.read_vectored_at(&mut iovec, 4).unwrap(); + + assert!(n == 4 || n == 7); + assert_eq!(&buf0, b"dv i"); + + if n == 7 { + assert_eq!(&buf1, b"s w"); + } + } +} + +#[test] +fn write_vectored_at() { + let msg = b"pwritev is not working!"; + let dir = crate::sys_common::io::test::tmpdir(); + + let filename = dir.join("preadv.txt"); + { + let mut file = fs::File::create(&filename).unwrap(); + file.write_all(msg).unwrap(); + } + let expected = { + let file = fs::File::options().write(true).open(&filename).unwrap(); + let buf0 = b" "; + let buf1 = b"great "; + + let iovec = [io::IoSlice::new(buf0), io::IoSlice::new(buf1)]; + + let n = file.write_vectored_at(&iovec, 11).unwrap(); + + assert!(n == 4 || n == 11); + + if n == 4 { b"pwritev is working!" } else { b"pwritev is great !" 
} + }; + + let content = fs::read(&filename).unwrap(); + assert_eq!(&content, expected); +} diff --git a/library/std/src/os/unix/mod.rs b/library/std/src/os/unix/mod.rs index f97fa0fb0..eb2d7ce11 100644 --- a/library/std/src/os/unix/mod.rs +++ b/library/std/src/os/unix/mod.rs @@ -65,6 +65,8 @@ mod platform { pub use crate::os::macos::*; #[cfg(target_os = "netbsd")] pub use crate::os::netbsd::*; + #[cfg(target_os = "nto")] + pub use crate::os::nto::*; #[cfg(target_os = "openbsd")] pub use crate::os::openbsd::*; #[cfg(target_os = "redox")] @@ -95,7 +97,8 @@ pub mod thread; target_os = "watchos", target_os = "macos", target_os = "netbsd", - target_os = "openbsd" + target_os = "openbsd", + target_os = "nto", ))] pub mod ucred; diff --git a/library/std/src/os/unix/net/datagram.rs b/library/std/src/os/unix/net/datagram.rs index f758f88d0..272b4f5dc 100644 --- a/library/std/src/os/unix/net/datagram.rs +++ b/library/std/src/os/unix/net/datagram.rs @@ -19,7 +19,8 @@ use crate::{fmt, io}; target_os = "freebsd", target_os = "openbsd", target_os = "netbsd", - target_os = "haiku" + target_os = "haiku", + target_os = "nto", ))] use libc::MSG_NOSIGNAL; #[cfg(not(any( @@ -29,7 +30,8 @@ use libc::MSG_NOSIGNAL; target_os = "freebsd", target_os = "openbsd", target_os = "netbsd", - target_os = "haiku" + target_os = "haiku", + target_os = "nto", )))] const MSG_NOSIGNAL: libc::c_int = 0x0; diff --git a/library/std/src/os/unix/net/tests.rs b/library/std/src/os/unix/net/tests.rs index 37fcfa844..f8c29a6d3 100644 --- a/library/std/src/os/unix/net/tests.rs +++ b/library/std/src/os/unix/net/tests.rs @@ -167,6 +167,7 @@ fn long_path() { } #[test] +#[cfg(not(target_os = "nto"))] fn timeouts() { let dir = tmpdir(); let socket_path = dir.path().join("sock"); diff --git a/library/std/src/os/unix/process.rs b/library/std/src/os/unix/process.rs index 09b2bfe39..729c63d18 100644 --- a/library/std/src/os/unix/process.rs +++ b/library/std/src/os/unix/process.rs @@ -12,15 +12,23 @@ use crate::sealed::Sealed; use crate::sys; use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner}; -#[cfg(not(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon")))] -type UserId = u32; -#[cfg(not(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon")))] -type GroupId = u32; - -#[cfg(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon"))] -type UserId = u16; -#[cfg(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon"))] -type GroupId = u16; +use cfg_if::cfg_if; + +cfg_if! { + if #[cfg(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon"))] { + type UserId = u16; + type GroupId = u16; + } else if #[cfg(target_os = "nto")] { + // Both IDs are signed, see `sys/target_nto.h` of the QNX Neutrino SDP. + // Only positive values should be used, see e.g. + // https://www.qnx.com/developers/docs/7.1/#com.qnx.doc.neutrino.lib_ref/topic/s/setuid.html + type UserId = i32; + type GroupId = i32; + } else { + type UserId = u32; + type GroupId = u32; + } +} /// Unix-specific extensions to the [`process::Command`] builder. 
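The same `cfg_if!` selection pattern is available through the `cfg-if` crate on crates.io (std vendors it internally). A compressed sketch mirroring the widths chosen above, omitting the u16 targets for brevity:

```rust
use cfg_if::cfg_if;

cfg_if! {
    if #[cfg(target_os = "nto")] {
        // QNX Neutrino uses signed IDs; only non-negative values are valid.
        pub type UserId = i32;
    } else {
        pub type UserId = u32;
    }
}

fn main() {
    // Illustrative only: on most Unix targets this aliases u32.
    let root: UserId = 0;
    println!("uid {root}");
}
```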
/// diff --git a/library/std/src/os/unix/ucred.rs b/library/std/src/os/unix/ucred.rs index ae4faf27b..95967eac2 100644 --- a/library/std/src/os/unix/ucred.rs +++ b/library/std/src/os/unix/ucred.rs @@ -79,7 +79,8 @@ pub mod impl_linux { target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd", - target_os = "netbsd" + target_os = "netbsd", + target_os = "nto", ))] pub mod impl_bsd { use super::UCred; diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs index b0db3112e..e59f32af7 100644 --- a/library/std/src/panicking.rs +++ b/library/std/src/panicking.rs @@ -95,13 +95,16 @@ impl Default for Hook { static HOOK: RwLock<Hook> = RwLock::new(Hook::Default); -/// Registers a custom panic hook, replacing any that was previously registered. +/// Registers a custom panic hook, replacing the previously registered hook. /// /// The panic hook is invoked when a thread panics, but before the panic runtime /// is invoked. As such, the hook will run with both the aborting and unwinding -/// runtimes. The default hook prints a message to standard error and generates -/// a backtrace if requested, but this behavior can be customized with the -/// `set_hook` and [`take_hook`] functions. +/// runtimes. +/// +/// The default hook, which is registered at startup, prints a message to standard error and +/// generates a backtrace if requested. This behavior can be customized using the `set_hook` function. +/// The current hook can be retrieved while reinstating the default hook with the [`take_hook`] +/// function. /// /// [`take_hook`]: ./fn.take_hook.html /// @@ -143,13 +146,14 @@ pub fn set_hook(hook: Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>) { drop(old); } -/// Unregisters the current panic hook, returning it. +/// Unregisters the current panic hook and returns it, registering the default hook +/// in its place. /// /// *See also the function [`set_hook`].* /// /// [`set_hook`]: ./fn.set_hook.html /// -/// If no custom hook is registered, the default hook will be returned. +/// If the default hook is registered it will be returned, but remain registered. /// /// # Panics /// diff --git a/library/std/src/path.rs b/library/std/src/path.rs index 4778114b4..cd6b393a2 100644 --- a/library/std/src/path.rs +++ b/library/std/src/path.rs @@ -2531,6 +2531,8 @@ impl Path { /// Creates an owned [`PathBuf`] with `path` adjoined to `self`. /// + /// If `path` is absolute, it replaces the current path. + /// /// See [`PathBuf::push`] for more details on what it means to adjoin a path. /// /// # Examples @@ -2539,6 +2541,7 @@ impl Path { /// use std::path::{Path, PathBuf}; /// /// assert_eq!(Path::new("/etc").join("passwd"), PathBuf::from("/etc/passwd")); + /// assert_eq!(Path::new("/etc").join("/bin/sh"), PathBuf::from("/bin/sh")); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[must_use] @@ -2685,6 +2688,7 @@ impl Path { /// escapes the path please use [`Debug`] instead. /// /// [`Display`]: fmt::Display + /// [`Debug`]: fmt::Debug /// /// # Examples /// diff --git a/library/std/src/personality/emcc.rs b/library/std/src/personality/emcc.rs index f942bdf18..cb52ae89b 100644 --- a/library/std/src/personality/emcc.rs +++ b/library/std/src/personality/emcc.rs @@ -1,7 +1,7 @@ //! On Emscripten Rust panics are wrapped in C++ exceptions, so we just forward //! to `__gxx_personality_v0` which is provided by Emscripten. 
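A usage sketch of the `set_hook`/`take_hook` interplay that the reworded hook documentation above describes:

```rust
use std::panic;

fn main() {
    // Install a custom hook, replacing the default one.
    panic::set_hook(Box::new(|info| {
        eprintln!("custom panic hook: {info}");
    }));

    // `take_hook` hands the custom hook back and reinstates the default,
    // so panics after this point print the usual message and backtrace.
    let _custom = panic::take_hook();
}
```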
-use libc::c_int; +use crate::ffi::c_int; use unwind as uw; // This is required by the compiler to exist (e.g., it's a lang item), but it's diff --git a/library/std/src/personality/gcc.rs b/library/std/src/personality/gcc.rs index 5fc1b91a1..41c0fe725 100644 --- a/library/std/src/personality/gcc.rs +++ b/library/std/src/personality/gcc.rs @@ -37,7 +37,8 @@ //! and the last personality routine transfers control to the catch block. use super::dwarf::eh::{self, EHAction, EHContext}; -use libc::{c_int, uintptr_t}; +use crate::ffi::c_int; +use libc::uintptr_t; use unwind as uw; // Register ids were lifted from LLVM's TargetLowering::getExceptionPointerRegister() diff --git a/library/std/src/primitive_docs.rs b/library/std/src/primitive_docs.rs index d6e9da187..6f78811a1 100644 --- a/library/std/src/primitive_docs.rs +++ b/library/std/src/primitive_docs.rs @@ -587,8 +587,10 @@ mod prim_pointer {} /// There are two syntactic forms for creating an array: /// /// * A list with each element, i.e., `[x, y, z]`. -/// * A repeat expression `[x; N]`, which produces an array with `N` copies of `x`. -/// The type of `x` must be [`Copy`]. +/// * A repeat expression `[expr; N]` where `N` is how many times to repeat `expr` in the array. `expr` must either be: +/// +/// * A value of a type implementing the [`Copy`] trait +/// * A `const` value /// /// Note that `[expr; 0]` is allowed, and produces an empty array. /// This will still evaluate `expr`, however, and immediately drop the resulting value, so diff --git a/library/std/src/process.rs b/library/std/src/process.rs index 62ce2cb33..1952e19e6 100644 --- a/library/std/src/process.rs +++ b/library/std/src/process.rs @@ -1416,7 +1416,7 @@ impl From<fs::File> for Stdio { /// use std::fs::File; /// use std::process::Command; /// - /// // With the `foo.txt` file containing `Hello, world!" + /// // With the `foo.txt` file containing "Hello, world!" /// let file = File::open("foo.txt").unwrap(); /// /// let reverse = Command::new("rev") diff --git a/library/std/src/sync/lazy_lock.rs b/library/std/src/sync/lazy_lock.rs index 4a1530530..7e85d6a06 100644 --- a/library/std/src/sync/lazy_lock.rs +++ b/library/std/src/sync/lazy_lock.rs @@ -1,8 +1,21 @@ -use crate::cell::Cell; +use crate::cell::UnsafeCell; use crate::fmt; +use crate::mem::ManuallyDrop; use crate::ops::Deref; use crate::panic::{RefUnwindSafe, UnwindSafe}; -use crate::sync::OnceLock; +use crate::sync::Once; + +use super::once::ExclusiveState; + +// We use the state of a Once as discriminant value. Upon creation, the state is +// "incomplete" and `f` contains the initialization closure. In the first call to +// `call_once`, `f` is taken and run. If it succeeds, `value` is set and the state +// is changed to "complete". If it panics, the Once is poisoned, so none of the +// two fields is initialized. +union Data<T, F> { + value: ManuallyDrop<T>, + f: ManuallyDrop<F>, +} /// A value which is initialized on the first access. /// @@ -43,16 +56,17 @@ use crate::sync::OnceLock; /// ``` #[unstable(feature = "once_cell", issue = "74465")] pub struct LazyLock<T, F = fn() -> T> { - cell: OnceLock<T>, - init: Cell<Option<F>>, + once: Once, + data: UnsafeCell<Data<T, F>>, } + impl<T, F: FnOnce() -> T> LazyLock<T, F> { /// Creates a new lazy value with the given initializing /// function. 
#[inline] #[unstable(feature = "once_cell", issue = "74465")] pub const fn new(f: F) -> LazyLock<T, F> { - LazyLock { cell: OnceLock::new(), init: Cell::new(Some(f)) } + LazyLock { once: Once::new(), data: UnsafeCell::new(Data { f: ManuallyDrop::new(f) }) } } /// Forces the evaluation of this lazy value and @@ -74,10 +88,50 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> { #[inline] #[unstable(feature = "once_cell", issue = "74465")] pub fn force(this: &LazyLock<T, F>) -> &T { - this.cell.get_or_init(|| match this.init.take() { - Some(f) => f(), - None => panic!("Lazy instance has previously been poisoned"), - }) + this.once.call_once(|| { + // SAFETY: `call_once` only runs this closure once, ever. + let data = unsafe { &mut *this.data.get() }; + let f = unsafe { ManuallyDrop::take(&mut data.f) }; + let value = f(); + data.value = ManuallyDrop::new(value); + }); + + // SAFETY: + // There are four possible scenarios: + // * the closure was called and initialized `value`. + // * the closure was called and panicked, so this point is never reached. + // * the closure was not called, but a previous call initialized `value`. + // * the closure was not called because the Once is poisoned, so this point + // is never reached. + // So `value` has definitely been initialized and will not be modified again. + unsafe { &*(*this.data.get()).value } + } +} + +impl<T, F> LazyLock<T, F> { + /// Get the inner value if it has already been initialized. + fn get(&self) -> Option<&T> { + if self.once.is_completed() { + // SAFETY: + // The closure has been run successfully, so `value` has been initialized + // and will not be modified again. + Some(unsafe { &*(*self.data.get()).value }) + } else { + None + } + } +} + +#[unstable(feature = "once_cell", issue = "74465")] +impl<T, F> Drop for LazyLock<T, F> { + fn drop(&mut self) { + match self.once.state() { + ExclusiveState::Incomplete => unsafe { ManuallyDrop::drop(&mut self.data.get_mut().f) }, + ExclusiveState::Complete => unsafe { + ManuallyDrop::drop(&mut self.data.get_mut().value) + }, + ExclusiveState::Poisoned => {} + } } } @@ -103,23 +157,23 @@ impl<T: Default> Default for LazyLock<T> { #[unstable(feature = "once_cell", issue = "74465")] impl<T: fmt::Debug, F> fmt::Debug for LazyLock<T, F> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Lazy").field("cell", &self.cell).finish_non_exhaustive() + match self.get() { + Some(v) => f.debug_tuple("LazyLock").field(v).finish(), + None => f.write_str("LazyLock(Uninit)"), + } } } // We never create a `&F` from a `&LazyLock<T, F>` so it is fine // to not impl `Sync` for `F` -// we do create a `&mut Option<F>` in `force`, but this is -// properly synchronized, so it only happens once -// so it also does not contribute to this impl. #[unstable(feature = "once_cell", issue = "74465")] -unsafe impl<T, F: Send> Sync for LazyLock<T, F> where OnceLock<T>: Sync {} +unsafe impl<T: Sync + Send, F: Send> Sync for LazyLock<T, F> {} // auto-derived `Send` impl is OK. 
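A nightly-only usage sketch of `LazyLock` (feature `once_cell`, tracking issue #74465), whose internals the hunk above reworks from `OnceLock` plus `Cell` into a `Once` plus a union; the environment variable is illustrative:

```rust
#![feature(once_cell)]

use std::sync::LazyLock;

static CONFIG: LazyLock<String> = LazyLock::new(|| {
    // Runs at most once, on first access; a panic here poisons the
    // inner Once, so later accesses panic as well.
    std::env::var("CONFIG_PATH").unwrap_or_else(|_| "/etc/default".into())
});

fn main() {
    // The first deref forces initialization; later ones are plain reads.
    println!("config at {}", &*CONFIG);
}
```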
#[unstable(feature = "once_cell", issue = "74465")] -impl<T, F: UnwindSafe> RefUnwindSafe for LazyLock<T, F> where OnceLock<T>: RefUnwindSafe {} +impl<T: RefUnwindSafe + UnwindSafe, F: UnwindSafe> RefUnwindSafe for LazyLock<T, F> {} #[unstable(feature = "once_cell", issue = "74465")] -impl<T, F: UnwindSafe> UnwindSafe for LazyLock<T, F> where OnceLock<T>: UnwindSafe {} +impl<T: UnwindSafe, F: UnwindSafe> UnwindSafe for LazyLock<T, F> {} #[cfg(test)] mod tests; diff --git a/library/std/src/sync/mod.rs b/library/std/src/sync/mod.rs index ba20bab87..4edc95617 100644 --- a/library/std/src/sync/mod.rs +++ b/library/std/src/sync/mod.rs @@ -186,7 +186,7 @@ mod condvar; mod lazy_lock; mod mpmc; mod mutex; -mod once; +pub(crate) mod once; mod once_lock; mod poison; mod remutex; diff --git a/library/std/src/sync/mpmc/array.rs b/library/std/src/sync/mpmc/array.rs index c1e3e48b0..c6bb09b04 100644 --- a/library/std/src/sync/mpmc/array.rs +++ b/library/std/src/sync/mpmc/array.rs @@ -319,19 +319,10 @@ impl<T> Channel<T> { ) -> Result<(), SendTimeoutError<T>> { let token = &mut Token::default(); loop { - // Try sending a message several times. - let backoff = Backoff::new(); - loop { - if self.start_send(token) { - let res = unsafe { self.write(token, msg) }; - return res.map_err(SendTimeoutError::Disconnected); - } - - if backoff.is_completed() { - break; - } else { - backoff.spin_light(); - } + // Try sending a message. + if self.start_send(token) { + let res = unsafe { self.write(token, msg) }; + return res.map_err(SendTimeoutError::Disconnected); } if let Some(d) = deadline { @@ -379,6 +370,7 @@ impl<T> Channel<T> { pub(crate) fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> { let token = &mut Token::default(); loop { + // Try receiving a message. if self.start_recv(token) { let res = unsafe { self.read(token) }; return res.map_err(|_| RecvTimeoutError::Disconnected); diff --git a/library/std/src/sync/mpmc/utils.rs b/library/std/src/sync/mpmc/utils.rs index cfe42750d..d053d69e2 100644 --- a/library/std/src/sync/mpmc/utils.rs +++ b/library/std/src/sync/mpmc/utils.rs @@ -105,10 +105,8 @@ impl Backoff { /// Backs off using lightweight spinning. /// - /// This method should be used for: - /// - Retrying an operation because another thread made progress. i.e. on CAS failure. - /// - Waiting for an operation to complete by spinning optimistically for a few iterations - /// before falling back to parking the thread (see `Backoff::is_completed`). + /// This method should be used for retrying an operation because another thread made + /// progress. i.e. on CAS failure. #[inline] pub fn spin_light(&self) { let step = self.step.get().min(SPIN_LIMIT); @@ -134,10 +132,4 @@ impl Backoff { self.step.set(self.step.get() + 1); } - - /// Returns `true` if quadratic backoff has completed and parking the thread is advised. - #[inline] - pub fn is_completed(&self) -> bool { - self.step.get() > SPIN_LIMIT - } } diff --git a/library/std/src/sync/once.rs b/library/std/src/sync/once.rs index 0f25417d6..1b17c3108 100644 --- a/library/std/src/sync/once.rs +++ b/library/std/src/sync/once.rs @@ -43,6 +43,12 @@ pub struct OnceState { pub(crate) inner: sys::OnceState, } +pub(crate) enum ExclusiveState { + Incomplete, + Poisoned, + Complete, +} + /// Initialization value for static [`Once`] values. /// /// # Examples @@ -248,6 +254,16 @@ impl Once { pub fn is_completed(&self) -> bool { self.inner.is_completed() } + + /// Returns the current state of the `Once` instance. 
+ /// + /// Since this takes a mutable reference, no initialization can currently + /// be running, so the state must be either "incomplete", "poisoned" or + /// "complete". + #[inline] + pub(crate) fn state(&mut self) -> ExclusiveState { + self.inner.state() + } } #[stable(feature = "std_debug", since = "1.16.0")] diff --git a/library/std/src/sys/common/alloc.rs b/library/std/src/sys/common/alloc.rs index 3edbe7280..403a5e627 100644 --- a/library/std/src/sys/common/alloc.rs +++ b/library/std/src/sys/common/alloc.rs @@ -7,6 +7,7 @@ use crate::ptr; #[cfg(any( target_arch = "x86", target_arch = "arm", + target_arch = "m68k", target_arch = "mips", target_arch = "powerpc", target_arch = "powerpc64", diff --git a/library/std/src/sys/hermit/args.rs b/library/std/src/sys/hermit/args.rs index afcae6c90..220a76e4b 100644 --- a/library/std/src/sys/hermit/args.rs +++ b/library/std/src/sys/hermit/args.rs @@ -1,6 +1,6 @@ use crate::ffi::{c_char, CStr, OsString}; use crate::fmt; -use crate::os::unix::ffi::OsStringExt; +use crate::os::hermit::ffi::OsStringExt; use crate::ptr; use crate::sync::atomic::{ AtomicIsize, AtomicPtr, diff --git a/library/std/src/sys/hermit/fd.rs b/library/std/src/sys/hermit/fd.rs index c400f5f2c..3a2cdd301 100644 --- a/library/std/src/sys/hermit/fd.rs +++ b/library/std/src/sys/hermit/fd.rs @@ -1,36 +1,23 @@ #![unstable(reason = "not public", issue = "none", feature = "fd")] use crate::io::{self, Read}; -use crate::mem; +use crate::os::hermit::io::{FromRawFd, OwnedFd, RawFd}; use crate::sys::cvt; use crate::sys::hermit::abi; use crate::sys::unsupported; -use crate::sys_common::AsInner; +use crate::sys_common::{AsInner, FromInner, IntoInner}; + +use crate::os::hermit::io::*; #[derive(Debug)] pub struct FileDesc { - fd: i32, + fd: OwnedFd, } impl FileDesc { - pub fn new(fd: i32) -> FileDesc { - FileDesc { fd } - } - - pub fn raw(&self) -> i32 { - self.fd - } - - /// Extracts the actual file descriptor without closing it. - pub fn into_raw(self) -> i32 { - let fd = self.fd; - mem::forget(self); - fd - } - pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { - let result = unsafe { abi::read(self.fd, buf.as_mut_ptr(), buf.len()) }; - cvt(result as i32) + let result = cvt(unsafe { abi::read(self.fd.as_raw_fd(), buf.as_mut_ptr(), buf.len()) })?; + Ok(result as usize) } pub fn read_to_end(&self, buf: &mut Vec<u8>) -> io::Result<usize> { @@ -39,8 +26,8 @@ impl FileDesc { } pub fn write(&self, buf: &[u8]) -> io::Result<usize> { - let result = unsafe { abi::write(self.fd, buf.as_ptr(), buf.len()) }; - cvt(result as i32) + let result = cvt(unsafe { abi::write(self.fd.as_raw_fd(), buf.as_ptr(), buf.len()) })?; + Ok(result as usize) } pub fn duplicate(&self) -> io::Result<FileDesc> { @@ -69,19 +56,45 @@ impl<'a> Read for &'a FileDesc { } } -impl AsInner<i32> for FileDesc { - fn as_inner(&self) -> &i32 { +impl IntoInner<OwnedFd> for FileDesc { + fn into_inner(self) -> OwnedFd { + self.fd + } +} + +impl FromInner<OwnedFd> for FileDesc { + fn from_inner(owned_fd: OwnedFd) -> Self { + Self { fd: owned_fd } + } +} + +impl FromRawFd for FileDesc { + unsafe fn from_raw_fd(raw_fd: RawFd) -> Self { + Self { fd: FromRawFd::from_raw_fd(raw_fd) } + } +} + +impl AsInner<OwnedFd> for FileDesc { + fn as_inner(&self) -> &OwnedFd { &self.fd } } -impl Drop for FileDesc { - fn drop(&mut self) { - // Note that errors are ignored when closing a file descriptor. 
The - // reason for this is that if an error occurs we don't actually know if - // the file descriptor was closed or not, and if we retried (for - // something like EINTR), we might close another valid file descriptor - // (opened after we closed ours. - let _ = unsafe { abi::close(self.fd) }; +impl AsFd for FileDesc { + fn as_fd(&self) -> BorrowedFd<'_> { + self.fd.as_fd() + } +} + +impl AsRawFd for FileDesc { + #[inline] + fn as_raw_fd(&self) -> RawFd { + self.fd.as_raw_fd() + } +} + +impl IntoRawFd for FileDesc { + fn into_raw_fd(self) -> RawFd { + self.fd.into_raw_fd() } } diff --git a/library/std/src/sys/hermit/fs.rs b/library/std/src/sys/hermit/fs.rs index 6fb92c037..c966f2177 100644 --- a/library/std/src/sys/hermit/fs.rs +++ b/library/std/src/sys/hermit/fs.rs @@ -3,14 +3,17 @@ use crate::fmt; use crate::hash::{Hash, Hasher}; use crate::io::{self, Error, ErrorKind}; use crate::io::{BorrowedCursor, IoSlice, IoSliceMut, SeekFrom}; +use crate::os::hermit::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd}; use crate::path::{Path, PathBuf}; use crate::sys::common::small_c_string::run_path_with_cstr; use crate::sys::cvt; -use crate::sys::hermit::abi; -use crate::sys::hermit::abi::{O_APPEND, O_CREAT, O_EXCL, O_RDONLY, O_RDWR, O_TRUNC, O_WRONLY}; +use crate::sys::hermit::abi::{ + self, O_APPEND, O_CREAT, O_EXCL, O_RDONLY, O_RDWR, O_TRUNC, O_WRONLY, +}; use crate::sys::hermit::fd::FileDesc; use crate::sys::time::SystemTime; use crate::sys::unsupported; +use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner}; pub use crate::sys_common::fs::{copy, try_exists}; //pub use crate::sys_common::fs::remove_dir_all; @@ -283,7 +286,7 @@ impl File { } let fd = unsafe { cvt(abi::open(path.as_ptr(), flags, mode))? }; - Ok(File(FileDesc::new(fd as i32))) + Ok(File(unsafe { FileDesc::from_raw_fd(fd as i32) })) } pub fn file_attr(&self) -> io::Result<FileAttr> { @@ -363,6 +366,54 @@ impl DirBuilder { } } +impl AsInner<FileDesc> for File { + fn as_inner(&self) -> &FileDesc { + &self.0 + } +} + +impl AsInnerMut<FileDesc> for File { + fn as_inner_mut(&mut self) -> &mut FileDesc { + &mut self.0 + } +} + +impl IntoInner<FileDesc> for File { + fn into_inner(self) -> FileDesc { + self.0 + } +} + +impl FromInner<FileDesc> for File { + fn from_inner(file_desc: FileDesc) -> Self { + Self(file_desc) + } +} + +impl AsFd for File { + fn as_fd(&self) -> BorrowedFd<'_> { + self.0.as_fd() + } +} + +impl AsRawFd for File { + fn as_raw_fd(&self) -> RawFd { + self.0.as_raw_fd() + } +} + +impl IntoRawFd for File { + fn into_raw_fd(self) -> RawFd { + self.0.into_raw_fd() + } +} + +impl FromRawFd for File { + unsafe fn from_raw_fd(raw_fd: RawFd) -> Self { + Self(FromRawFd::from_raw_fd(raw_fd)) + } +} + pub fn readdir(_p: &Path) -> io::Result<ReadDir> { unsupported() } diff --git a/library/std/src/sys/hermit/mod.rs b/library/std/src/sys/hermit/mod.rs index 6811fadb0..d34a4cfed 100644 --- a/library/std/src/sys/hermit/mod.rs +++ b/library/std/src/sys/hermit/mod.rs @@ -13,7 +13,7 @@ //! compiling for wasm. That way it's a compile time error for something that's //! guaranteed to be a runtime error! 
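The hunk below rewrites hermit's `cvt` helpers around named errno constants. A plain-Rust sketch of the `-errno` return convention they decode (the errno value and its decoded kind are illustrative and platform-dependent):

```rust
use std::io;

// Syscall-style convention: a non-negative return is the result,
// a negative one encodes `-errno`.
fn cvt_sketch(ret: i32) -> io::Result<i32> {
    if ret < 0 { Err(io::Error::from_raw_os_error(-ret)) } else { Ok(ret) }
}

fn main() {
    assert_eq!(cvt_sketch(42).unwrap(), 42);
    // 13 is EACCES in Linux-style errno tables; illustrative only.
    println!("{:?}", cvt_sketch(-13).unwrap_err().kind());
}
```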
-#![allow(unsafe_op_in_unsafe_fn)] +#![allow(missing_docs, nonstandard_style, unsafe_op_in_unsafe_fn)] use crate::intrinsics; use crate::os::raw::c_char; @@ -57,9 +57,7 @@ pub mod locks { } use crate::io::ErrorKind; - -#[allow(unused_extern_crates)] -pub extern crate hermit_abi as abi; +use crate::os::hermit::abi; pub fn unsupported<T>() -> crate::io::Result<T> { Err(unsupported_err()) @@ -72,11 +70,6 @@ pub fn unsupported_err() -> crate::io::Error { ) } -#[no_mangle] -pub extern "C" fn floor(x: f64) -> f64 { - unsafe { intrinsics::floorf64(x) } -} - pub fn abort_internal() -> ! { unsafe { abi::abort(); @@ -131,25 +124,72 @@ pub unsafe extern "C" fn runtime_entry( pub fn decode_error_kind(errno: i32) -> ErrorKind { match errno { - x if x == 13 as i32 => ErrorKind::PermissionDenied, - x if x == 98 as i32 => ErrorKind::AddrInUse, - x if x == 99 as i32 => ErrorKind::AddrNotAvailable, - x if x == 11 as i32 => ErrorKind::WouldBlock, - x if x == 103 as i32 => ErrorKind::ConnectionAborted, - x if x == 111 as i32 => ErrorKind::ConnectionRefused, - x if x == 104 as i32 => ErrorKind::ConnectionReset, - x if x == 17 as i32 => ErrorKind::AlreadyExists, - x if x == 4 as i32 => ErrorKind::Interrupted, - x if x == 22 as i32 => ErrorKind::InvalidInput, - x if x == 2 as i32 => ErrorKind::NotFound, - x if x == 107 as i32 => ErrorKind::NotConnected, - x if x == 1 as i32 => ErrorKind::PermissionDenied, - x if x == 32 as i32 => ErrorKind::BrokenPipe, - x if x == 110 as i32 => ErrorKind::TimedOut, + abi::errno::EACCES => ErrorKind::PermissionDenied, + abi::errno::EADDRINUSE => ErrorKind::AddrInUse, + abi::errno::EADDRNOTAVAIL => ErrorKind::AddrNotAvailable, + abi::errno::EAGAIN => ErrorKind::WouldBlock, + abi::errno::ECONNABORTED => ErrorKind::ConnectionAborted, + abi::errno::ECONNREFUSED => ErrorKind::ConnectionRefused, + abi::errno::ECONNRESET => ErrorKind::ConnectionReset, + abi::errno::EEXIST => ErrorKind::AlreadyExists, + abi::errno::EINTR => ErrorKind::Interrupted, + abi::errno::EINVAL => ErrorKind::InvalidInput, + abi::errno::ENOENT => ErrorKind::NotFound, + abi::errno::ENOTCONN => ErrorKind::NotConnected, + abi::errno::EPERM => ErrorKind::PermissionDenied, + abi::errno::EPIPE => ErrorKind::BrokenPipe, + abi::errno::ETIMEDOUT => ErrorKind::TimedOut, _ => ErrorKind::Uncategorized, } } -pub fn cvt(result: i32) -> crate::io::Result<usize> { - if result < 0 { Err(crate::io::Error::from_raw_os_error(-result)) } else { Ok(result as usize) } +#[doc(hidden)] +pub trait IsNegative { + fn is_negative(&self) -> bool; + fn negate(&self) -> i32; +} + +macro_rules! impl_is_negative { + ($($t:ident)*) => ($(impl IsNegative for $t { + fn is_negative(&self) -> bool { + *self < 0 + } + + fn negate(&self) -> i32 { + i32::try_from(-(*self)).unwrap() + } + })*) +} + +impl IsNegative for i32 { + fn is_negative(&self) -> bool { + *self < 0 + } + + fn negate(&self) -> i32 { + -(*self) + } +} +impl_is_negative! 
{ i8 i16 i64 isize } + +pub fn cvt<T: IsNegative>(t: T) -> crate::io::Result<T> { + if t.is_negative() { + let e = decode_error_kind(t.negate()); + Err(crate::io::Error::from(e)) + } else { + Ok(t) + } +} + +pub fn cvt_r<T, F>(mut f: F) -> crate::io::Result<T> +where + T: IsNegative, + F: FnMut() -> T, +{ + loop { + match cvt(f()) { + Err(ref e) if e.kind() == ErrorKind::Interrupted => {} + other => return other, + } + } } diff --git a/library/std/src/sys/hermit/net.rs b/library/std/src/sys/hermit/net.rs index 8a13879d8..5fb6281aa 100644 --- a/library/std/src/sys/hermit/net.rs +++ b/library/std/src/sys/hermit/net.rs @@ -1,490 +1,353 @@ -use crate::fmt; -use crate::io::{self, ErrorKind, IoSlice, IoSliceMut}; -use crate::net::{IpAddr, Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr}; -use crate::str; -use crate::sync::Arc; -use crate::sys::hermit::abi; -use crate::sys::hermit::abi::IpAddress::{Ipv4, Ipv6}; -use crate::sys::unsupported; -use crate::sys_common::AsInner; +#![allow(dead_code)] + +use crate::cmp; +use crate::io::{self, IoSlice, IoSliceMut}; +use crate::mem; +use crate::net::{Shutdown, SocketAddr}; +use crate::os::hermit::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, RawFd}; +use crate::sys::hermit::fd::FileDesc; +use crate::sys::time::Instant; +use crate::sys_common::net::{getsockopt, setsockopt, sockaddr_to_addr}; +use crate::sys_common::{AsInner, FromInner, IntoInner}; use crate::time::Duration; -/// Checks whether the HermitCore's socket interface has been started already, and -/// if not, starts it. -pub fn init() -> io::Result<()> { - if abi::network_init() < 0 { - return Err(io::const_io_error!( - ErrorKind::Uncategorized, - "Unable to initialize network interface", - )); - } - - Ok(()) -} - -#[derive(Debug, Clone)] -pub struct Socket(abi::Handle); - -impl AsInner<abi::Handle> for Socket { - fn as_inner(&self) -> &abi::Handle { - &self.0 - } -} +use core::ffi::c_int; -impl Drop for Socket { - fn drop(&mut self) { - let _ = abi::tcpstream::close(self.0); - } -} +#[allow(unused_extern_crates)] +pub extern crate hermit_abi as netc; -// Arc is used to count the number of used sockets. -// Only if all sockets are released, the drop -// method will close the socket. 
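For orientation, the new hermit `cvt`/`cvt_r` helpers above follow the classic Unix pattern: map a negative return value to an io::Error, and retry on EINTR. A hypothetical call site; `some_abi_read` is an invented stand-in for a hermit-abi function and is assumed to return a negative errno value on failure:

// Hypothetical call site for the `cvt`/`cvt_r` helpers defined above.
fn read_retrying(fd: i32, buf: &mut [u8]) -> crate::io::Result<usize> {
    // cvt_r re-invokes the closure while the call fails with EINTR,
    // then cvt converts any remaining negative value into an io::Error.
    let n: isize = cvt_r(|| unsafe { some_abi_read(fd, buf.as_mut_ptr(), buf.len()) })?;
    Ok(n as usize)
}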
-#[derive(Clone)] -pub struct TcpStream(Arc<Socket>); - -impl TcpStream { - pub fn connect(addr: io::Result<&SocketAddr>) -> io::Result<TcpStream> { - let addr = addr?; - - match abi::tcpstream::connect(addr.ip().to_string().as_bytes(), addr.port(), None) { - Ok(handle) => Ok(TcpStream(Arc::new(Socket(handle)))), - _ => Err(io::const_io_error!( - ErrorKind::Uncategorized, - "Unable to initiate a connection on a socket", - )), - } - } +pub use crate::sys::{cvt, cvt_r}; - pub fn connect_timeout(saddr: &SocketAddr, duration: Duration) -> io::Result<TcpStream> { - match abi::tcpstream::connect( - saddr.ip().to_string().as_bytes(), - saddr.port(), - Some(duration.as_millis() as u64), - ) { - Ok(handle) => Ok(TcpStream(Arc::new(Socket(handle)))), - _ => Err(io::const_io_error!( - ErrorKind::Uncategorized, - "Unable to initiate a connection on a socket", - )), - } - } +pub type wrlen_t = usize; - pub fn set_read_timeout(&self, duration: Option<Duration>) -> io::Result<()> { - abi::tcpstream::set_read_timeout(*self.0.as_inner(), duration.map(|d| d.as_millis() as u64)) - .map_err(|_| { - io::const_io_error!(ErrorKind::Uncategorized, "Unable to set timeout value") - }) +pub fn cvt_gai(err: i32) -> io::Result<()> { + if err == 0 { + return Ok(()); } - pub fn set_write_timeout(&self, duration: Option<Duration>) -> io::Result<()> { - abi::tcpstream::set_write_timeout( - *self.0.as_inner(), - duration.map(|d| d.as_millis() as u64), - ) - .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "Unable to set timeout value")) - } + let detail = ""; - pub fn read_timeout(&self) -> io::Result<Option<Duration>> { - let duration = abi::tcpstream::get_read_timeout(*self.0.as_inner()).map_err(|_| { - io::const_io_error!(ErrorKind::Uncategorized, "Unable to determine timeout value") - })?; + Err(io::Error::new( + io::ErrorKind::Uncategorized, + &format!("failed to lookup address information: {detail}")[..], + )) +} - Ok(duration.map(|d| Duration::from_millis(d))) +/// Checks whether the HermitCore's socket interface has been started already, and +/// if not, starts it. +pub fn init() { + if unsafe { netc::network_init() } < 0 { + panic!("Unable to initialize network interface"); } +} - pub fn write_timeout(&self) -> io::Result<Option<Duration>> { - let duration = abi::tcpstream::get_write_timeout(*self.0.as_inner()).map_err(|_| { - io::const_io_error!(ErrorKind::Uncategorized, "Unable to determine timeout value") - })?; +#[derive(Debug)] +pub struct Socket(FileDesc); - Ok(duration.map(|d| Duration::from_millis(d))) +impl Socket { + pub fn new(addr: &SocketAddr, ty: i32) -> io::Result<Socket> { + let fam = match *addr { + SocketAddr::V4(..) => netc::AF_INET, + SocketAddr::V6(..) 
=> netc::AF_INET6, + }; + Socket::new_raw(fam, ty) } - pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> { - abi::tcpstream::peek(*self.0.as_inner(), buf) - .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "peek failed")) + pub fn new_raw(fam: i32, ty: i32) -> io::Result<Socket> { + let fd = cvt(unsafe { netc::socket(fam, ty, 0) })?; + Ok(Socket(unsafe { FileDesc::from_raw_fd(fd) })) } - pub fn read(&self, buffer: &mut [u8]) -> io::Result<usize> { - self.read_vectored(&mut [IoSliceMut::new(buffer)]) + pub fn new_pair(_fam: i32, _ty: i32) -> io::Result<(Socket, Socket)> { + unimplemented!() } - pub fn read_vectored(&self, ioslice: &mut [IoSliceMut<'_>]) -> io::Result<usize> { - let mut size: usize = 0; - - for i in ioslice.iter_mut() { - let ret = abi::tcpstream::read(*self.0.as_inner(), &mut i[0..]).map_err(|_| { - io::const_io_error!(ErrorKind::Uncategorized, "Unable to read on socket") - })?; + pub fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> { + self.set_nonblocking(true)?; + let r = unsafe { + let (addr, len) = addr.into_inner(); + cvt(netc::connect(self.as_raw_fd(), addr.as_ptr(), len)) + }; + self.set_nonblocking(false)?; - if ret != 0 { - size += ret; - } + match r { + Ok(_) => return Ok(()), + // there's no ErrorKind for EINPROGRESS :( + Err(ref e) if e.raw_os_error() == Some(netc::errno::EINPROGRESS) => {} + Err(e) => return Err(e), } - Ok(size) - } - - #[inline] - pub fn is_read_vectored(&self) -> bool { - true - } - - pub fn write(&self, buffer: &[u8]) -> io::Result<usize> { - self.write_vectored(&[IoSlice::new(buffer)]) - } + let mut pollfd = netc::pollfd { fd: self.as_raw_fd(), events: netc::POLLOUT, revents: 0 }; - pub fn write_vectored(&self, ioslice: &[IoSlice<'_>]) -> io::Result<usize> { - let mut size: usize = 0; - - for i in ioslice.iter() { - size += abi::tcpstream::write(*self.0.as_inner(), i).map_err(|_| { - io::const_io_error!(ErrorKind::Uncategorized, "Unable to write on socket") - })?; + if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout", + )); } - Ok(size) - } - - #[inline] - pub fn is_write_vectored(&self) -> bool { - true - } - - pub fn peer_addr(&self) -> io::Result<SocketAddr> { - let (ipaddr, port) = abi::tcpstream::peer_addr(*self.0.as_inner()) - .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "peer_addr failed"))?; + let start = Instant::now(); - let saddr = match ipaddr { - Ipv4(ref addr) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from(addr.0)), port), - Ipv6(ref addr) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from(addr.0)), port), - _ => { - return Err(io::const_io_error!(ErrorKind::Uncategorized, "peer_addr failed")); + loop { + let elapsed = start.elapsed(); + if elapsed >= timeout { + return Err(io::const_io_error!(io::ErrorKind::TimedOut, "connection timed out")); } - }; - - Ok(saddr) - } - - pub fn socket_addr(&self) -> io::Result<SocketAddr> { - unsupported() - } - - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - abi::tcpstream::shutdown(*self.0.as_inner(), how as i32) - .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "unable to shutdown socket")) - } - - pub fn duplicate(&self) -> io::Result<TcpStream> { - Ok(self.clone()) - } - - pub fn set_linger(&self, _linger: Option<Duration>) -> io::Result<()> { - unsupported() - } - - pub fn linger(&self) -> io::Result<Option<Duration>> { - unsupported() - } - - pub fn set_nodelay(&self, mode: bool) -> io::Result<()> { 
- abi::tcpstream::set_nodelay(*self.0.as_inner(), mode) - .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "set_nodelay failed")) - } - - pub fn nodelay(&self) -> io::Result<bool> { - abi::tcpstream::nodelay(*self.0.as_inner()) - .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "nodelay failed")) - } - - pub fn set_ttl(&self, tll: u32) -> io::Result<()> { - abi::tcpstream::set_tll(*self.0.as_inner(), tll) - .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "unable to set TTL")) - } - - pub fn ttl(&self) -> io::Result<u32> { - abi::tcpstream::get_tll(*self.0.as_inner()) - .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "unable to get TTL")) - } - pub fn take_error(&self) -> io::Result<Option<io::Error>> { - unsupported() - } - - pub fn set_nonblocking(&self, mode: bool) -> io::Result<()> { - abi::tcpstream::set_nonblocking(*self.0.as_inner(), mode).map_err(|_| { - io::const_io_error!(ErrorKind::Uncategorized, "unable to set blocking mode") - }) - } -} - -impl fmt::Debug for TcpStream { - fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { - Ok(()) - } -} - -#[derive(Clone)] -pub struct TcpListener(SocketAddr); - -impl TcpListener { - pub fn bind(addr: io::Result<&SocketAddr>) -> io::Result<TcpListener> { - let addr = addr?; - - Ok(TcpListener(*addr)) - } - - pub fn socket_addr(&self) -> io::Result<SocketAddr> { - Ok(self.0) - } - - pub fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { - let (handle, ipaddr, port) = abi::tcplistener::accept(self.0.port()) - .map_err(|_| io::const_io_error!(ErrorKind::Uncategorized, "accept failed"))?; - let saddr = match ipaddr { - Ipv4(ref addr) => SocketAddr::new(IpAddr::V4(Ipv4Addr::from(addr.0)), port), - Ipv6(ref addr) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from(addr.0)), port), - _ => { - return Err(io::const_io_error!(ErrorKind::Uncategorized, "accept failed")); + let timeout = timeout - elapsed; + let mut timeout = timeout + .as_secs() + .saturating_mul(1_000) + .saturating_add(timeout.subsec_nanos() as u64 / 1_000_000); + if timeout == 0 { + timeout = 1; } - }; - - Ok((TcpStream(Arc::new(Socket(handle))), saddr)) - } - pub fn duplicate(&self) -> io::Result<TcpListener> { - Ok(self.clone()) - } - - pub fn set_ttl(&self, _: u32) -> io::Result<()> { - unsupported() - } - - pub fn ttl(&self) -> io::Result<u32> { - unsupported() - } - - pub fn set_only_v6(&self, _: bool) -> io::Result<()> { - unsupported() + let timeout = cmp::min(timeout, c_int::MAX as u64) as c_int; + + match unsafe { netc::poll(&mut pollfd, 1, timeout) } { + -1 => { + let err = io::Error::last_os_error(); + if err.kind() != io::ErrorKind::Interrupted { + return Err(err); + } + } + 0 => {} + _ => { + // linux returns POLLOUT|POLLERR|POLLHUP for refused connections (!), so look + // for POLLHUP rather than read readiness + if pollfd.revents & netc::POLLHUP != 0 { + let e = self.take_error()?.unwrap_or_else(|| { + io::const_io_error!( + io::ErrorKind::Uncategorized, + "no error set after POLLHUP", + ) + }); + return Err(e); + } + + return Ok(()); + } + } + } } - pub fn only_v6(&self) -> io::Result<bool> { - unsupported() + pub fn accept( + &self, + storage: *mut netc::sockaddr, + len: *mut netc::socklen_t, + ) -> io::Result<Socket> { + let fd = cvt(unsafe { netc::accept(self.0.as_raw_fd(), storage, len) })?; + Ok(Socket(unsafe { FileDesc::from_raw_fd(fd) })) } - pub fn take_error(&self) -> io::Result<Option<io::Error>> { - unsupported() + pub fn duplicate(&self) -> io::Result<Socket> { + let fd = cvt(unsafe { 
netc::dup(self.0.as_raw_fd()) })?; + Ok(Socket(unsafe { FileDesc::from_raw_fd(fd) })) } - pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { - unsupported() + fn recv_with_flags(&self, buf: &mut [u8], flags: i32) -> io::Result<usize> { + let ret = + cvt(unsafe { netc::recv(self.0.as_raw_fd(), buf.as_mut_ptr(), buf.len(), flags) })?; + Ok(ret as usize) } -} -impl fmt::Debug for TcpListener { - fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { - Ok(()) + pub fn read(&self, buf: &mut [u8]) -> io::Result<usize> { + self.recv_with_flags(buf, 0) } -} - -pub struct UdpSocket(abi::Handle); -impl UdpSocket { - pub fn bind(_: io::Result<&SocketAddr>) -> io::Result<UdpSocket> { - unsupported() + pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> { + self.recv_with_flags(buf, netc::MSG_PEEK) } - pub fn peer_addr(&self) -> io::Result<SocketAddr> { - unsupported() - } + pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { + let mut size: isize = 0; - pub fn socket_addr(&self) -> io::Result<SocketAddr> { - unsupported() - } + for i in bufs.iter_mut() { + let ret: isize = + cvt(unsafe { netc::read(self.0.as_raw_fd(), i.as_mut_ptr(), i.len()) })?; - pub fn recv_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - unsupported() - } + if ret != 0 { + size += ret; + } + } - pub fn peek_from(&self, _: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - unsupported() + Ok(size.try_into().unwrap()) } - pub fn send_to(&self, _: &[u8], _: &SocketAddr) -> io::Result<usize> { - unsupported() + #[inline] + pub fn is_read_vectored(&self) -> bool { + true } - pub fn duplicate(&self) -> io::Result<UdpSocket> { - unsupported() - } + fn recv_from_with_flags(&self, buf: &mut [u8], flags: i32) -> io::Result<(usize, SocketAddr)> { + let mut storage: netc::sockaddr_storage = unsafe { mem::zeroed() }; + let mut addrlen = mem::size_of_val(&storage) as netc::socklen_t; - pub fn set_read_timeout(&self, _: Option<Duration>) -> io::Result<()> { - unsupported() + let n = cvt(unsafe { + netc::recvfrom( + self.as_raw_fd(), + buf.as_mut_ptr(), + buf.len(), + flags, + &mut storage as *mut _ as *mut _, + &mut addrlen, + ) + })?; + Ok((n as usize, sockaddr_to_addr(&storage, addrlen as usize)?)) } - pub fn set_write_timeout(&self, _: Option<Duration>) -> io::Result<()> { - unsupported() + pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.recv_from_with_flags(buf, 0) } - pub fn read_timeout(&self) -> io::Result<Option<Duration>> { - unsupported() + pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { + self.recv_from_with_flags(buf, netc::MSG_PEEK) } - pub fn write_timeout(&self) -> io::Result<Option<Duration>> { - unsupported() + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { + let sz = cvt(unsafe { netc::write(self.0.as_raw_fd(), buf.as_ptr(), buf.len()) })?; + Ok(sz.try_into().unwrap()) } - pub fn set_broadcast(&self, _: bool) -> io::Result<()> { - unsupported() - } + pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { + let mut size: isize = 0; - pub fn broadcast(&self) -> io::Result<bool> { - unsupported() - } + for i in bufs.iter() { + size += cvt(unsafe { netc::write(self.0.as_raw_fd(), i.as_ptr(), i.len()) })?; + } - pub fn set_multicast_loop_v4(&self, _: bool) -> io::Result<()> { - unsupported() + Ok(size.try_into().unwrap()) } - pub fn multicast_loop_v4(&self) -> io::Result<bool> { - unsupported() + pub fn is_write_vectored(&self) -> bool { + true } - pub fn set_multicast_ttl_v4(&self, 
_: u32) -> io::Result<()> { - unsupported() - } + pub fn set_timeout(&self, dur: Option<Duration>, kind: i32) -> io::Result<()> { + let timeout = match dur { + Some(dur) => { + if dur.as_secs() == 0 && dur.subsec_nanos() == 0 { + return Err(io::const_io_error!( + io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout", + )); + } + + let secs = if dur.as_secs() > netc::time_t::MAX as u64 { + netc::time_t::MAX + } else { + dur.as_secs() as netc::time_t + }; + let mut timeout = netc::timeval { + tv_sec: secs, + tv_usec: dur.subsec_micros() as netc::suseconds_t, + }; + if timeout.tv_sec == 0 && timeout.tv_usec == 0 { + timeout.tv_usec = 1; + } + timeout + } + None => netc::timeval { tv_sec: 0, tv_usec: 0 }, + }; - pub fn multicast_ttl_v4(&self) -> io::Result<u32> { - unsupported() + setsockopt(self, netc::SOL_SOCKET, kind, timeout) } - pub fn set_multicast_loop_v6(&self, _: bool) -> io::Result<()> { - unsupported() + pub fn timeout(&self, kind: i32) -> io::Result<Option<Duration>> { + let raw: netc::timeval = getsockopt(self, netc::SOL_SOCKET, kind)?; + if raw.tv_sec == 0 && raw.tv_usec == 0 { + Ok(None) + } else { + let sec = raw.tv_sec as u64; + let nsec = (raw.tv_usec as u32) * 1000; + Ok(Some(Duration::new(sec, nsec))) + } } - pub fn multicast_loop_v6(&self) -> io::Result<bool> { - unsupported() + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + let how = match how { + Shutdown::Write => netc::SHUT_WR, + Shutdown::Read => netc::SHUT_RD, + Shutdown::Both => netc::SHUT_RDWR, + }; + cvt(unsafe { netc::shutdown_socket(self.as_raw_fd(), how) })?; + Ok(()) } - pub fn join_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { - unsupported() - } + pub fn set_linger(&self, linger: Option<Duration>) -> io::Result<()> { + let linger = netc::linger { + l_onoff: linger.is_some() as i32, + l_linger: linger.unwrap_or_default().as_secs() as libc::c_int, + }; - pub fn join_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { - unsupported() + setsockopt(self, netc::SOL_SOCKET, netc::SO_LINGER, linger) } - pub fn leave_multicast_v4(&self, _: &Ipv4Addr, _: &Ipv4Addr) -> io::Result<()> { - unsupported() - } + pub fn linger(&self) -> io::Result<Option<Duration>> { + let val: netc::linger = getsockopt(self, netc::SOL_SOCKET, netc::SO_LINGER)?; - pub fn leave_multicast_v6(&self, _: &Ipv6Addr, _: u32) -> io::Result<()> { - unsupported() + Ok((val.l_onoff != 0).then(|| Duration::from_secs(val.l_linger as u64))) } - pub fn set_ttl(&self, _: u32) -> io::Result<()> { - unsupported() + pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { + let value: i32 = if nodelay { 1 } else { 0 }; + setsockopt(self, netc::IPPROTO_TCP, netc::TCP_NODELAY, value) } - pub fn ttl(&self) -> io::Result<u32> { - unsupported() + pub fn nodelay(&self) -> io::Result<bool> { + let raw: i32 = getsockopt(self, netc::IPPROTO_TCP, netc::TCP_NODELAY)?; + Ok(raw != 0) + } + + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + let mut nonblocking: i32 = if nonblocking { 1 } else { 0 }; + cvt(unsafe { + netc::ioctl( + self.as_raw_fd(), + netc::FIONBIO, + &mut nonblocking as *mut _ as *mut core::ffi::c_void, + ) + }) + .map(drop) } pub fn take_error(&self) -> io::Result<Option<io::Error>> { - unsupported() - } - - pub fn set_nonblocking(&self, _: bool) -> io::Result<()> { - unsupported() - } - - pub fn recv(&self, _: &mut [u8]) -> io::Result<usize> { - unsupported() - } - - pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> { - unsupported() + unimplemented!() } - pub fn 
send(&self, _: &[u8]) -> io::Result<usize> {
- unsupported()
- }
-
- pub fn connect(&self, _: io::Result<&SocketAddr>) -> io::Result<()> {
- unsupported()
+ // This is used by sys_common code to abstract over Windows and Unix.
+ pub fn as_raw(&self) -> RawFd {
+ self.0.as_raw_fd()
}
}

-impl fmt::Debug for UdpSocket {
- fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
- Ok(())
+impl AsInner<FileDesc> for Socket {
+ fn as_inner(&self) -> &FileDesc {
+ &self.0
}
}

-pub struct LookupHost(!);
-
-impl LookupHost {
- pub fn port(&self) -> u16 {
+impl IntoInner<FileDesc> for Socket {
+ fn into_inner(self) -> FileDesc {
self.0
}
}

-impl Iterator for LookupHost {
- type Item = SocketAddr;
- fn next(&mut self) -> Option<SocketAddr> {
- self.0
+impl FromInner<FileDesc> for Socket {
+ fn from_inner(file_desc: FileDesc) -> Self {
+ Self(file_desc)
}
}

-impl TryFrom<&str> for LookupHost {
- type Error = io::Error;
-
- fn try_from(_v: &str) -> io::Result<LookupHost> {
- unsupported()
+impl AsFd for Socket {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_fd()
}
}

-impl<'a> TryFrom<(&'a str, u16)> for LookupHost {
- type Error = io::Error;
-
- fn try_from(_v: (&'a str, u16)) -> io::Result<LookupHost> {
- unsupported()
+impl AsRawFd for Socket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_raw_fd()
}
}
-
-#[allow(nonstandard_style)]
-pub mod netc {
- pub const AF_INET: u8 = 0;
- pub const AF_INET6: u8 = 1;
- pub type sa_family_t = u8;
-
- #[derive(Copy, Clone)]
- pub struct in_addr {
- pub s_addr: u32,
- }
-
- #[derive(Copy, Clone)]
- pub struct sockaddr_in {
- pub sin_family: sa_family_t,
- pub sin_port: u16,
- pub sin_addr: in_addr,
- }
-
- #[derive(Copy, Clone)]
- pub struct in6_addr {
- pub s6_addr: [u8; 16],
- }
-
- #[derive(Copy, Clone)]
- pub struct sockaddr_in6 {
- pub sin6_family: sa_family_t,
- pub sin6_port: u16,
- pub sin6_addr: in6_addr,
- pub sin6_flowinfo: u32,
- pub sin6_scope_id: u32,
- }
-
- #[derive(Copy, Clone)]
- pub struct sockaddr {}
-}
diff --git a/library/std/src/sys/hermit/os.rs b/library/std/src/sys/hermit/os.rs
index 8f927df85..e53dbae61 100644
--- a/library/std/src/sys/hermit/os.rs
+++ b/library/std/src/sys/hermit/os.rs
@@ -4,7 +4,7 @@ use crate::ffi::{CStr, OsStr, OsString};
use crate::fmt;
use crate::io;
use crate::marker::PhantomData;
-use crate::os::unix::ffi::OsStringExt;
+use crate::os::hermit::ffi::OsStringExt;
use crate::path::{self, PathBuf};
use crate::str;
use crate::sync::Mutex;
diff --git a/library/std/src/sys/hermit/thread.rs b/library/std/src/sys/hermit/thread.rs
index 8f65544a9..2507f7069 100644
--- a/library/std/src/sys/hermit/thread.rs
+++ b/library/std/src/sys/hermit/thread.rs
@@ -27,10 +27,10 @@ impl Thread {
p: Box<dyn FnOnce()>,
core_id: isize,
) -> io::Result<Thread> {
- let p = Box::into_raw(box p);
+ let p = Box::into_raw(Box::new(p));
let tid = abi::spawn2(
thread_start,
- p as usize,
+ p.expose_addr(),
abi::Priority::into(abi::NORMAL_PRIO),
stack,
core_id,
diff --git a/library/std/src/sys/hermit/thread_local_dtor.rs b/library/std/src/sys/hermit/thread_local_dtor.rs
index 9b683fce1..613266b95 100644
--- a/library/std/src/sys/hermit/thread_local_dtor.rs
+++ b/library/std/src/sys/hermit/thread_local_dtor.rs
@@ -5,32 +5,23 @@
// This solution works like the implementation of macOS and
// doesn't need additional OS support

-use crate::cell::Cell;
-use crate::ptr;
+use crate::mem;

#[thread_local]
-static DTORS: Cell<*mut List> = Cell::new(ptr::null_mut());
-
-type List = Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>;
+static mut DTORS: Vec<(*mut
u8, unsafe extern "C" fn(*mut u8))> = Vec::new();

pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) {
- if DTORS.get().is_null() {
- let v: Box<List> = box Vec::new();
- DTORS.set(Box::into_raw(v));
- }
-
- let list: &mut List = &mut *DTORS.get();
+ let list = &mut DTORS;
list.push((t, dtor));
}

// every thread calls this function to run through all possible destructors
pub unsafe fn run_dtors() {
- let mut ptr = DTORS.replace(ptr::null_mut());
- while !ptr.is_null() {
- let list = Box::from_raw(ptr);
- for (ptr, dtor) in list.into_iter() {
+ let mut list = mem::take(&mut DTORS);
+ while !list.is_empty() {
+ for (ptr, dtor) in list {
dtor(ptr);
}
- ptr = DTORS.replace(ptr::null_mut());
+ list = mem::take(&mut DTORS);
}
}
diff --git a/library/std/src/sys/hermit/time.rs b/library/std/src/sys/hermit/time.rs
index c17e6c8af..32ddc4346 100644
--- a/library/std/src/sys/hermit/time.rs
+++ b/library/std/src/sys/hermit/time.rs
@@ -1,6 +1,7 @@
#![allow(dead_code)]

use crate::cmp::Ordering;
+use crate::ops::{Add, AddAssign, Sub, SubAssign};
use crate::sys::hermit::abi;
use crate::sys::hermit::abi::timespec;
use crate::sys::hermit::abi::{CLOCK_MONOTONIC, CLOCK_REALTIME, NSEC_PER_SEC};
@@ -102,55 +103,122 @@ impl Hash for Timespec {
}

#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
-pub struct Instant {
- t: Timespec,
-}
+pub struct Instant(Timespec);

impl Instant {
pub fn now() -> Instant {
let mut time: Timespec = Timespec::zero();
let _ = unsafe { abi::clock_gettime(CLOCK_MONOTONIC, &mut time.t as *mut timespec) };

- Instant { t: time }
+ Instant(time)
+ }
+
+ #[stable(feature = "time2", since = "1.8.0")]
+ pub fn elapsed(&self) -> Duration {
+ Instant::now() - *self
+ }
+
+ pub fn duration_since(&self, earlier: Instant) -> Duration {
+ self.checked_duration_since(earlier).unwrap_or_default()
+ }
+
+ pub fn checked_duration_since(&self, earlier: Instant) -> Option<Duration> {
+ self.checked_sub_instant(&earlier)
}

pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
- self.t.sub_timespec(&other.t).ok()
+ self.0.sub_timespec(&other.0).ok()
}

pub fn checked_add_duration(&self, other: &Duration) -> Option<Instant> {
- Some(Instant { t: self.t.checked_add_duration(other)? })
+ Some(Instant(self.0.checked_add_duration(other)?))
}

pub fn checked_sub_duration(&self, other: &Duration) -> Option<Instant> {
- Some(Instant { t: self.t.checked_sub_duration(other)? })
+ Some(Instant(self.0.checked_sub_duration(other)?))
+ }
+
+ pub fn checked_add(&self, duration: Duration) -> Option<Instant> {
+ self.0.checked_add_duration(&duration).map(Instant)
+ }
+
+ pub fn checked_sub(&self, duration: Duration) -> Option<Instant> {
+ self.0.checked_sub_duration(&duration).map(Instant)
}
}

-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-pub struct SystemTime {
- t: Timespec,
+impl Add<Duration> for Instant {
+ type Output = Instant;
+
+ /// # Panics
+ ///
+ /// This function may panic if the resulting point in time cannot be represented by the
+ /// underlying data structure. See [`Instant::checked_add`] for a version without panic.
+ fn add(self, other: Duration) -> Instant { + self.checked_add(other).expect("overflow when adding duration to instant") + } } -pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() }; +impl AddAssign<Duration> for Instant { + fn add_assign(&mut self, other: Duration) { + *self = *self + other; + } +} + +impl Sub<Duration> for Instant { + type Output = Instant; + + fn sub(self, other: Duration) -> Instant { + self.checked_sub(other).expect("overflow when subtracting duration from instant") + } +} + +impl SubAssign<Duration> for Instant { + fn sub_assign(&mut self, other: Duration) { + *self = *self - other; + } +} + +impl Sub<Instant> for Instant { + type Output = Duration; + + /// Returns the amount of time elapsed from another instant to this one, + /// or zero duration if that instant is later than this one. + /// + /// # Panics + /// + /// Previous rust versions panicked when `other` was later than `self`. Currently this + /// method saturates. Future versions may reintroduce the panic in some circumstances. + /// See [Monotonicity]. + /// + /// [Monotonicity]: Instant#monotonicity + fn sub(self, other: Instant) -> Duration { + self.duration_since(other) + } +} + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct SystemTime(Timespec); + +pub const UNIX_EPOCH: SystemTime = SystemTime(Timespec::zero()); impl SystemTime { pub fn now() -> SystemTime { let mut time: Timespec = Timespec::zero(); let _ = unsafe { abi::clock_gettime(CLOCK_REALTIME, &mut time.t as *mut timespec) }; - SystemTime { t: time } + SystemTime(time) } pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> { - self.t.sub_timespec(&other.t) + self.0.sub_timespec(&other.0) } pub fn checked_add_duration(&self, other: &Duration) -> Option<SystemTime> { - Some(SystemTime { t: self.t.checked_add_duration(other)? }) + Some(SystemTime(self.0.checked_add_duration(other)?)) } pub fn checked_sub_duration(&self, other: &Duration) -> Option<SystemTime> { - Some(SystemTime { t: self.t.checked_sub_duration(other)? }) + Some(SystemTime(self.0.checked_sub_duration(other)?)) } } diff --git a/library/std/src/sys/itron/thread.rs b/library/std/src/sys/itron/thread.rs index 19350b83f..ae0f71853 100644 --- a/library/std/src/sys/itron/thread.rs +++ b/library/std/src/sys/itron/thread.rs @@ -247,7 +247,7 @@ impl Thread { // [FINISHED → JOINED] // To synchronize with the child task's memory accesses to // `inner` up to the point of the assignment of `FINISHED`, - // `Ordering::Acquire` must be used for the above `swap` call`. + // `Ordering::Acquire` must be used for the above `swap` call. 
} _ => unsafe { hint::unreachable_unchecked() }, } diff --git a/library/std/src/sys/itron/thread_parking.rs b/library/std/src/sys/itron/thread_parking.rs new file mode 100644 index 000000000..fe9934439 --- /dev/null +++ b/library/std/src/sys/itron/thread_parking.rs @@ -0,0 +1,37 @@ +use super::abi; +use super::error::expect_success_aborting; +use super::time::with_tmos; +use crate::time::Duration; + +pub type ThreadId = abi::ID; + +pub use super::task::current_task_id_aborting as current; + +pub fn park(_hint: usize) { + match unsafe { abi::slp_tsk() } { + abi::E_OK | abi::E_RLWAI => {} + err => { + expect_success_aborting(err, &"slp_tsk"); + } + } +} + +pub fn park_timeout(dur: Duration, _hint: usize) { + match with_tmos(dur, |tmo| unsafe { abi::tslp_tsk(tmo) }) { + abi::E_OK | abi::E_RLWAI | abi::E_TMOUT => {} + err => { + expect_success_aborting(err, &"tslp_tsk"); + } + } +} + +pub fn unpark(id: ThreadId, _hint: usize) { + match unsafe { abi::wup_tsk(id) } { + // It is allowed to try to wake up a destroyed or unrelated task, so we ignore all + // errors that could result from that situation. + abi::E_OK | abi::E_NOEXS | abi::E_OBJ | abi::E_QOVR => {} + err => { + expect_success_aborting(err, &"wup_tsk"); + } + } +} diff --git a/library/std/src/sys/itron/wait_flag.rs b/library/std/src/sys/itron/wait_flag.rs deleted file mode 100644 index e432edd20..000000000 --- a/library/std/src/sys/itron/wait_flag.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::mem::MaybeUninit; -use crate::time::Duration; - -use super::{ - abi, - error::{expect_success, fail}, - time::with_tmos, -}; - -const CLEAR: abi::FLGPTN = 0; -const RAISED: abi::FLGPTN = 1; - -/// A thread parking primitive that is not susceptible to race conditions, -/// but provides no atomic ordering guarantees and allows only one `raise` per wait. -pub struct WaitFlag { - flag: abi::ID, -} - -impl WaitFlag { - /// Creates a new wait flag. - pub fn new() -> WaitFlag { - let flag = expect_success( - unsafe { - abi::acre_flg(&abi::T_CFLG { - flgatr: abi::TA_FIFO | abi::TA_WSGL | abi::TA_CLR, - iflgptn: CLEAR, - }) - }, - &"acre_flg", - ); - - WaitFlag { flag } - } - - /// Wait for the wait flag to be raised. - pub fn wait(&self) { - let mut token = MaybeUninit::uninit(); - expect_success( - unsafe { abi::wai_flg(self.flag, RAISED, abi::TWF_ORW, token.as_mut_ptr()) }, - &"wai_flg", - ); - } - - /// Wait for the wait flag to be raised or the timeout to occur. - /// - /// Returns whether the flag was raised (`true`) or the operation timed out (`false`). - pub fn wait_timeout(&self, dur: Duration) -> bool { - let mut token = MaybeUninit::uninit(); - let res = with_tmos(dur, |tmout| unsafe { - abi::twai_flg(self.flag, RAISED, abi::TWF_ORW, token.as_mut_ptr(), tmout) - }); - - match res { - abi::E_OK => true, - abi::E_TMOUT => false, - error => fail(error, &"twai_flg"), - } - } - - /// Raise the wait flag. - /// - /// Calls to this function should be balanced with the number of successful waits. 
- pub fn raise(&self) { - expect_success(unsafe { abi::set_flg(self.flag, RAISED) }, &"set_flg"); - } -} - -impl Drop for WaitFlag { - fn drop(&mut self) { - expect_success(unsafe { abi::del_flg(self.flag) }, &"del_flg"); - } -} diff --git a/library/std/src/sys/solid/mod.rs b/library/std/src/sys/solid/mod.rs index 5867979a2..923d27fd9 100644 --- a/library/std/src/sys/solid/mod.rs +++ b/library/std/src/sys/solid/mod.rs @@ -13,9 +13,9 @@ mod itron { pub(super) mod spin; pub(super) mod task; pub mod thread; + pub mod thread_parking; pub(super) mod time; use super::unsupported; - pub mod wait_flag; } pub mod alloc; @@ -43,8 +43,8 @@ pub use self::itron::thread; pub mod memchr; pub mod thread_local_dtor; pub mod thread_local_key; +pub use self::itron::thread_parking; pub mod time; -pub use self::itron::wait_flag; mod rwlock; diff --git a/library/std/src/sys/solid/thread_local_dtor.rs b/library/std/src/sys/solid/thread_local_dtor.rs index 973564570..bad14bb37 100644 --- a/library/std/src/sys/solid/thread_local_dtor.rs +++ b/library/std/src/sys/solid/thread_local_dtor.rs @@ -5,43 +5,35 @@ use super::{abi, itron::task}; use crate::cell::Cell; -use crate::ptr; +use crate::mem; #[thread_local] -static DTORS: Cell<*mut List> = Cell::new(ptr::null_mut()); +static REGISTERED: Cell<bool> = Cell::new(false); -type List = Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>; +#[thread_local] +static mut DTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new(); pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { - if DTORS.get().is_null() { + if !REGISTERED.get() { let tid = task::current_task_id_aborting(); - let v: Box<List> = box Vec::new(); - DTORS.set(Box::into_raw(v)); - // Register `tls_dtor` to make sure the TLS destructors are called // for tasks created by other means than `std::thread` unsafe { abi::SOLID_TLS_AddDestructor(tid as i32, tls_dtor) }; + REGISTERED.set(true); } - let list: &mut List = unsafe { &mut *DTORS.get() }; + let list = unsafe { &mut DTORS }; list.push((t, dtor)); } pub unsafe fn run_dtors() { - let ptr = DTORS.get(); - if !ptr.is_null() { - // Swap the destructor list, call all registered destructors, - // and repeat this until the list becomes permanently empty. - while let Some(list) = Some(crate::mem::replace(unsafe { &mut *ptr }, Vec::new())) - .filter(|list| !list.is_empty()) - { - for (ptr, dtor) in list.into_iter() { - unsafe { dtor(ptr) }; - } + let mut list = mem::take(unsafe { &mut DTORS }); + while !list.is_empty() { + for (ptr, dtor) in list { + unsafe { dtor(ptr) }; } - // Drop the destructor list - unsafe { Box::from_raw(DTORS.replace(ptr::null_mut())) }; + list = mem::take(unsafe { &mut DTORS }); } } diff --git a/library/std/src/sys/unix/args.rs b/library/std/src/sys/unix/args.rs index a342f0f5e..3d79058b3 100644 --- a/library/std/src/sys/unix/args.rs +++ b/library/std/src/sys/unix/args.rs @@ -69,7 +69,8 @@ impl DoubleEndedIterator for Args { target_os = "fuchsia", target_os = "redox", target_os = "vxworks", - target_os = "horizon" + target_os = "horizon", + target_os = "nto", ))] mod imp { use super::Args; @@ -141,12 +142,28 @@ mod imp { // list. 
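Both thread_local_dtor rewrites above (hermit and solid) rely on the same drain-until-stable idiom: a running destructor may register further destructors, so the list is re-taken until it stays empty. A condensed standalone sketch, with entries simplified to `fn()` instead of `(*mut u8, dtor)` pairs:

// Condensed sketch of the `run_dtors` loop used in both rewrites above.
use std::mem;

#[thread_local]
static mut DTORS: Vec<fn()> = Vec::new();

unsafe fn run_dtors() {
    let mut list = mem::take(&mut DTORS);
    while !list.is_empty() {
        for dtor in list {
            dtor(); // may push new entries onto DTORS
        }
        // Pick up anything registered by the destructors we just ran.
        list = mem::take(&mut DTORS);
    }
}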
let argv = ARGV.load(Ordering::Relaxed);
let argc = if argv.is_null() { 0 } else { ARGC.load(Ordering::Relaxed) };
- (0..argc)
- .map(|i| {
- let cstr = CStr::from_ptr(*argv.offset(i) as *const libc::c_char);
- OsStringExt::from_vec(cstr.to_bytes().to_vec())
- })
- .collect()
+ let mut args = Vec::with_capacity(argc as usize);
+ for i in 0..argc {
+ let ptr = *argv.offset(i) as *const libc::c_char;
+
+ // Some C command-line parsers (e.g. GLib and Qt) replace already
+ // handled arguments in `argv` with `NULL` and move them to the end. That
+ // means that `argc` might be bigger than the actual number of non-`NULL`
+ // pointers in `argv` at this point.
+ //
+ // To handle this we simply stop iterating at the first `NULL` argument.
+ //
+ // `argv` is also guaranteed to be `NULL`-terminated so any non-`NULL` arguments
+ // after the first `NULL` can safely be ignored.
+ if ptr.is_null() {
+ break;
+ }
+
+ let cstr = CStr::from_ptr(ptr);
+ args.push(OsStringExt::from_vec(cstr.to_bytes().to_vec()));
+ }
+
+ args
}
}
}
diff --git a/library/std/src/sys/unix/env.rs b/library/std/src/sys/unix/env.rs
index c9ba661c8..1a9276f11 100644
--- a/library/std/src/sys/unix/env.rs
+++ b/library/std/src/sys/unix/env.rs
@@ -185,6 +185,17 @@ pub mod os {
pub const EXE_EXTENSION: &str = "";
}

+#[cfg(target_os = "nto")]
+pub mod os {
+ pub const FAMILY: &str = "unix";
+ pub const OS: &str = "nto";
+ pub const DLL_PREFIX: &str = "lib";
+ pub const DLL_SUFFIX: &str = ".so";
+ pub const DLL_EXTENSION: &str = "so";
+ pub const EXE_SUFFIX: &str = "";
+ pub const EXE_EXTENSION: &str = "";
+}
+
#[cfg(target_os = "redox")]
pub mod os {
pub const FAMILY: &str = "unix";
diff --git a/library/std/src/sys/unix/fd.rs b/library/std/src/sys/unix/fd.rs
index dbaa3c33e..9874af4d3 100644
--- a/library/std/src/sys/unix/fd.rs
+++ b/library/std/src/sys/unix/fd.rs
@@ -53,7 +53,12 @@ const fn max_iov() -> usize {
libc::IOV_MAX as usize
}

-#[cfg(any(target_os = "android", target_os = "emscripten", target_os = "linux"))]
+#[cfg(any(
+ target_os = "android",
+ target_os = "emscripten",
+ target_os = "linux",
+ target_os = "nto",
+))]
const fn max_iov() -> usize {
libc::UIO_MAXIOV as usize
}
@@ -67,6 +72,7 @@ const fn max_iov() -> usize {
target_os = "linux",
target_os = "macos",
target_os = "netbsd",
+ target_os = "nto",
target_os = "openbsd",
target_os = "horizon",
target_os = "watchos",
@@ -92,7 +98,7 @@ impl FileDesc {
let ret = cvt(unsafe {
libc::readv(
self.as_raw_fd(),
- bufs.as_ptr() as *const libc::iovec,
+ bufs.as_mut_ptr() as *mut libc::iovec as *const libc::iovec,
cmp::min(bufs.len(), max_iov()) as libc::c_int,
)
})?;
@@ -101,7 +107,7 @@ impl FileDesc {

#[cfg(any(target_os = "espidf", target_os = "horizon"))]
pub fn read_vectored(&self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
- return crate::io::default_read_vectored(|b| self.read(b), bufs);
+ io::default_read_vectored(|b| self.read(b), bufs)
}

#[inline]
@@ -147,6 +153,95 @@ impl FileDesc {
Ok(())
}

+ #[cfg(any(
+ target_os = "emscripten",
+ target_os = "freebsd",
+ target_os = "fuchsia",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ ))]
+ pub fn read_vectored_at(&self, bufs: &mut [IoSliceMut<'_>], offset: u64) -> io::Result<usize> {
+ let ret = cvt(unsafe {
+ libc::preadv(
+ self.as_raw_fd(),
+ bufs.as_mut_ptr() as *mut libc::iovec as *const libc::iovec,
+ cmp::min(bufs.len(), max_iov()) as libc::c_int,
+ offset as _,
+ )
+ })?;
+ Ok(ret as usize)
+ }
+
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "emscripten",
+
target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "linux", + target_os = "macos", + target_os = "netbsd", + )))] + pub fn read_vectored_at(&self, bufs: &mut [IoSliceMut<'_>], offset: u64) -> io::Result<usize> { + io::default_read_vectored(|b| self.read_at(b, offset), bufs) + } + + // We support some old Android versions that do not have `preadv` in libc, + // so we use weak linkage and fallback to a direct syscall if not available. + // + // On 32-bit targets, we don't want to deal with weird ABI issues around + // passing 64-bits parameters to syscalls, so we fallback to the default + // implementation if `preadv` is not available. + #[cfg(all(target_os = "android", target_pointer_width = "64"))] + pub fn read_vectored_at(&self, bufs: &mut [IoSliceMut<'_>], offset: u64) -> io::Result<usize> { + super::weak::syscall! { + fn preadv( + fd: libc::c_int, + iovec: *const libc::iovec, + n_iovec: libc::c_int, + offset: off64_t + ) -> isize + } + + let ret = cvt(unsafe { + preadv( + self.as_raw_fd(), + bufs.as_mut_ptr() as *mut libc::iovec as *const libc::iovec, + cmp::min(bufs.len(), max_iov()) as libc::c_int, + offset as _, + ) + })?; + Ok(ret as usize) + } + + // We support old MacOS and iOS versions that do not have `preadv`. There is + // no `syscall` possible in these platform. + #[cfg(any( + all(target_os = "android", target_pointer_width = "32"), + target_os = "ios", + target_os = "macos", + ))] + pub fn read_vectored_at(&self, bufs: &mut [IoSliceMut<'_>], offset: u64) -> io::Result<usize> { + super::weak::weak!(fn preadv64(libc::c_int, *const libc::iovec, libc::c_int, off64_t) -> isize); + + match preadv64.get() { + Some(preadv) => { + let ret = cvt(unsafe { + preadv( + self.as_raw_fd(), + bufs.as_mut_ptr() as *mut libc::iovec as *const libc::iovec, + cmp::min(bufs.len(), max_iov()) as libc::c_int, + offset as _, + ) + })?; + Ok(ret as usize) + } + None => io::default_read_vectored(|b| self.read_at(b, offset), bufs), + } + } + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { let ret = cvt(unsafe { libc::write( @@ -172,7 +267,7 @@ impl FileDesc { #[cfg(any(target_os = "espidf", target_os = "horizon"))] pub fn write_vectored(&self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { - return crate::io::default_write_vectored(|b| self.write(b), bufs); + io::default_write_vectored(|b| self.write(b), bufs) } #[inline] @@ -197,9 +292,93 @@ impl FileDesc { } } - #[cfg(target_os = "linux")] - pub fn get_cloexec(&self) -> io::Result<bool> { - unsafe { Ok((cvt(libc::fcntl(self.as_raw_fd(), libc::F_GETFD))? 
& libc::FD_CLOEXEC) != 0) } + #[cfg(any( + target_os = "emscripten", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + ))] + pub fn write_vectored_at(&self, bufs: &[IoSlice<'_>], offset: u64) -> io::Result<usize> { + let ret = cvt(unsafe { + libc::pwritev( + self.as_raw_fd(), + bufs.as_ptr() as *const libc::iovec, + cmp::min(bufs.len(), max_iov()) as libc::c_int, + offset as _, + ) + })?; + Ok(ret as usize) + } + + #[cfg(not(any( + target_os = "android", + target_os = "emscripten", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "linux", + target_os = "macos", + target_os = "netbsd", + )))] + pub fn write_vectored_at(&self, bufs: &[IoSlice<'_>], offset: u64) -> io::Result<usize> { + io::default_write_vectored(|b| self.write_at(b, offset), bufs) + } + + // We support some old Android versions that do not have `pwritev` in libc, + // so we use weak linkage and fallback to a direct syscall if not available. + // + // On 32-bit targets, we don't want to deal with weird ABI issues around + // passing 64-bits parameters to syscalls, so we fallback to the default + // implementation if `pwritev` is not available. + #[cfg(all(target_os = "android", target_pointer_width = "64"))] + pub fn write_vectored_at(&self, bufs: &[IoSlice<'_>], offset: u64) -> io::Result<usize> { + super::weak::syscall! { + fn pwritev( + fd: libc::c_int, + iovec: *const libc::iovec, + n_iovec: libc::c_int, + offset: off64_t + ) -> isize + } + + let ret = cvt(unsafe { + pwritev( + self.as_raw_fd(), + bufs.as_ptr() as *const libc::iovec, + cmp::min(bufs.len(), max_iov()) as libc::c_int, + offset as _, + ) + })?; + Ok(ret as usize) + } + + // We support old MacOS and iOS versions that do not have `pwritev`. There is + // no `syscall` possible in these platform. 
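The weak!/syscall! machinery used above is internal to std; as a rough standalone illustration of the same runtime-lookup idea (Linux-flavored; the `Pwritev64` alias and probe function are assumptions of this sketch, not std API):

// Rough standalone illustration of the runtime weak-symbol lookup that
// `weak!` performs above: probe for `pwritev64` with dlsym and fall back
// to the non-vectored path when the symbol is absent.
use std::mem;

type Pwritev64 = unsafe extern "C" fn(
    libc::c_int,
    *const libc::iovec,
    libc::c_int,
    libc::off64_t,
) -> libc::ssize_t;

unsafe fn pwritev64_if_available() -> Option<Pwritev64> {
    let sym = libc::dlsym(libc::RTLD_DEFAULT, "pwritev64\0".as_ptr().cast());
    // A null result means this libc does not provide the symbol.
    if sym.is_null() { None } else { Some(mem::transmute(sym)) }
}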
+ #[cfg(any( + all(target_os = "android", target_pointer_width = "32"), + target_os = "ios", + target_os = "macos", + ))] + pub fn write_vectored_at(&self, bufs: &[IoSlice<'_>], offset: u64) -> io::Result<usize> { + super::weak::weak!(fn pwritev64(libc::c_int, *const libc::iovec, libc::c_int, off64_t) -> isize); + + match pwritev64.get() { + Some(pwritev) => { + let ret = cvt(unsafe { + pwritev( + self.as_raw_fd(), + bufs.as_ptr() as *const libc::iovec, + cmp::min(bufs.len(), max_iov()) as libc::c_int, + offset as _, + ) + })?; + Ok(ret as usize) + } + None => io::default_write_vectored(|b| self.write_at(b, offset), bufs), + } } #[cfg(not(any( @@ -212,7 +391,8 @@ impl FileDesc { target_os = "linux", target_os = "haiku", target_os = "redox", - target_os = "vxworks" + target_os = "vxworks", + target_os = "nto", )))] pub fn set_cloexec(&self) -> io::Result<()> { unsafe { @@ -230,7 +410,8 @@ impl FileDesc { target_os = "linux", target_os = "haiku", target_os = "redox", - target_os = "vxworks" + target_os = "vxworks", + target_os = "nto", ))] pub fn set_cloexec(&self) -> io::Result<()> { unsafe { @@ -284,6 +465,10 @@ impl<'a> Read for &'a FileDesc { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { (**self).read(buf) } + + fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> { + (**self).read_buf(cursor) + } } impl AsInner<OwnedFd> for FileDesc { diff --git a/library/std/src/sys/unix/fs.rs b/library/std/src/sys/unix/fs.rs index 8e1f35d6c..7566fafda 100644 --- a/library/std/src/sys/unix/fs.rs +++ b/library/std/src/sys/unix/fs.rs @@ -13,7 +13,8 @@ use crate::mem; target_os = "solaris", target_os = "fuchsia", target_os = "redox", - target_os = "illumos" + target_os = "illumos", + target_os = "nto", ))] use crate::mem::MaybeUninit; use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd}; @@ -54,7 +55,8 @@ use libc::fstatat64; target_os = "solaris", target_os = "fuchsia", target_os = "redox", - target_os = "illumos" + target_os = "illumos", + target_os = "nto", ))] use libc::readdir as readdir64; #[cfg(target_os = "linux")] @@ -69,7 +71,8 @@ use libc::readdir64_r; target_os = "illumos", target_os = "l4re", target_os = "fuchsia", - target_os = "redox" + target_os = "redox", + target_os = "nto", )))] use libc::readdir_r as readdir64_r; #[cfg(target_os = "android")] @@ -277,7 +280,8 @@ unsafe impl Sync for Dir {} target_os = "solaris", target_os = "illumos", target_os = "fuchsia", - target_os = "redox" + target_os = "redox", + target_os = "nto", ))] pub struct DirEntry { dir: Arc<InnerReadDir>, @@ -297,11 +301,12 @@ pub struct DirEntry { target_os = "solaris", target_os = "illumos", target_os = "fuchsia", - target_os = "redox" + target_os = "redox", + target_os = "nto", ))] struct dirent64_min { d_ino: u64, - #[cfg(not(any(target_os = "solaris", target_os = "illumos")))] + #[cfg(not(any(target_os = "solaris", target_os = "illumos", target_os = "nto")))] d_type: u8, } @@ -311,7 +316,8 @@ struct dirent64_min { target_os = "solaris", target_os = "illumos", target_os = "fuchsia", - target_os = "redox" + target_os = "redox", + target_os = "nto", )))] pub struct DirEntry { dir: Arc<InnerReadDir>, @@ -438,7 +444,7 @@ impl FileAttr { } } -#[cfg(not(target_os = "netbsd"))] +#[cfg(not(any(target_os = "netbsd", target_os = "nto")))] impl FileAttr { #[cfg(not(any(target_os = "vxworks", target_os = "espidf", target_os = "horizon")))] pub fn modified(&self) -> io::Result<SystemTime> { @@ -524,6 +530,21 @@ impl FileAttr { } } +#[cfg(target_os = "nto")] +impl FileAttr { + pub 
fn modified(&self) -> io::Result<SystemTime> { + Ok(SystemTime::new(self.stat.st_mtim.tv_sec, self.stat.st_mtim.tv_nsec)) + } + + pub fn accessed(&self) -> io::Result<SystemTime> { + Ok(SystemTime::new(self.stat.st_atim.tv_sec, self.stat.st_atim.tv_nsec)) + } + + pub fn created(&self) -> io::Result<SystemTime> { + Ok(SystemTime::new(self.stat.st_ctim.tv_sec, self.stat.st_ctim.tv_nsec)) + } +} + impl AsInner<stat64> for FileAttr { fn as_inner(&self) -> &stat64 { &self.stat @@ -603,7 +624,8 @@ impl Iterator for ReadDir { target_os = "solaris", target_os = "fuchsia", target_os = "redox", - target_os = "illumos" + target_os = "illumos", + target_os = "nto", ))] fn next(&mut self) -> Option<io::Result<DirEntry>> { if self.end_of_stream { @@ -686,7 +708,11 @@ impl Iterator for ReadDir { let entry = dirent64_min { d_ino: *offset_ptr!(entry_ptr, d_ino) as u64, - #[cfg(not(any(target_os = "solaris", target_os = "illumos")))] + #[cfg(not(any( + target_os = "solaris", + target_os = "illumos", + target_os = "nto", + )))] d_type: *offset_ptr!(entry_ptr, d_type) as u8, }; @@ -705,7 +731,8 @@ impl Iterator for ReadDir { target_os = "solaris", target_os = "fuchsia", target_os = "redox", - target_os = "illumos" + target_os = "illumos", + target_os = "nto", )))] fn next(&mut self) -> Option<io::Result<DirEntry>> { if self.end_of_stream { @@ -794,7 +821,8 @@ impl DirEntry { target_os = "solaris", target_os = "illumos", target_os = "haiku", - target_os = "vxworks" + target_os = "vxworks", + target_os = "nto", ))] pub fn file_type(&self) -> io::Result<FileType> { self.metadata().map(|m| m.file_type()) @@ -804,7 +832,8 @@ impl DirEntry { target_os = "solaris", target_os = "illumos", target_os = "haiku", - target_os = "vxworks" + target_os = "vxworks", + target_os = "nto", )))] pub fn file_type(&self) -> io::Result<FileType> { match self.entry.d_type { @@ -834,7 +863,8 @@ impl DirEntry { target_os = "redox", target_os = "vxworks", target_os = "espidf", - target_os = "horizon" + target_os = "horizon", + target_os = "nto", ))] pub fn ino(&self) -> u64 { self.entry.d_ino as u64 @@ -887,7 +917,8 @@ impl DirEntry { target_os = "solaris", target_os = "illumos", target_os = "fuchsia", - target_os = "redox" + target_os = "redox", + target_os = "nto", )))] fn name_cstr(&self) -> &CStr { unsafe { CStr::from_ptr(self.entry.d_name.as_ptr()) } @@ -898,7 +929,8 @@ impl DirEntry { target_os = "solaris", target_os = "illumos", target_os = "fuchsia", - target_os = "redox" + target_os = "redox", + target_os = "nto", ))] fn name_cstr(&self) -> &CStr { &self.name @@ -1051,7 +1083,8 @@ impl File { target_os = "linux", target_os = "android", target_os = "netbsd", - target_os = "openbsd" + target_os = "openbsd", + target_os = "nto", ))] unsafe fn os_datasync(fd: c_int) -> c_int { libc::fdatasync(fd) @@ -1065,6 +1098,7 @@ impl File { target_os = "netbsd", target_os = "openbsd", target_os = "watchos", + target_os = "nto", )))] unsafe fn os_datasync(fd: c_int) -> c_int { libc::fsync(fd) @@ -1098,6 +1132,10 @@ impl File { self.0.read_buf(cursor) } + pub fn read_vectored_at(&self, bufs: &mut [IoSliceMut<'_>], offset: u64) -> io::Result<usize> { + self.0.read_vectored_at(bufs, offset) + } + pub fn write(&self, buf: &[u8]) -> io::Result<usize> { self.0.write(buf) } @@ -1115,6 +1153,10 @@ impl File { self.0.write_at(buf, offset) } + pub fn write_vectored_at(&self, bufs: &[IoSlice<'_>], offset: u64) -> io::Result<usize> { + self.0.write_vectored_at(bufs, offset) + } + pub fn flush(&self) -> io::Result<()> { Ok(()) } @@ -1750,13 +1792,25 @@ pub 
fn chroot(dir: &Path) -> io::Result<()> {
pub use remove_dir_impl::remove_dir_all;

// Fallback for REDOX, ESP-IDF, Horizon, QNX Neutrino, and Miri
-#[cfg(any(target_os = "redox", target_os = "espidf", target_os = "horizon", miri))]
+#[cfg(any(
+ target_os = "redox",
+ target_os = "espidf",
+ target_os = "horizon",
+ target_os = "nto",
+ miri
+))]
mod remove_dir_impl {
pub use crate::sys_common::fs::remove_dir_all;
}

// Modern implementation using openat(), unlinkat() and fdopendir()
-#[cfg(not(any(target_os = "redox", target_os = "espidf", target_os = "horizon", miri)))]
+#[cfg(not(any(
+ target_os = "redox",
+ target_os = "espidf",
+ target_os = "horizon",
+ target_os = "nto",
+ miri
+)))]
mod remove_dir_impl {
use super::{lstat, Dir, DirEntry, InnerReadDir, ReadDir};
use crate::ffi::CStr;
diff --git a/library/std/src/sys/unix/locks/pthread_condvar.rs b/library/std/src/sys/unix/locks/pthread_condvar.rs
index 6be1abc2b..192fa216d 100644
--- a/library/std/src/sys/unix/locks/pthread_condvar.rs
+++ b/library/std/src/sys/unix/locks/pthread_condvar.rs
@@ -2,7 +2,10 @@ use crate::cell::UnsafeCell;
use crate::ptr;
use crate::sync::atomic::{AtomicPtr, Ordering::Relaxed};
use crate::sys::locks::{pthread_mutex, Mutex};
+#[cfg(not(target_os = "nto"))]
use crate::sys::time::TIMESPEC_MAX;
+#[cfg(target_os = "nto")]
+use crate::sys::time::TIMESPEC_MAX_CAPPED;
use crate::sys_common::lazy_box::{LazyBox, LazyInit};
use crate::time::Duration;

@@ -132,10 +135,18 @@ impl Condvar {
let mutex = pthread_mutex::raw(mutex);
self.verify(mutex);

+ #[cfg(not(target_os = "nto"))]
let timeout = Timespec::now(libc::CLOCK_MONOTONIC)
.checked_add_duration(&dur)
.and_then(|t| t.to_timespec())
.unwrap_or(TIMESPEC_MAX);
+
+ #[cfg(target_os = "nto")]
+ let timeout = Timespec::now(libc::CLOCK_MONOTONIC)
+ .checked_add_duration(&dur)
+ .and_then(|t| t.to_timespec_capped())
+ .unwrap_or(TIMESPEC_MAX_CAPPED);
+
let r = libc::pthread_cond_timedwait(raw(self), mutex, &timeout);
assert!(r == libc::ETIMEDOUT || r == 0);
r == 0
diff --git a/library/std/src/sys/unix/mod.rs b/library/std/src/sys/unix/mod.rs
index 30a96be14..68c9520cc 100644
--- a/library/std/src/sys/unix/mod.rs
+++ b/library/std/src/sys/unix/mod.rs
@@ -329,7 +329,7 @@ pub fn cvt_nz(error: libc::c_int) -> crate::io::Result<()> {
// do so. In 1003.1-2004 this was fixed.
//
// glibc's implementation did the flush, unsafely, before glibc commit
-// 91e7cf982d01 `abort: Do not flush stdio streams [BZ #15436]' by Florian
+// 91e7cf982d01 `abort: Do not flush stdio streams [BZ #15436]` by Florian
According to glibc's NEWS: // // The abort function terminates the process immediately, without flushing diff --git a/library/std/src/sys/unix/net.rs b/library/std/src/sys/unix/net.rs index c86f80972..8e05b618d 100644 --- a/library/std/src/sys/unix/net.rs +++ b/library/std/src/sys/unix/net.rs @@ -78,6 +78,7 @@ impl Socket { target_os = "linux", target_os = "netbsd", target_os = "openbsd", + target_os = "nto", ))] { // On platforms that support it we pass the SOCK_CLOEXEC // flag to atomically create the socket and set it as @@ -115,6 +116,7 @@ impl Socket { target_os = "linux", target_os = "netbsd", target_os = "openbsd", + target_os = "nto", ))] { // Like above, set cloexec atomically cvt(libc::socketpair(fam, ty | libc::SOCK_CLOEXEC, 0, fds.as_mut_ptr()))?; diff --git a/library/std/src/sys/unix/os.rs b/library/std/src/sys/unix/os.rs index 2f2663db6..21b035fb3 100644 --- a/library/std/src/sys/unix/os.rs +++ b/library/std/src/sys/unix/os.rs @@ -62,6 +62,7 @@ extern "C" { link_name = "__errno" )] #[cfg_attr(any(target_os = "solaris", target_os = "illumos"), link_name = "___errno")] + #[cfg_attr(target_os = "nto", link_name = "__get_errno_ptr")] #[cfg_attr( any(target_os = "macos", target_os = "ios", target_os = "freebsd", target_os = "watchos"), link_name = "__error" @@ -361,6 +362,17 @@ pub fn current_exe() -> io::Result<PathBuf> { } } +#[cfg(target_os = "nto")] +pub fn current_exe() -> io::Result<PathBuf> { + let mut e = crate::fs::read("/proc/self/exefile")?; + // Current versions of QNX Neutrino provide a null-terminated path. + // Ensure the trailing null byte is not returned here. + if let Some(0) = e.last() { + e.pop(); + } + Ok(PathBuf::from(OsString::from_vec(e))) +} + #[cfg(any(target_os = "macos", target_os = "ios", target_os = "watchos"))] pub fn current_exe() -> io::Result<PathBuf> { unsafe { diff --git a/library/std/src/sys/unix/process/process_unix.rs b/library/std/src/sys/unix/process/process_unix.rs index 3bc17b775..ceaff5966 100644 --- a/library/std/src/sys/unix/process/process_unix.rs +++ b/library/std/src/sys/unix/process/process_unix.rs @@ -18,6 +18,7 @@ use crate::sys::weak::raw_syscall; target_os = "freebsd", all(target_os = "linux", target_env = "gnu"), all(target_os = "linux", target_env = "musl"), + target_os = "nto", ))] use crate::sys::weak::weak; @@ -30,6 +31,15 @@ use libc::{c_int, pid_t}; #[cfg(not(any(target_os = "vxworks", target_os = "l4re")))] use libc::{gid_t, uid_t}; +cfg_if::cfg_if! { + if #[cfg(all(target_os = "nto", target_env = "nto71"))] { + use crate::thread; + use libc::{c_char, posix_spawn_file_actions_t, posix_spawnattr_t}; + // arbitrary number of tries: + const MAX_FORKSPAWN_TRIES: u32 = 4; + } +} + //////////////////////////////////////////////////////////////////////////////// // Command //////////////////////////////////////////////////////////////////////////////// @@ -140,11 +150,31 @@ impl Command { // Attempts to fork the process. If successful, returns Ok((0, -1)) // in the child, and Ok((child_pid, -1)) in the parent. - #[cfg(not(target_os = "linux"))] + #[cfg(not(any(target_os = "linux", all(target_os = "nto", target_env = "nto71"))))] unsafe fn do_fork(&mut self) -> Result<(pid_t, pid_t), io::Error> { cvt(libc::fork()).map(|res| (res, -1)) } + // On QNX Neutrino, fork can fail with EBADF in case "another thread might have opened + // or closed a file descriptor while the fork() was occurring". + // Documentation says "... or try calling fork() again". This is what we do here. 
+ // See also https://www.qnx.com/developers/docs/7.1/#com.qnx.doc.neutrino.lib_ref/topic/f/fork.html + #[cfg(all(target_os = "nto", target_env = "nto71"))] + unsafe fn do_fork(&mut self) -> Result<(pid_t, pid_t), io::Error> { + use crate::sys::os::errno; + + let mut tries_left = MAX_FORKSPAWN_TRIES; + loop { + let r = libc::fork(); + if r == -1 as libc::pid_t && tries_left > 0 && errno() as libc::c_int == libc::EBADF { + thread::yield_now(); + tries_left -= 1; + } else { + return cvt(r).map(|res| (res, -1)); + } + } + } + // Attempts to fork the process. If successful, returns Ok((0, -1)) // in the child, and Ok((child_pid, child_pidfd)) in the parent. #[cfg(target_os = "linux")] @@ -389,6 +419,7 @@ impl Command { target_os = "freebsd", all(target_os = "linux", target_env = "gnu"), all(target_os = "linux", target_env = "musl"), + target_os = "nto", )))] fn posix_spawn( &mut self, @@ -405,6 +436,7 @@ impl Command { target_os = "freebsd", all(target_os = "linux", target_env = "gnu"), all(target_os = "linux", target_env = "musl"), + target_os = "nto", ))] fn posix_spawn( &mut self, @@ -436,6 +468,34 @@ impl Command { } } + // On QNX Neutrino, posix_spawnp can fail with EBADF in case "another thread might have opened + // or closed a file descriptor while the posix_spawn() was occurring". + // Documentation says "... or try calling posix_spawn() again". This is what we do here. + // See also http://www.qnx.com/developers/docs/7.1/#com.qnx.doc.neutrino.lib_ref/topic/p/posix_spawn.html + #[cfg(all(target_os = "nto", target_env = "nto71"))] + unsafe fn retrying_libc_posix_spawnp( + pid: *mut pid_t, + file: *const c_char, + file_actions: *const posix_spawn_file_actions_t, + attrp: *const posix_spawnattr_t, + argv: *const *mut c_char, + envp: *const *mut c_char, + ) -> i32 { + let mut tries_left = MAX_FORKSPAWN_TRIES; + loop { + match libc::posix_spawnp(pid, file, file_actions, attrp, argv, envp) { + libc::EBADF if tries_left > 0 => { + thread::yield_now(); + tries_left -= 1; + continue; + } + r => { + return r; + } + } + } + } + // Solaris, glibc 2.29+, and musl 1.24+ can set a new working directory, // and maybe others will gain this non-POSIX function too. 
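The weak-symbol check mentioned next boils down to a `dlsym` lookup at runtime. A minimal sketch, assuming the external `libc` crate is available; `AddChdirFn` abridges the real `posix_spawn_file_actions_addchdir_np` prototype, whose first parameter is actually a `*mut posix_spawn_file_actions_t`:

use std::ffi::{c_char, c_int, c_void};

type AddChdirFn = unsafe extern "C" fn(*mut c_void, *const c_char) -> c_int;

unsafe fn find_addchdir() -> Option<AddChdirFn> {
    // RTLD_DEFAULT searches every object already loaded into the process;
    // a null result means the extension is absent and callers fall back to fork/exec.
    let name = b"posix_spawn_file_actions_addchdir_np\0";
    let sym = libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr().cast());
    if sym.is_null() { None } else { Some(std::mem::transmute(sym)) }
}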
We'll check // for this weak symbol as soon as it's needed, so we can return early @@ -555,7 +615,12 @@ impl Command { // Make sure we synchronize access to the global `environ` resource let _env_lock = sys::os::env_read_lock(); let envp = envp.map(|c| c.as_ptr()).unwrap_or_else(|| *sys::os::environ() as *const _); - cvt_nz(libc::posix_spawnp( + + #[cfg(not(target_os = "nto"))] + let spawn_fn = libc::posix_spawnp; + #[cfg(target_os = "nto")] + let spawn_fn = retrying_libc_posix_spawnp; + cvt_nz(spawn_fn( &mut p.pid, self.get_program_cstr().as_ptr(), file_actions.0.as_ptr(), @@ -760,7 +825,7 @@ fn signal_string(signal: i32) -> &'static str { ) ))] libc::SIGSTKFLT => " (SIGSTKFLT)", - #[cfg(target_os = "linux")] + #[cfg(any(target_os = "linux", target_os = "nto"))] libc::SIGPWR => " (SIGPWR)", #[cfg(any( target_os = "macos", @@ -769,7 +834,8 @@ fn signal_string(signal: i32) -> &'static str { target_os = "freebsd", target_os = "netbsd", target_os = "openbsd", - target_os = "dragonfly" + target_os = "dragonfly", + target_os = "nto", ))] libc::SIGEMT => " (SIGEMT)", #[cfg(any( diff --git a/library/std/src/sys/unix/thread.rs b/library/std/src/sys/unix/thread.rs index 2a1830d06..15070b1f6 100644 --- a/library/std/src/sys/unix/thread.rs +++ b/library/std/src/sys/unix/thread.rs @@ -9,7 +9,7 @@ use crate::time::Duration; #[cfg(all(target_os = "linux", target_env = "gnu"))] use crate::sys::weak::dlsym; -#[cfg(any(target_os = "solaris", target_os = "illumos"))] +#[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto"))] use crate::sys::weak::weak; #[cfg(not(any(target_os = "l4re", target_os = "vxworks", target_os = "espidf")))] pub const DEFAULT_MIN_STACK_SIZE: usize = 2 * 1024 * 1024; @@ -49,7 +49,7 @@ unsafe impl Sync for Thread {} impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> { - let p = Box::into_raw(box p); + let p = Box::into_raw(Box::new(p)); let mut native: libc::pthread_t = mem::zeroed(); let mut attr: libc::pthread_attr_t = mem::zeroed(); assert_eq!(libc::pthread_attr_init(&mut attr), 0); @@ -173,7 +173,7 @@ impl Thread { } } - #[cfg(any(target_os = "solaris", target_os = "illumos"))] + #[cfg(any(target_os = "solaris", target_os = "illumos", target_os = "nto"))] pub fn set_name(name: &CStr) { weak! 
{ fn pthread_setname_np( @@ -381,6 +381,17 @@ pub fn available_parallelism() -> io::Result<NonZeroUsize> { } Ok(unsafe { NonZeroUsize::new_unchecked(cpus as usize) }) + } else if #[cfg(target_os = "nto")] { + unsafe { + use libc::_syspage_ptr; + if _syspage_ptr.is_null() { + Err(io::const_io_error!(io::ErrorKind::NotFound, "No syspage available")) + } else { + let cpus = (*_syspage_ptr).num_cpu; + NonZeroUsize::new(cpus as usize) + .ok_or(io::const_io_error!(io::ErrorKind::NotFound, "The number of hardware threads is not known for the target platform")) + } + } } else if #[cfg(target_os = "haiku")] { // system_info cpu_count field gets the static data set at boot time with `smp_set_num_cpus` // `get_system_info` calls then `smp_get_num_cpus` diff --git a/library/std/src/sys/unix/thread_local_dtor.rs b/library/std/src/sys/unix/thread_local_dtor.rs index d7fd2130f..236d2f2ee 100644 --- a/library/std/src/sys/unix/thread_local_dtor.rs +++ b/library/std/src/sys/unix/thread_local_dtor.rs @@ -11,13 +11,7 @@ // Note, however, that we run on lots older linuxes, as well as cross // compiling from a newer linux to an older linux, so we also have a // fallback implementation to use as well. -#[cfg(any( - target_os = "linux", - target_os = "fuchsia", - target_os = "redox", - target_os = "emscripten" -))] -#[cfg_attr(target_family = "wasm", allow(unused))] // might remain unused depending on target details (e.g. wasm32-unknown-emscripten) +#[cfg(any(target_os = "linux", target_os = "fuchsia", target_os = "redox"))] pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { use crate::mem; use crate::sys_common::thread_local_dtor::register_dtor_fallback; @@ -57,44 +51,40 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { #[cfg(target_os = "macos")] pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { use crate::cell::Cell; + use crate::mem; use crate::ptr; #[thread_local] static REGISTERED: Cell<bool> = Cell::new(false); + + #[thread_local] + static mut DTORS: Vec<(*mut u8, unsafe extern "C" fn(*mut u8))> = Vec::new(); + if !REGISTERED.get() { _tlv_atexit(run_dtors, ptr::null_mut()); REGISTERED.set(true); } - type List = Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>; - - #[thread_local] - static DTORS: Cell<*mut List> = Cell::new(ptr::null_mut()); - if DTORS.get().is_null() { - let v: Box<List> = box Vec::new(); - DTORS.set(Box::into_raw(v)); - } - extern "C" { fn _tlv_atexit(dtor: unsafe extern "C" fn(*mut u8), arg: *mut u8); } - let list: &mut List = &mut *DTORS.get(); + let list = &mut DTORS; list.push((t, dtor)); unsafe extern "C" fn run_dtors(_: *mut u8) { - let mut ptr = DTORS.replace(ptr::null_mut()); - while !ptr.is_null() { - let list = Box::from_raw(ptr); - for (ptr, dtor) in list.into_iter() { + let mut list = mem::take(&mut DTORS); + while !list.is_empty() { + for (ptr, dtor) in list { dtor(ptr); } - ptr = DTORS.replace(ptr::null_mut()); + list = mem::take(&mut DTORS); } } } -#[cfg(any(target_os = "vxworks", target_os = "horizon"))] +#[cfg(any(target_os = "vxworks", target_os = "horizon", target_os = "emscripten"))] +#[cfg_attr(target_family = "wasm", allow(unused))] // might remain unused depending on target details (e.g. 
wasm32-unknown-emscripten) pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern "C" fn(*mut u8)) { use crate::sys_common::thread_local_dtor::register_dtor_fallback; register_dtor_fallback(t, dtor); diff --git a/library/std/src/sys/unix/thread_parking/pthread.rs b/library/std/src/sys/unix/thread_parking/pthread.rs index 082d25e68..43046ed07 100644 --- a/library/std/src/sys/unix/thread_parking/pthread.rs +++ b/library/std/src/sys/unix/thread_parking/pthread.rs @@ -6,7 +6,10 @@ use crate::pin::Pin; use crate::ptr::addr_of_mut; use crate::sync::atomic::AtomicUsize; use crate::sync::atomic::Ordering::SeqCst; +#[cfg(not(target_os = "nto"))] use crate::sys::time::TIMESPEC_MAX; +#[cfg(target_os = "nto")] +use crate::sys::time::TIMESPEC_MAX_CAPPED; use crate::time::Duration; const EMPTY: usize = 0; @@ -80,8 +83,14 @@ unsafe fn wait_timeout( (Timespec::now(libc::CLOCK_MONOTONIC), dur) }; + #[cfg(not(target_os = "nto"))] let timeout = now.checked_add_duration(&dur).and_then(|t| t.to_timespec()).unwrap_or(TIMESPEC_MAX); + #[cfg(target_os = "nto")] + let timeout = now + .checked_add_duration(&dur) + .and_then(|t| t.to_timespec_capped()) + .unwrap_or(TIMESPEC_MAX_CAPPED); let r = libc::pthread_cond_timedwait(cond, lock, &timeout); debug_assert!(r == libc::ETIMEDOUT || r == 0); } diff --git a/library/std/src/sys/unix/time.rs b/library/std/src/sys/unix/time.rs index 2daad981b..0f11de8f5 100644 --- a/library/std/src/sys/unix/time.rs +++ b/library/std/src/sys/unix/time.rs @@ -9,6 +9,14 @@ pub const UNIX_EPOCH: SystemTime = SystemTime { t: Timespec::zero() }; pub const TIMESPEC_MAX: libc::timespec = libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 }; +// This additional constant is only used when calling +// `libc::pthread_cond_timedwait`. +#[cfg(target_os = "nto")] +pub(super) const TIMESPEC_MAX_CAPPED: libc::timespec = libc::timespec { + tv_sec: (u64::MAX / NSEC_PER_SEC) as i64, + tv_nsec: (u64::MAX % NSEC_PER_SEC) as i64, +}; + #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[repr(transparent)] #[rustc_layout_scalar_valid_range_start(0)] @@ -144,6 +152,20 @@ impl Timespec { tv_nsec: self.tv_nsec.0.try_into().ok()?, }) } + + // On QNX Neutrino, the maximum timespec for e.g. pthread_cond_timedwait + // is 2^64 nanoseconds + #[cfg(target_os = "nto")] + pub(super) fn to_timespec_capped(&self) -> Option<libc::timespec> { + // Check if timeout in nanoseconds would fit into an u64 + if (self.tv_nsec.0 as u64) + .checked_add((self.tv_sec as u64).checked_mul(NSEC_PER_SEC)?) 
+ .is_none() + { + return None; + } + self.to_timespec() + } } impl From<libc::timespec> for Timespec { diff --git a/library/std/src/sys/unsupported/once.rs b/library/std/src/sys/unsupported/once.rs index b4bb4975f..11fde1888 100644 --- a/library/std/src/sys/unsupported/once.rs +++ b/library/std/src/sys/unsupported/once.rs @@ -1,5 +1,6 @@ use crate::cell::Cell; use crate::sync as public; +use crate::sync::once::ExclusiveState; pub struct Once { state: Cell<State>, @@ -44,6 +45,16 @@ impl Once { self.state.get() == State::Complete } + #[inline] + pub(crate) fn state(&mut self) -> ExclusiveState { + match self.state.get() { + State::Incomplete => ExclusiveState::Incomplete, + State::Poisoned => ExclusiveState::Poisoned, + State::Complete => ExclusiveState::Complete, + _ => unreachable!("invalid Once state"), + } + } + #[cold] #[track_caller] pub fn call(&self, ignore_poisoning: bool, f: &mut impl FnMut(&public::OnceState)) { diff --git a/library/std/src/sys/wasi/os.rs b/library/std/src/sys/wasi/os.rs index f5513e999..9919dc708 100644 --- a/library/std/src/sys/wasi/os.rs +++ b/library/std/src/sys/wasi/os.rs @@ -21,6 +21,7 @@ mod libc { extern "C" { pub fn getcwd(buf: *mut c_char, size: size_t) -> *mut c_char; pub fn chdir(dir: *const c_char) -> c_int; + pub fn __wasilibc_get_environ() -> *mut *mut c_char; } } @@ -161,7 +162,12 @@ impl Iterator for Env { pub fn env() -> Env { unsafe { let _guard = env_read_lock(); - let mut environ = libc::environ; + + // Use `__wasilibc_get_environ` instead of `environ` here so that we + // don't require wasi-libc to eagerly initialize the environment + // variables. + let mut environ = libc::__wasilibc_get_environ(); + let mut result = Vec::new(); if !environ.is_null() { while !(*environ).is_null() { diff --git a/library/std/src/sys/windows/args.rs b/library/std/src/sys/windows/args.rs index 6741ae46d..30356fa85 100644 --- a/library/std/src/sys/windows/args.rs +++ b/library/std/src/sys/windows/args.rs @@ -270,7 +270,7 @@ pub(crate) fn make_bat_command_line( // It is necessary to surround the command in an extra pair of quotes, // hence the trailing quote here. It will be closed after all arguments // have been added. - let mut cmd: Vec<u16> = "cmd.exe /c \"".encode_utf16().collect(); + let mut cmd: Vec<u16> = "cmd.exe /d /c \"".encode_utf16().collect(); // Push the script name surrounded by its quote pair. cmd.push(b'"' as u16); @@ -290,6 +290,15 @@ pub(crate) fn make_bat_command_line( // reconstructed by the batch script by default. for arg in args { cmd.push(' ' as u16); + // Make sure to always quote special command prompt characters, including: + // * Characters `cmd /?` says require quotes. + // * `%` for environment variables, as in `%TMP%`. + // * `|<>` pipe/redirect characters. 
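As a usage-level illustration of those quoting rules (`needs_cmd_quotes` is a hypothetical helper for this sketch only; the actual change inlines the check against the `SPECIAL` constant that follows):

fn needs_cmd_quotes(arg: &str) -> bool {
    const SPECIAL: &[u8] = b"\t &()[]{}^=;!'+,`~%|<>"; // restates the list below
    arg.bytes().any(|b| SPECIAL.contains(&b))
}

fn main() {
    assert!(needs_cmd_quotes("%TMP%"));  // quoted, so cmd.exe will not expand it
    assert!(needs_cmd_quotes("a|b"));    // quoted, so `|` is not parsed as a pipe
    assert!(!needs_cmd_quotes("plain")); // no special characters, left bare
}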
+ const SPECIAL: &[u8] = b"\t &()[]{}^=;!'+,`~%|<>"; + let force_quotes = match arg { + Arg::Regular(arg) if !force_quotes => arg.bytes().iter().any(|c| SPECIAL.contains(c)), + _ => force_quotes, + }; append_arg(&mut cmd, arg, force_quotes)?; } diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs index f58dcf128..5d150eca0 100644 --- a/library/std/src/sys/windows/c.rs +++ b/library/std/src/sys/windows/c.rs @@ -6,13 +6,15 @@ use crate::ffi::CStr; use crate::mem; -use crate::os::raw::{c_char, c_int, c_long, c_longlong, c_uint, c_ulong, c_ushort}; +use crate::os::raw::{c_char, c_long, c_longlong, c_uint, c_ulong, c_ushort}; use crate::os::windows::io::{BorrowedHandle, HandleOrInvalid, HandleOrNull}; use crate::ptr; use core::ffi::NonZero_c_ulong; use libc::{c_void, size_t, wchar_t}; +pub use crate::os::raw::c_int; + #[path = "c/errors.rs"] // c.rs is included from two places so we need to specify this mod errors; pub use errors::*; @@ -47,16 +49,19 @@ pub type ACCESS_MASK = DWORD; pub type LPBOOL = *mut BOOL; pub type LPBYTE = *mut BYTE; +pub type LPCCH = *const CHAR; pub type LPCSTR = *const CHAR; +pub type LPCWCH = *const WCHAR; pub type LPCWSTR = *const WCHAR; +pub type LPCVOID = *const c_void; pub type LPDWORD = *mut DWORD; pub type LPHANDLE = *mut HANDLE; pub type LPOVERLAPPED = *mut OVERLAPPED; pub type LPPROCESS_INFORMATION = *mut PROCESS_INFORMATION; pub type LPSECURITY_ATTRIBUTES = *mut SECURITY_ATTRIBUTES; pub type LPSTARTUPINFO = *mut STARTUPINFO; +pub type LPSTR = *mut CHAR; pub type LPVOID = *mut c_void; -pub type LPCVOID = *const c_void; pub type LPWCH = *mut WCHAR; pub type LPWIN32_FIND_DATAW = *mut WIN32_FIND_DATAW; pub type LPWSADATA = *mut WSADATA; @@ -132,6 +137,10 @@ pub const MAX_PATH: usize = 260; pub const FILE_TYPE_PIPE: u32 = 3; +pub const CP_UTF8: DWORD = 65001; +pub const MB_ERR_INVALID_CHARS: DWORD = 0x08; +pub const WC_ERR_INVALID_CHARS: DWORD = 0x80; + #[repr(C)] #[derive(Copy)] pub struct WIN32_FIND_DATAW { @@ -539,14 +548,6 @@ pub struct SYMBOLIC_LINK_REPARSE_BUFFER { pub PathBuffer: WCHAR, } -/// NB: Use carefully! In general using this as a reference is likely to get the -/// provenance wrong for the `PathBuffer` field! -#[repr(C)] -pub struct FILE_NAME_INFO { - pub FileNameLength: DWORD, - pub FileName: [WCHAR; 1], -} - #[repr(C)] pub struct MOUNT_POINT_REPARSE_BUFFER { pub SubstituteNameOffset: c_ushort, @@ -1155,6 +1156,25 @@ extern "system" { lpFilePart: *mut LPWSTR, ) -> DWORD; pub fn GetFileAttributesW(lpFileName: LPCWSTR) -> DWORD; + + pub fn MultiByteToWideChar( + CodePage: UINT, + dwFlags: DWORD, + lpMultiByteStr: LPCCH, + cbMultiByte: c_int, + lpWideCharStr: LPWSTR, + cchWideChar: c_int, + ) -> c_int; + pub fn WideCharToMultiByte( + CodePage: UINT, + dwFlags: DWORD, + lpWideCharStr: LPCWCH, + cchWideChar: c_int, + lpMultiByteStr: LPSTR, + cbMultiByte: c_int, + lpDefaultChar: LPCCH, + lpUsedDefaultChar: LPBOOL, + ) -> c_int; } #[link(name = "ws2_32")] diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs index 378098038..d2c597664 100644 --- a/library/std/src/sys/windows/fs.rs +++ b/library/std/src/sys/windows/fs.rs @@ -1266,7 +1266,12 @@ fn metadata(path: &Path, reparse: ReparsePoint) -> io::Result<FileAttr> { // If the fallback fails for any reason we return the original error. 
match File::open(path, &opts) { Ok(file) => file.file_attr(), - Err(e) if e.raw_os_error() == Some(c::ERROR_SHARING_VIOLATION as _) => { + Err(e) + if [Some(c::ERROR_SHARING_VIOLATION as _), Some(c::ERROR_ACCESS_DENIED as _)] + .contains(&e.raw_os_error()) => + { + // `ERROR_ACCESS_DENIED` is returned when the user doesn't have permission for the resource. + // One such example is `System Volume Information` as default but can be created as well // `ERROR_SHARING_VIOLATION` will almost never be returned. // Usually if a file is locked you can still read some metadata. // However, there are special system files, such as @@ -1393,6 +1398,8 @@ fn symlink_junction_inner(original: &Path, junction: &Path) -> io::Result<()> { let mut data = Align8([MaybeUninit::<u8>::uninit(); c::MAXIMUM_REPARSE_DATA_BUFFER_SIZE]); let data_ptr = data.0.as_mut_ptr(); let db = data_ptr.cast::<c::REPARSE_MOUNTPOINT_DATA_BUFFER>(); + // Zero the header to ensure it's fully initialized, including reserved parameters. + *db = mem::zeroed(); let buf = ptr::addr_of_mut!((*db).ReparseTarget).cast::<c::WCHAR>(); let mut i = 0; // FIXME: this conversion is very hacky diff --git a/library/std/src/sys/windows/io.rs b/library/std/src/sys/windows/io.rs index 2cc34c986..7fdd1f702 100644 --- a/library/std/src/sys/windows/io.rs +++ b/library/std/src/sys/windows/io.rs @@ -2,8 +2,7 @@ use crate::marker::PhantomData; use crate::mem::size_of; use crate::os::windows::io::{AsHandle, AsRawHandle, BorrowedHandle}; use crate::slice; -use crate::sys::{c, Align8}; -use core; +use crate::sys::c; use libc; #[derive(Copy, Clone)] @@ -125,22 +124,33 @@ unsafe fn msys_tty_on(handle: c::HANDLE) -> bool { return false; } - const SIZE: usize = size_of::<c::FILE_NAME_INFO>() + c::MAX_PATH * size_of::<c::WCHAR>(); - let mut name_info_bytes = Align8([0u8; SIZE]); + /// Mirrors [`FILE_NAME_INFO`], giving it a fixed length that we can stack + /// allocate + /// + /// [`FILE_NAME_INFO`]: https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_name_info + #[repr(C)] + #[allow(non_snake_case)] + struct FILE_NAME_INFO { + FileNameLength: u32, + FileName: [u16; c::MAX_PATH as usize], + } + let mut name_info = FILE_NAME_INFO { FileNameLength: 0, FileName: [0; c::MAX_PATH as usize] }; + // Safety: buffer length is fixed. let res = c::GetFileInformationByHandleEx( handle, c::FileNameInfo, - name_info_bytes.0.as_mut_ptr() as *mut libc::c_void, - SIZE as u32, + &mut name_info as *mut _ as *mut libc::c_void, + size_of::<FILE_NAME_INFO>() as u32, ); if res == 0 { return false; } - let name_info: &c::FILE_NAME_INFO = &*(name_info_bytes.0.as_ptr() as *const c::FILE_NAME_INFO); - let name_len = name_info.FileNameLength as usize / 2; - // Offset to get the `FileName` field. - let name_ptr = name_info_bytes.0.as_ptr().offset(size_of::<c::DWORD>() as isize).cast::<u16>(); - let s = core::slice::from_raw_parts(name_ptr, name_len); + + // Use `get` because `FileNameLength` can be out of range. + let s = match name_info.FileName.get(..name_info.FileNameLength as usize / 2) { + None => return false, + Some(s) => s, + }; let name = String::from_utf16_lossy(s); // Get the file name only. 
let name = name.rsplit('\\').next().unwrap_or(&name); diff --git a/library/std/src/sys/windows/stdio.rs b/library/std/src/sys/windows/stdio.rs index 70c9b14a0..32c6ccffb 100644 --- a/library/std/src/sys/windows/stdio.rs +++ b/library/std/src/sys/windows/stdio.rs @@ -1,6 +1,5 @@ #![unstable(issue = "none", feature = "windows_stdio")] -use crate::char::decode_utf16; use crate::cmp; use crate::io; use crate::mem::MaybeUninit; @@ -170,14 +169,27 @@ fn write( } fn write_valid_utf8_to_console(handle: c::HANDLE, utf8: &str) -> io::Result<usize> { + debug_assert!(!utf8.is_empty()); + let mut utf16 = [MaybeUninit::<u16>::uninit(); MAX_BUFFER_SIZE / 2]; - let mut len_utf16 = 0; - for (chr, dest) in utf8.encode_utf16().zip(utf16.iter_mut()) { - *dest = MaybeUninit::new(chr); - len_utf16 += 1; - } - // Safety: We've initialized `len_utf16` values. - let utf16: &[u16] = unsafe { MaybeUninit::slice_assume_init_ref(&utf16[..len_utf16]) }; + let utf8 = &utf8[..utf8.floor_char_boundary(utf16.len())]; + + let utf16: &[u16] = unsafe { + // Note that this theoretically checks validity twice in the (most common) case + // where the underlying byte sequence is valid utf-8 (given the check in `write()`). + let result = c::MultiByteToWideChar( + c::CP_UTF8, // CodePage + c::MB_ERR_INVALID_CHARS, // dwFlags + utf8.as_ptr() as c::LPCCH, // lpMultiByteStr + utf8.len() as c::c_int, // cbMultiByte + utf16.as_mut_ptr() as c::LPWSTR, // lpWideCharStr + utf16.len() as c::c_int, // cchWideChar + ); + assert!(result != 0, "Unexpected error in MultiByteToWideChar"); + + // Safety: MultiByteToWideChar initializes `result` values. + MaybeUninit::slice_assume_init_ref(&utf16[..result as usize]) + }; let mut written = write_u16s(handle, &utf16)?; @@ -190,8 +202,8 @@ fn write_valid_utf8_to_console(handle: c::HANDLE, utf8: &str) -> io::Result<usiz // a missing surrogate can be produced (and also because of the UTF-8 validation above), // write the missing surrogate out now. // Buffering it would mean we have to lie about the number of bytes written. - let first_char_remaining = utf16[written]; - if first_char_remaining >= 0xDCEE && first_char_remaining <= 0xDFFF { + let first_code_unit_remaining = utf16[written]; + if first_code_unit_remaining >= 0xDCEE && first_code_unit_remaining <= 0xDFFF { // low surrogate // We just hope this works, and give up otherwise let _ = write_u16s(handle, &utf16[written..written + 1]); @@ -213,6 +225,7 @@ fn write_valid_utf8_to_console(handle: c::HANDLE, utf8: &str) -> io::Result<usiz } fn write_u16s(handle: c::HANDLE, data: &[u16]) -> io::Result<usize> { + debug_assert!(data.len() < u32::MAX as usize); let mut written = 0; cvt(unsafe { c::WriteConsoleW( @@ -366,26 +379,32 @@ fn read_u16s(handle: c::HANDLE, buf: &mut [MaybeUninit<u16>]) -> io::Result<usiz Ok(amount as usize) } -#[allow(unused)] fn utf16_to_utf8(utf16: &[u16], utf8: &mut [u8]) -> io::Result<usize> { - let mut written = 0; - for chr in decode_utf16(utf16.iter().cloned()) { - match chr { - Ok(chr) => { - chr.encode_utf8(&mut utf8[written..]); - written += chr.len_utf8(); - } - Err(_) => { - // We can't really do any better than forget all data and return an error. 
- return Err(io::const_io_error!( - io::ErrorKind::InvalidData, - "Windows stdin in console mode does not support non-UTF-16 input; \ - encountered unpaired surrogate", - )); - } - } + debug_assert!(utf16.len() <= c::c_int::MAX as usize); + debug_assert!(utf8.len() <= c::c_int::MAX as usize); + + let result = unsafe { + c::WideCharToMultiByte( + c::CP_UTF8, // CodePage + c::WC_ERR_INVALID_CHARS, // dwFlags + utf16.as_ptr(), // lpWideCharStr + utf16.len() as c::c_int, // cchWideChar + utf8.as_mut_ptr() as c::LPSTR, // lpMultiByteStr + utf8.len() as c::c_int, // cbMultiByte + ptr::null(), // lpDefaultChar + ptr::null_mut(), // lpUsedDefaultChar + ) + }; + if result == 0 { + // We can't really do any better than forget all data and return an error. + Err(io::const_io_error!( + io::ErrorKind::InvalidData, + "Windows stdin in console mode does not support non-UTF-16 input; \ + encountered unpaired surrogate", + )) + } else { + Ok(result as usize) } - Ok(written) } impl IncompleteUtf8 { diff --git a/library/std/src/sys/windows/thread.rs b/library/std/src/sys/windows/thread.rs index 1cb576c95..ed58c47e0 100644 --- a/library/std/src/sys/windows/thread.rs +++ b/library/std/src/sys/windows/thread.rs @@ -22,7 +22,7 @@ pub struct Thread { impl Thread { // unsafe: see thread::Builder::spawn_unchecked for safety requirements pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> { - let p = Box::into_raw(box p); + let p = Box::into_raw(Box::new(p)); // FIXME On UNIX, we guard against stack sizes that are too small but // that's because pthreads enforces that stacks are at least diff --git a/library/std/src/sys/windows/thread_parking.rs b/library/std/src/sys/windows/thread_parking.rs index 5d43676ad..eb9167cd8 100644 --- a/library/std/src/sys/windows/thread_parking.rs +++ b/library/std/src/sys/windows/thread_parking.rs @@ -221,7 +221,7 @@ impl Parker { fn keyed_event_handle() -> c::HANDLE { const INVALID: c::HANDLE = ptr::invalid_mut(!0); - static HANDLE: AtomicPtr<libc::c_void> = AtomicPtr::new(INVALID); + static HANDLE: AtomicPtr<crate::ffi::c_void> = AtomicPtr::new(INVALID); match HANDLE.load(Relaxed) { INVALID => { let mut handle = c::INVALID_HANDLE_VALUE; diff --git a/library/std/src/sys_common/backtrace.rs b/library/std/src/sys_common/backtrace.rs index f1d804ef4..8752f46ff 100644 --- a/library/std/src/sys_common/backtrace.rs +++ b/library/std/src/sys_common/backtrace.rs @@ -91,6 +91,19 @@ unsafe fn _print_fmt(fmt: &mut fmt::Formatter<'_>, print_fmt: PrintFmt) -> fmt:: if stop { return false; } + #[cfg(target_os = "nto")] + if libc::__my_thread_exit as *mut libc::c_void == frame.ip() { + if !hit && start { + use crate::backtrace_rs::SymbolName; + res = bt_fmt.frame().print_raw( + frame.ip(), + Some(SymbolName::new("__my_thread_exit".as_bytes())), + None, + None, + ); + } + return false; + } if !hit && start { res = bt_fmt.frame().print_raw(frame.ip(), None, None, None); } diff --git a/library/std/src/sys_common/mod.rs b/library/std/src/sys_common/mod.rs index 6b24b0e9a..e9c727cbb 100644 --- a/library/std/src/sys_common/mod.rs +++ b/library/std/src/sys_common/mod.rs @@ -44,7 +44,6 @@ cfg_if::cfg_if! { cfg_if::cfg_if! 
{ if #[cfg(any(target_os = "l4re", - target_os = "hermit", feature = "restricted-std", all(target_family = "wasm", not(target_os = "emscripten")), all(target_vendor = "fortanix", target_env = "sgx")))] { diff --git a/library/std/src/sys_common/net.rs b/library/std/src/sys_common/net.rs index fad4a6333..85ecc1def 100644 --- a/library/std/src/sys_common/net.rs +++ b/library/std/src/sys_common/net.rs @@ -14,14 +14,14 @@ use crate::sys::net::{cvt, cvt_gai, cvt_r, init, wrlen_t, Socket}; use crate::sys_common::{AsInner, FromInner, IntoInner}; use crate::time::Duration; -use libc::{c_int, c_void}; +use crate::ffi::{c_int, c_void}; cfg_if::cfg_if! { if #[cfg(any( target_os = "dragonfly", target_os = "freebsd", target_os = "ios", target_os = "macos", target_os = "watchos", target_os = "openbsd", target_os = "netbsd", target_os = "illumos", - target_os = "solaris", target_os = "haiku", target_os = "l4re"))] { + target_os = "solaris", target_os = "haiku", target_os = "l4re", target_os = "nto"))] { use crate::sys::net::netc::IPV6_JOIN_GROUP as IPV6_ADD_MEMBERSHIP; use crate::sys::net::netc::IPV6_LEAVE_GROUP as IPV6_DROP_MEMBERSHIP; } else { @@ -35,7 +35,7 @@ cfg_if::cfg_if! { target_os = "linux", target_os = "android", target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd", target_os = "netbsd", - target_os = "haiku"))] { + target_os = "haiku", target_os = "nto"))] { use libc::MSG_NOSIGNAL; } else { const MSG_NOSIGNAL: c_int = 0x0; @@ -46,8 +46,9 @@ cfg_if::cfg_if! { if #[cfg(any( target_os = "dragonfly", target_os = "freebsd", target_os = "openbsd", target_os = "netbsd", - target_os = "solaris", target_os = "illumos"))] { - use libc::c_uchar; + target_os = "solaris", target_os = "illumos", + target_os = "nto"))] { + use crate::ffi::c_uchar; type IpV4MultiCastType = c_uchar; } else { type IpV4MultiCastType = c_int; @@ -127,8 +128,8 @@ fn to_ipv6mr_interface(value: u32) -> c_int { } #[cfg(not(target_os = "android"))] -fn to_ipv6mr_interface(value: u32) -> libc::c_uint { - value as libc::c_uint +fn to_ipv6mr_interface(value: u32) -> crate::ffi::c_uint { + value as crate::ffi::c_uint } //////////////////////////////////////////////////////////////////////////////// diff --git a/library/std/src/sys_common/net/tests.rs b/library/std/src/sys_common/net/tests.rs index ac75d9ebf..fc236b802 100644 --- a/library/std/src/sys_common/net/tests.rs +++ b/library/std/src/sys_common/net/tests.rs @@ -6,7 +6,7 @@ fn no_lookup_host_duplicates() { let mut addrs = HashMap::new(); let lh = match LookupHost::try_from(("localhost", 0)) { Ok(lh) => lh, - Err(e) => panic!("couldn't resolve `localhost': {e}"), + Err(e) => panic!("couldn't resolve `localhost`: {e}"), }; for sa in lh { *addrs.entry(sa).or_insert(0) += 1; diff --git a/library/std/src/sys_common/once/futex.rs b/library/std/src/sys_common/once/futex.rs index 5c7e6c013..42db5fad4 100644 --- a/library/std/src/sys_common/once/futex.rs +++ b/library/std/src/sys_common/once/futex.rs @@ -4,6 +4,7 @@ use crate::sync::atomic::{ AtomicU32, Ordering::{Acquire, Relaxed, Release}, }; +use crate::sync::once::ExclusiveState; use crate::sys::futex::{futex_wait, futex_wake_all}; // On some platforms, the OS is very nice and handles the waiter queue for us. 
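The `state(&mut self)` accessors added in the hunks below rely on a detail worth spelling out: `&mut self` guarantees exclusive access, so the atomic can be read through `get_mut` as a plain integer, with no ordering argument at all. A minimal demonstration, using made-up state constants rather than the ones defined by `Once`:

use std::sync::atomic::{AtomicU32, Ordering};

const INCOMPLETE: u32 = 0; // illustrative encoding only
const COMPLETE: u32 = 2;

fn main() {
    let mut state = AtomicU32::new(INCOMPLETE);
    state.store(COMPLETE, Ordering::Release); // a write that could have been concurrent...
    // ...but holding `&mut state` proves every other thread is done with it,
    // so an unsynchronized read through `get_mut` is sound.
    assert_eq!(*state.get_mut(), COMPLETE);
}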
@@ -78,6 +79,16 @@ impl Once { self.state.load(Acquire) == COMPLETE } + #[inline] + pub(crate) fn state(&mut self) -> ExclusiveState { + match *self.state.get_mut() { + INCOMPLETE => ExclusiveState::Incomplete, + POISONED => ExclusiveState::Poisoned, + COMPLETE => ExclusiveState::Complete, + _ => unreachable!("invalid Once state"), + } + } + // This uses FnMut to match the API of the generic implementation. As this // implementation is quite light-weight, it is generic over the closure and // so avoids the cost of dynamic dispatch. diff --git a/library/std/src/sys_common/once/queue.rs b/library/std/src/sys_common/once/queue.rs index d953a6745..def0bcd6f 100644 --- a/library/std/src/sys_common/once/queue.rs +++ b/library/std/src/sys_common/once/queue.rs @@ -60,6 +60,7 @@ use crate::fmt; use crate::ptr; use crate::sync as public; use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering}; +use crate::sync::once::ExclusiveState; use crate::thread::{self, Thread}; type Masked = (); @@ -121,6 +122,16 @@ impl Once { self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE } + #[inline] + pub(crate) fn state(&mut self) -> ExclusiveState { + match self.state_and_queue.get_mut().addr() { + INCOMPLETE => ExclusiveState::Incomplete, + POISONED => ExclusiveState::Poisoned, + COMPLETE => ExclusiveState::Complete, + _ => unreachable!("invalid Once state"), + } + } + // This is a non-generic function to reduce the monomorphization cost of // using `call_once` (this isn't exactly a trivial or small implementation). // diff --git a/library/std/src/sys_common/thread_local_dtor.rs b/library/std/src/sys_common/thread_local_dtor.rs index 1d13a7171..844946eda 100644 --- a/library/std/src/sys_common/thread_local_dtor.rs +++ b/library/std/src/sys_common/thread_local_dtor.rs @@ -30,7 +30,7 @@ pub unsafe fn register_dtor_fallback(t: *mut u8, dtor: unsafe extern "C" fn(*mut static DTORS: StaticKey = StaticKey::new(Some(run_dtors)); type List = Vec<(*mut u8, unsafe extern "C" fn(*mut u8))>; if DTORS.get().is_null() { - let v: Box<List> = box Vec::new(); + let v: Box<List> = Box::new(Vec::new()); DTORS.set(Box::into_raw(v) as *mut u8); } let list: &mut List = &mut *(DTORS.get() as *mut List); diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs index 2672a2a75..89360e456 100644 --- a/library/std/src/sys_common/thread_local_key.rs +++ b/library/std/src/sys_common/thread_local_key.rs @@ -117,9 +117,15 @@ pub struct Key { /// This value specifies no destructor by default. pub const INIT: StaticKey = StaticKey::new(None); -// Define a sentinel value that is unlikely to be returned -// as a TLS key (but it may be returned). +// Define a sentinel value that is likely not to be returned +// as a TLS key. +#[cfg(not(target_os = "nto"))] const KEY_SENTVAL: usize = 0; +// On QNX Neutrino, 0 is always returned when currently not in use. +// Using 0 would mean to always create two keys and remove the first // one (with value of 0) immediately afterwards.
+#[cfg(target_os = "nto")] +const KEY_SENTVAL: usize = libc::PTHREAD_KEYS_MAX + 1; impl StaticKey { #[rustc_const_unstable(feature = "thread_local_internals", issue = "none")] diff --git a/library/std/src/sys_common/thread_parking/id.rs b/library/std/src/sys_common/thread_parking/id.rs index e98169597..575988ec7 100644 --- a/library/std/src/sys_common/thread_parking/id.rs +++ b/library/std/src/sys_common/thread_parking/id.rs @@ -60,7 +60,7 @@ impl Parker { if state == PARKED { // Loop to guard against spurious wakeups. while state == PARKED { - park(self.state.as_mut_ptr().addr()); + park(self.state.as_ptr().addr()); state = self.state.load(Acquire); } @@ -76,7 +76,7 @@ impl Parker { let state = self.state.fetch_sub(1, Acquire).wrapping_sub(1); if state == PARKED { - park_timeout(dur, self.state.as_mut_ptr().addr()); + park_timeout(dur, self.state.as_ptr().addr()); // Swap to ensure that we observe all state changes with acquire // ordering, even if the state has been changed after the timeout // occured. @@ -99,7 +99,7 @@ impl Parker { // and terminated before this call is made. This call then returns an // error or wakes up an unrelated thread. The platform API and // environment does allow this, however. - unpark(tid, self.state.as_mut_ptr().addr()); + unpark(tid, self.state.as_ptr().addr()); } } } diff --git a/library/std/src/sys_common/thread_parking/mod.rs b/library/std/src/sys_common/thread_parking/mod.rs index 0ead6633c..e8e028bb3 100644 --- a/library/std/src/sys_common/thread_parking/mod.rs +++ b/library/std/src/sys_common/thread_parking/mod.rs @@ -14,12 +14,10 @@ cfg_if::cfg_if! { } else if #[cfg(any( target_os = "netbsd", all(target_vendor = "fortanix", target_env = "sgx"), + target_os = "solid_asp3", ))] { mod id; pub use id::Parker; - } else if #[cfg(target_os = "solid_asp3")] { - mod wait_flag; - pub use wait_flag::Parker; } else if #[cfg(any(windows, target_family = "unix"))] { pub use crate::sys::thread_parking::Parker; } else { diff --git a/library/std/src/sys_common/thread_parking/wait_flag.rs b/library/std/src/sys_common/thread_parking/wait_flag.rs deleted file mode 100644 index d0f8899a9..000000000 --- a/library/std/src/sys_common/thread_parking/wait_flag.rs +++ /dev/null @@ -1,102 +0,0 @@ -//! A wait-flag-based thread parker. -//! -//! Some operating systems provide low-level parking primitives like wait counts, -//! event flags or semaphores which are not susceptible to race conditions (meaning -//! the wakeup can occur before the wait operation). To implement the `std` thread -//! parker on top of these primitives, we only have to ensure that parking is fast -//! when the thread token is available, the atomic ordering guarantees are maintained -//! and spurious wakeups are minimized. -//! -//! To achieve this, this parker uses an atomic variable with three states: `EMPTY`, -//! `PARKED` and `NOTIFIED`: -//! * `EMPTY` means the token has not been made available, but the thread is not -//! currently waiting on it. -//! * `PARKED` means the token is not available and the thread is parked. -//! * `NOTIFIED` means the token is available. -//! -//! `park` and `park_timeout` change the state from `EMPTY` to `PARKED` and from -//! `NOTIFIED` to `EMPTY`. If the state was `NOTIFIED`, the thread was unparked and -//! execution can continue without calling into the OS. If the state was `EMPTY`, -//! the token is not available and the thread waits on the primitive (here called -//! "wait flag"). -//! -//! `unpark` changes the state to `NOTIFIED`. 
If the state was `PARKED`, the thread -//! is or will be sleeping on the wait flag, so we raise it. - -use crate::pin::Pin; -use crate::sync::atomic::AtomicI8; -use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release}; -use crate::sys::wait_flag::WaitFlag; -use crate::time::Duration; - -const EMPTY: i8 = 0; -const PARKED: i8 = -1; -const NOTIFIED: i8 = 1; - -pub struct Parker { - state: AtomicI8, - wait_flag: WaitFlag, -} - -impl Parker { - /// Construct a parker for the current thread. The UNIX parker - /// implementation requires this to happen in-place. - pub unsafe fn new_in_place(parker: *mut Parker) { - parker.write(Parker { state: AtomicI8::new(EMPTY), wait_flag: WaitFlag::new() }) - } - - // This implementation doesn't require `unsafe` and `Pin`, but other implementations do. - pub unsafe fn park(self: Pin<&Self>) { - match self.state.fetch_sub(1, Acquire) { - // NOTIFIED => EMPTY - NOTIFIED => return, - // EMPTY => PARKED - EMPTY => (), - _ => panic!("inconsistent park state"), - } - - // Avoid waking up from spurious wakeups (these are quite likely, see below). - loop { - self.wait_flag.wait(); - - match self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed) { - Ok(_) => return, - Err(PARKED) => (), - Err(_) => panic!("inconsistent park state"), - } - } - } - - // This implementation doesn't require `unsafe` and `Pin`, but other implementations do. - pub unsafe fn park_timeout(self: Pin<&Self>, dur: Duration) { - match self.state.fetch_sub(1, Acquire) { - NOTIFIED => return, - EMPTY => (), - _ => panic!("inconsistent park state"), - } - - self.wait_flag.wait_timeout(dur); - - // Either a wakeup or a timeout occurred. Wakeups may be spurious, as there can be - // a race condition when `unpark` is performed between receiving the timeout and - // resetting the state, resulting in the eventflag being set unnecessarily. `park` - // is protected against this by looping until the token is actually given, but - // here we cannot easily tell. - - // Use `swap` to provide acquire ordering. - match self.state.swap(EMPTY, Acquire) { - NOTIFIED => (), - PARKED => (), - _ => panic!("inconsistent park state"), - } - } - - // This implementation doesn't require `Pin`, but other implementations do. - pub fn unpark(self: Pin<&Self>) { - let state = self.state.swap(NOTIFIED, Release); - - if state == PARKED { - self.wait_flag.raise(); - } - } -} diff --git a/library/std/src/sys_common/wtf8.rs b/library/std/src/sys_common/wtf8.rs index dd53767d4..bc588bdbb 100644 --- a/library/std/src/sys_common/wtf8.rs +++ b/library/std/src/sys_common/wtf8.rs @@ -18,10 +18,10 @@ #[cfg(test)] mod tests; +use core::char::{encode_utf16_raw, encode_utf8_raw}; use core::str::next_code_point; use crate::borrow::Cow; -use crate::char; use crate::collections::TryReserveError; use crate::fmt; use crate::hash::{Hash, Hasher}; @@ -235,7 +235,7 @@ impl Wtf8Buf { /// This does **not** include the WTF-8 concatenation check or `is_known_utf8` check. fn push_code_point_unchecked(&mut self, code_point: CodePoint) { let mut bytes = [0; 4]; - let bytes = char::encode_utf8_raw(code_point.value, &mut bytes); + let bytes = encode_utf8_raw(code_point.value, &mut bytes); self.bytes.extend_from_slice(bytes) } @@ -594,7 +594,7 @@ impl Wtf8 { } /// Returns the code point at `position` if it is in the ASCII range, - /// or `b'\xFF' otherwise. + /// or `b'\xFF'` otherwise. 
/// /// # Panics /// @@ -939,7 +939,7 @@ impl<'a> Iterator for EncodeWide<'a> { let mut buf = [0; 2]; self.code_points.next().map(|code_point| { - let n = char::encode_utf16_raw(code_point.value, &mut buf).len(); + let n = encode_utf16_raw(code_point.value, &mut buf).len(); if n == 2 { self.extra = buf[1]; } diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs index b30bb7b77..cf7c2e05a 100644 --- a/library/std/src/thread/local.rs +++ b/library/std/src/thread/local.rs @@ -1110,8 +1110,7 @@ pub mod os { let ptr = if ptr.is_null() { // If the lookup returned null, we haven't initialized our own // local copy, so do that now. - let ptr: Box<Value<T>> = box Value { inner: LazyKeyInner::new(), key: self }; - let ptr = Box::into_raw(ptr); + let ptr = Box::into_raw(Box::new(Value { inner: LazyKeyInner::new(), key: self })); // SAFETY: At this point we are sure there is no value inside // ptr so setting it will not affect anyone else. unsafe { diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs index 692ff0cbc..489af7767 100644 --- a/library/std/src/thread/mod.rs +++ b/library/std/src/thread/mod.rs @@ -124,8 +124,10 @@ //! //! ## Stack size //! -//! The default stack size is platform-dependent and subject to change. Currently it is 2MB on all -//! Tier-1 platforms. There are two ways to manually specify the stack size for spawned threads: +//! The default stack size is platform-dependent and subject to change. +//! Currently, it is 2 MiB on all Tier-1 platforms. +//! +//! There are two ways to manually specify the stack size for spawned threads: //! //! * Build the thread with [`Builder`] and pass the desired stack size to [`Builder::stack_size`]. //! * Set the `RUST_MIN_STACK` environment variable to an integer representing the desired stack diff --git a/library/std/src/time.rs b/library/std/src/time.rs index ecd06ebf7..5c2e9da70 100644 --- a/library/std/src/time.rs +++ b/library/std/src/time.rs @@ -1,6 +1,6 @@ //! Temporal quantification. //! -//! # Examples: +//! # Examples //! //! There are multiple ways to create a new [`Duration`]: //! @@ -352,7 +352,7 @@ impl Instant { self.checked_duration_since(earlier).unwrap_or_default() } - /// Returns the amount of time elapsed since this instant was created. + /// Returns the amount of time elapsed since this instant. /// /// # Panics /// @@ -525,8 +525,8 @@ impl SystemTime { self.0.sub_time(&earlier.0).map_err(SystemTimeError) } - /// Returns the difference between the clock time when this - /// system time was created, and the current clock time. + /// Returns the difference from this system time to the + /// current clock time. /// /// This function may fail as the underlying system clock is susceptible to /// drift and updates (e.g., the system clock could go backwards), so this diff --git a/library/std/tests/common/mod.rs b/library/std/tests/common/mod.rs new file mode 100644 index 000000000..fce220223 --- /dev/null +++ b/library/std/tests/common/mod.rs @@ -0,0 +1,58 @@ +#![allow(unused)] + +use std::env; +use std::fs; +use std::path::{Path, PathBuf}; +use std::thread; +use rand::RngCore; + +/// Copied from `std::test_helpers::test_rng`, since these tests rely on the +/// seed not being the same for every RNG invocation too. 
+#[track_caller] +pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng { + use core::hash::{BuildHasher, Hash, Hasher}; + let mut hasher = std::collections::hash_map::RandomState::new().build_hasher(); + core::panic::Location::caller().hash(&mut hasher); + let hc64 = hasher.finish(); + let seed_vec = hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<Vec<u8>>(); + let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap(); + rand::SeedableRng::from_seed(seed) +} + +// Copied from std::sys_common::io +pub struct TempDir(PathBuf); + +impl TempDir { + pub fn join(&self, path: &str) -> PathBuf { + let TempDir(ref p) = *self; + p.join(path) + } + + pub fn path(&self) -> &Path { + let TempDir(ref p) = *self; + p + } +} + +impl Drop for TempDir { + fn drop(&mut self) { + // Gee, seeing how we're testing the fs module I sure hope that we + // at least implement this correctly! + let TempDir(ref p) = *self; + let result = fs::remove_dir_all(p); + // Avoid panicking while panicking as this causes the process to + // immediately abort, without displaying test results. + if !thread::panicking() { + result.unwrap(); + } + } +} + +#[track_caller] // for `test_rng` +pub fn tmpdir() -> TempDir { + let p = env::temp_dir(); + let mut r = test_rng(); + let ret = p.join(&format!("rust-{}", r.next_u32())); + fs::create_dir(&ret).unwrap(); + TempDir(ret) +} diff --git a/library/std/tests/create_dir_all_bare.rs b/library/std/tests/create_dir_all_bare.rs new file mode 100644 index 000000000..fe789323f --- /dev/null +++ b/library/std/tests/create_dir_all_bare.rs @@ -0,0 +1,39 @@ +#![cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))] + +//! Note that this test changes the current directory so +//! should not be in the same process as other tests. +use std::env; +use std::fs; +use std::path::{Path, PathBuf}; + +mod common; + +// On some platforms, setting the current directory will prevent deleting it. +// So this helper ensures the current directory is reset. +struct CurrentDir(PathBuf); +impl CurrentDir { + fn new() -> Self { + Self(env::current_dir().unwrap()) + } + fn set(&self, path: &Path) { + env::set_current_dir(path).unwrap(); + } + fn with(path: &Path, f: impl FnOnce()) { + let current_dir = Self::new(); + current_dir.set(path); + f(); + } +} +impl Drop for CurrentDir { + fn drop(&mut self) { + env::set_current_dir(&self.0).unwrap(); + } +} + +#[test] +fn create_dir_all_bare() { + let tmpdir = common::tmpdir(); + CurrentDir::with(tmpdir.path(), || { + fs::create_dir_all("create-dir-all-bare").unwrap(); + }); +} diff --git a/library/std/tests/env.rs b/library/std/tests/env.rs index aae2c49d8..96b4f372b 100644 --- a/library/std/tests/env.rs +++ b/library/std/tests/env.rs @@ -3,18 +3,8 @@ use std::ffi::{OsStr, OsString}; use rand::distributions::{Alphanumeric, DistString}; -/// Copied from `std::test_helpers::test_rng`, since these tests rely on the -/// seed not being the same for every RNG invocation too. 
-#[track_caller] -pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng { - use core::hash::{BuildHasher, Hash, Hasher}; - let mut hasher = std::collections::hash_map::RandomState::new().build_hasher(); - core::panic::Location::caller().hash(&mut hasher); - let hc64 = hasher.finish(); - let seed_vec = hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<Vec<u8>>(); - let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap(); - rand::SeedableRng::from_seed(seed) -} +mod common; +use common::test_rng; #[track_caller] fn make_rand_name() -> OsString { diff --git a/library/std/tests/run-time-detect.rs b/library/std/tests/run-time-detect.rs index 1a2c12556..bf3c81fcc 100644 --- a/library/std/tests/run-time-detect.rs +++ b/library/std/tests/run-time-detect.rs @@ -120,16 +120,13 @@ fn x86_all() { println!("avx512dq: {:?}", is_x86_feature_detected!("avx512dq")); println!("avx512er: {:?}", is_x86_feature_detected!("avx512er")); println!("avx512f: {:?}", is_x86_feature_detected!("avx512f")); - println!("avx512gfni: {:?}", is_x86_feature_detected!("avx512gfni")); println!("avx512ifma: {:?}", is_x86_feature_detected!("avx512ifma")); println!("avx512pf: {:?}", is_x86_feature_detected!("avx512pf")); - println!("avx512vaes: {:?}", is_x86_feature_detected!("avx512vaes")); println!("avx512vbmi2: {:?}", is_x86_feature_detected!("avx512vbmi2")); println!("avx512vbmi: {:?}", is_x86_feature_detected!("avx512vbmi")); println!("avx512vl: {:?}", is_x86_feature_detected!("avx512vl")); println!("avx512vnni: {:?}", is_x86_feature_detected!("avx512vnni")); println!("avx512vp2intersect: {:?}", is_x86_feature_detected!("avx512vp2intersect")); - println!("avx512vpclmulqdq: {:?}", is_x86_feature_detected!("avx512vpclmulqdq")); println!("avx512vpopcntdq: {:?}", is_x86_feature_detected!("avx512vpopcntdq")); println!("avx: {:?}", is_x86_feature_detected!("avx")); println!("bmi1: {:?}", is_x86_feature_detected!("bmi1")); @@ -138,6 +135,7 @@ fn x86_all() { println!("f16c: {:?}", is_x86_feature_detected!("f16c")); println!("fma: {:?}", is_x86_feature_detected!("fma")); println!("fxsr: {:?}", is_x86_feature_detected!("fxsr")); + println!("gfni: {:?}", is_x86_feature_detected!("gfni")); println!("lzcnt: {:?}", is_x86_feature_detected!("lzcnt")); //println!("movbe: {:?}", is_x86_feature_detected!("movbe")); // movbe is unsupported as a target feature println!("pclmulqdq: {:?}", is_x86_feature_detected!("pclmulqdq")); @@ -154,6 +152,8 @@ fn x86_all() { println!("sse: {:?}", is_x86_feature_detected!("sse")); println!("ssse3: {:?}", is_x86_feature_detected!("ssse3")); println!("tbm: {:?}", is_x86_feature_detected!("tbm")); + println!("vaes: {:?}", is_x86_feature_detected!("vaes")); + println!("vpclmulqdq: {:?}", is_x86_feature_detected!("vpclmulqdq")); println!("xsave: {:?}", is_x86_feature_detected!("xsave")); println!("xsavec: {:?}", is_x86_feature_detected!("xsavec")); println!("xsaveopt: {:?}", is_x86_feature_detected!("xsaveopt")); diff --git a/library/stdarch/.github/workflows/main.yml b/library/stdarch/.github/workflows/main.yml index fd8713ff8..7d4085334 100644 --- a/library/stdarch/.github/workflows/main.yml +++ b/library/stdarch/.github/workflows/main.yml @@ -80,10 +80,7 @@ jobs: - s390x-unknown-linux-gnu - wasm32-wasi - i586-unknown-linux-gnu - - x86_64-linux-android - - arm-linux-androideabi - mipsel-unknown-linux-musl - - aarch64-linux-android - nvptx64-nvidia-cuda - thumbv6m-none-eabi - thumbv7m-none-eabi @@ -146,18 +143,9 @@ jobs: os: windows-latest - target: i586-unknown-linux-gnu os: ubuntu-latest - - target: 
x86_64-linux-android - os: ubuntu-latest - disable_assert_instr: 1 - - target: arm-linux-androideabi - os: ubuntu-latest - disable_assert_instr: 1 - target: mipsel-unknown-linux-musl os: ubuntu-latest norun: 1 - - target: aarch64-linux-android - os: ubuntu-latest - disable_assert_instr: 1 - target: nvptx64-nvidia-cuda os: ubuntu-latest - target: thumbv6m-none-eabi diff --git a/library/stdarch/ci/android-install-ndk.sh b/library/stdarch/ci/android-install-ndk.sh deleted file mode 100644 index 944a8389a..000000000 --- a/library/stdarch/ci/android-install-ndk.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env sh -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex - -curl --retry 5 -O \ - https://dl.google.com/android/repository/android-ndk-r15b-linux-x86_64.zip -unzip -q android-ndk-r15b-linux-x86_64.zip - -case "${1}" in - aarch64) - arch=arm64 - ;; - - i686) - arch=x86 - ;; - - *) - arch="${1}" - ;; -esac; - -android-ndk-r15b/build/tools/make_standalone_toolchain.py \ - --unified-headers \ - --install-dir "/android/ndk-${1}" \ - --arch "${arch}" \ - --api 24 - -rm -rf ./android-ndk-r15b-linux-x86_64.zip ./android-ndk-r15b diff --git a/library/stdarch/ci/android-install-sdk.sh b/library/stdarch/ci/android-install-sdk.sh deleted file mode 100644 index 3383dcb7f..000000000 --- a/library/stdarch/ci/android-install-sdk.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env sh -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex - -# Prep the SDK and emulator -# -# Note that the update process requires that we accept a bunch of licenses, and -# we can't just pipe `yes` into it for some reason, so we take the same strategy -# located in https://github.com/appunite/docker by just wrapping it in a script -# which apparently magically accepts the licenses. 
- -mkdir sdk -curl --retry 5 https://dl.google.com/android/repository/sdk-tools-linux-4333796.zip -O -unzip -d sdk sdk-tools-linux-4333796.zip - -case "$1" in - arm | armv7) - abi=armeabi-v7a - ;; - - aarch64) - abi=arm64-v8a - ;; - - i686) - abi=x86 - ;; - - x86_64) - abi=x86_64 - ;; - - *) - echo "invalid arch: $1" - exit 1 - ;; -esac; - -# --no_https avoids -# javax.net.ssl.SSLHandshakeException: sun.security.validator.ValidatorException: No trusted certificate found -yes | ./sdk/tools/bin/sdkmanager --licenses --no_https -yes | ./sdk/tools/bin/sdkmanager --no_https \ - "emulator" \ - "platform-tools" \ - "platforms;android-24" \ - "system-images;android-24;default;$abi" - -echo "no" | - ./sdk/tools/bin/avdmanager create avd \ - --name "${1}" \ - --package "system-images;android-24;default;$abi" diff --git a/library/stdarch/ci/android-sysimage.sh b/library/stdarch/ci/android-sysimage.sh deleted file mode 100644 index 31a6762cb..000000000 --- a/library/stdarch/ci/android-sysimage.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex - -URL=https://dl.google.com/android/repository/sys-img/android - -main() { - local arch="${1}" - local name="${2}" - local dest=/system - local td - td="$(mktemp -d)" - - apt-get install --no-install-recommends e2tools - - pushd "$td" - curl --retry 5 -O "${URL}/${name}" - unzip -q "${name}" - - local system - system=$(find . 
-name system.img) - mkdir -p $dest/{bin,lib,lib64} - - # Extract android linker and libraries to /system - # This allows android executables to be run directly (or with qemu) - if [ "${arch}" = "x86_64" ] || [ "${arch}" = "arm64" ]; then - e2cp -p "${system}:/bin/linker64" "${dest}/bin/" - e2cp -p "${system}:/lib64/libdl.so" "${dest}/lib64/" - e2cp -p "${system}:/lib64/libc.so" "${dest}/lib64/" - e2cp -p "${system}:/lib64/libm.so" "${dest}/lib64/" - else - e2cp -p "${system}:/bin/linker" "${dest}/bin/" - e2cp -p "${system}:/lib/libdl.so" "${dest}/lib/" - e2cp -p "${system}:/lib/libc.so" "${dest}/lib/" - e2cp -p "${system}:/lib/libm.so" "${dest}/lib/" - fi - - # clean up - apt-get purge --auto-remove -y e2tools - - popd - - rm -rf "${td}" -} - -main "${@}" diff --git a/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile b/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile deleted file mode 100644 index 6cf9b5061..000000000 --- a/library/stdarch/ci/docker/aarch64-linux-android/Dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -FROM ubuntu:22.04 - -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - file \ - make \ - curl \ - ca-certificates \ - python-is-python3 \ - unzip \ - expect \ - openjdk-8-jre \ - libstdc++6-i386-cross \ - libpulse0 \ - gcc \ - libc6-dev - -WORKDIR /android/ -COPY android* /android/ - -ENV ANDROID_ARCH=aarch64 -ENV PATH=$PATH:/android/ndk-$ANDROID_ARCH/bin:/android/sdk/tools:/android/sdk/platform-tools - -RUN sh /android/android-install-ndk.sh $ANDROID_ARCH -RUN sh /android/android-install-sdk.sh $ANDROID_ARCH -RUN mv /root/.android /tmp -RUN chmod 777 -R /tmp/.android -RUN chmod 755 /android/sdk/tools/* /android/sdk/emulator/qemu/linux-x86_64/* - -ENV PATH=$PATH:/rust/bin \ - CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER=aarch64-linux-android-gcc \ - CARGO_TARGET_AARCH64_LINUX_ANDROID_RUNNER=/tmp/runtest \ - OBJDUMP=aarch64-linux-android-objdump \ - HOME=/tmp - -ADD runtest-android.rs /tmp/runtest.rs -ENTRYPOINT [ \ - "bash", \ - "-c", \ - # set SHELL so android can detect a 64bits system, see - # http://stackoverflow.com/a/41789144 - "SHELL=/bin/dash /android/sdk/emulator/emulator @aarch64 -no-window & \ - rustc /tmp/runtest.rs -o /tmp/runtest && \ - exec \"$@\"", \ - "--" \ -] diff --git a/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile b/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile deleted file mode 100644 index fb1a0cecf..000000000 --- a/library/stdarch/ci/docker/arm-linux-androideabi/Dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -FROM ubuntu:22.04 - -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - file \ - make \ - curl \ - ca-certificates \ - python-is-python3 \ - unzip \ - expect \ - openjdk-8-jre \ - libstdc++6-i386-cross \ - libpulse0 \ - gcc \ - libc6-dev - -WORKDIR /android/ -COPY android* /android/ - -ENV ANDROID_ARCH=arm -ENV PATH=$PATH:/android/ndk-$ANDROID_ARCH/bin:/android/sdk/tools:/android/sdk/platform-tools - -RUN sh /android/android-install-ndk.sh $ANDROID_ARCH -RUN sh /android/android-install-sdk.sh $ANDROID_ARCH -RUN mv /root/.android /tmp -RUN chmod 777 -R /tmp/.android -RUN chmod 755 /android/sdk/tools/* /android/sdk/emulator/qemu/linux-x86_64/* - -ENV PATH=$PATH:/rust/bin \ - CARGO_TARGET_ARM_LINUX_ANDROIDEABI_LINKER=arm-linux-androideabi-gcc \ - CARGO_TARGET_ARM_LINUX_ANDROIDEABI_RUNNER=/tmp/runtest \ - OBJDUMP=arm-linux-androideabi-objdump \ - HOME=/tmp - -ADD runtest-android.rs /tmp/runtest.rs -ENTRYPOINT [ \ - "bash", \ - "-c", \ - # set SHELL so android can detect a 
diff --git a/library/stdarch/crates/assert-instr-macro/Cargo.toml b/library/stdarch/crates/assert-instr-macro/Cargo.toml
index 3d9b32067..4ad654e69 100644
--- a/library/stdarch/crates/assert-instr-macro/Cargo.toml
+++ b/library/stdarch/crates/assert-instr-macro/Cargo.toml
@@ -2,7 +2,7 @@
 name = "assert-instr-macro"
 version = "0.1.0"
 authors = ["Alex Crichton <alex@alexcrichton.com>"]
-edition = "2018"
+edition = "2021"
 
 [lib]
 proc-macro = true
diff --git a/library/stdarch/crates/assert-instr-macro/src/lib.rs b/library/stdarch/crates/assert-instr-macro/src/lib.rs
index 9fa411df3..99e37c910 100644
--- a/library/stdarch/crates/assert-instr-macro/src/lib.rs
+++ b/library/stdarch/crates/assert-instr-macro/src/lib.rs
@@ -56,14 +56,14 @@ pub fn assert_instr(
         .replace('/', "_")
         .replace(':', "_")
         .replace(char::is_whitespace, "");
-    let assert_name = syn::Ident::new(&format!("assert_{}_{}", name, instr_str), name.span());
+    let assert_name = syn::Ident::new(&format!("assert_{name}_{instr_str}"), name.span());
     // These name has to be unique enough for us to find it in the disassembly later on:
     let shim_name = syn::Ident::new(
-        &format!("stdarch_test_shim_{}_{}", name, instr_str),
+        &format!("stdarch_test_shim_{name}_{instr_str}"),
         name.span(),
     );
     let shim_name_ptr = syn::Ident::new(
-        &format!("stdarch_test_shim_{}_{}_ptr", name, instr_str).to_ascii_uppercase(),
+        &format!("stdarch_test_shim_{name}_{instr_str}_ptr").to_ascii_uppercase(),
         name.span(),
     );
     let mut inputs = Vec::new();
@@ -131,7 +131,7 @@ pub fn assert_instr(
     } else {
         syn::LitStr::new("C", proc_macro2::Span::call_site())
     };
-    let shim_name_str = format!("{}{}", shim_name, assert_name);
+    let shim_name_str = format!("{shim_name}{assert_name}");
     let to_test = if disable_dedup_guard {
         quote! {
             #attrs
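// The format! hunks above switch to inline captured identifiers, which are
// stable since Rust 1.58 (independently of the edition = "2021" bump in the
// same commit). The two spellings are equivalent; a self-contained check,
// with placeholder values chosen here only for illustration:
//
//     let name = "vmaxvq_s8";
//     let instr_str = "smaxv";
//     assert_eq!(
//         format!("assert_{}_{}", name, instr_str),
//         format!("assert_{name}_{instr_str}"),
//     );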
"_") .replace(':', "_") .replace(char::is_whitespace, ""); - let assert_name = syn::Ident::new(&format!("assert_{}_{}", name, instr_str), name.span()); + let assert_name = syn::Ident::new(&format!("assert_{name}_{instr_str}"), name.span()); // These name has to be unique enough for us to find it in the disassembly later on: let shim_name = syn::Ident::new( - &format!("stdarch_test_shim_{}_{}", name, instr_str), + &format!("stdarch_test_shim_{name}_{instr_str}"), name.span(), ); let shim_name_ptr = syn::Ident::new( - &format!("stdarch_test_shim_{}_{}_ptr", name, instr_str).to_ascii_uppercase(), + &format!("stdarch_test_shim_{name}_{instr_str}_ptr").to_ascii_uppercase(), name.span(), ); let mut inputs = Vec::new(); @@ -131,7 +131,7 @@ pub fn assert_instr( } else { syn::LitStr::new("C", proc_macro2::Span::call_site()) }; - let shim_name_str = format!("{}{}", shim_name, assert_name); + let shim_name_str = format!("{shim_name}{assert_name}"); let to_test = if disable_dedup_guard { quote! { #attrs diff --git a/library/stdarch/crates/core_arch/Cargo.toml b/library/stdarch/crates/core_arch/Cargo.toml index e2b332af2..a1bb168ee 100644 --- a/library/stdarch/crates/core_arch/Cargo.toml +++ b/library/stdarch/crates/core_arch/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" keywords = ["core", "simd", "arch", "intrinsics"] categories = ["hardware-support", "no-std"] license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" [badges] is-it-maintained-issue-resolution = { repository = "rust-lang/stdarch" } diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs index 9d9946b4f..7ff26ac21 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs @@ -162,7 +162,7 @@ extern "unadjusted" { #[link_name = "llvm.aarch64.neon.smaxv.i8.v8i8"] fn vmaxv_s8_(a: int8x8_t) -> i8; - #[link_name = "llvm.aarch64.neon.smaxv.i8.6i8"] + #[link_name = "llvm.aarch64.neon.smaxv.i8.v16i8"] fn vmaxvq_s8_(a: int8x16_t) -> i8; #[link_name = "llvm.aarch64.neon.smaxv.i16.v4i16"] fn vmaxv_s16_(a: int16x4_t) -> i16; @@ -175,7 +175,7 @@ extern "unadjusted" { #[link_name = "llvm.aarch64.neon.umaxv.i8.v8i8"] fn vmaxv_u8_(a: uint8x8_t) -> u8; - #[link_name = "llvm.aarch64.neon.umaxv.i8.6i8"] + #[link_name = "llvm.aarch64.neon.umaxv.i8.v16i8"] fn vmaxvq_u8_(a: uint8x16_t) -> u8; #[link_name = "llvm.aarch64.neon.umaxv.i16.v4i16"] fn vmaxv_u16_(a: uint16x4_t) -> u16; @@ -195,7 +195,7 @@ extern "unadjusted" { #[link_name = "llvm.aarch64.neon.sminv.i8.v8i8"] fn vminv_s8_(a: int8x8_t) -> i8; - #[link_name = "llvm.aarch64.neon.sminv.i8.6i8"] + #[link_name = "llvm.aarch64.neon.sminv.i8.v16i8"] fn vminvq_s8_(a: int8x16_t) -> i8; #[link_name = "llvm.aarch64.neon.sminv.i16.v4i16"] fn vminv_s16_(a: int16x4_t) -> i16; @@ -208,7 +208,7 @@ extern "unadjusted" { #[link_name = "llvm.aarch64.neon.uminv.i8.v8i8"] fn vminv_u8_(a: uint8x8_t) -> u8; - #[link_name = "llvm.aarch64.neon.uminv.i8.6i8"] + #[link_name = "llvm.aarch64.neon.uminv.i8.v16i8"] fn vminvq_u8_(a: uint8x16_t) -> u8; #[link_name = "llvm.aarch64.neon.uminv.i16.v4i16"] fn vminv_u16_(a: uint16x4_t) -> u16; @@ -1964,94 +1964,6 @@ pub unsafe fn vext_f64<const N: i32>(a: float64x1_t, _b: float64x1_t) -> float64 static_assert!(N : i32 where N == 0); a } -/// Vector combine -#[inline] -#[target_feature(enable = "neon")] -#[cfg_attr(test, assert_instr(mov))] -#[stable(feature = "neon_intrinsics", since = "1.59.0")] -pub unsafe fn vcombine_s8(low: 
@@ -1964,94 +1964,6 @@ pub unsafe fn vext_f64<const N: i32>(a: float64x1_t, _b: float64x1_t) -> float64
     static_assert!(N : i32 where N == 0);
     a
 }
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t {
-    simd_shuffle16!(
-        low,
-        high,
-        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
-    )
-}
-
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t {
-    simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
-}
-
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t {
-    simd_shuffle4!(low, high, [0, 1, 2, 3])
-}
-
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t {
-    simd_shuffle2!(low, high, [0, 1])
-}
-
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t {
-    simd_shuffle16!(
-        low,
-        high,
-        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
-    )
-}
-
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t {
-    simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
-}
-
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t {
-    simd_shuffle4!(low, high, [0, 1, 2, 3])
-}
-
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t {
-    simd_shuffle2!(low, high, [0, 1])
-}
-
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t {
-    simd_shuffle2!(low, high, [0, 1])
-}
 
 /// Duplicate vector element to vector or scalar
 #[inline]
@@ -2183,47 +2095,6 @@ pub unsafe fn vgetq_lane_f64<const IMM5: i32>(v: float64x2_t) -> f64 {
     simd_extract(v, IMM5 as u32)
 }
 
-/* FIXME: 16-bit float
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_t {
-    simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
-}
-*/
-
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t {
-    simd_shuffle4!(low, high, [0, 1, 2, 3])
-}
-
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t {
-    simd_shuffle16!(
-        low,
-        high,
-        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
-    )
-}
-
-/// Vector combine
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(test, assert_instr(mov))]
-#[stable(feature = "neon_intrinsics", since = "1.59.0")]
-pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t {
-    simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
-}
-
 /// Vector combine
 #[inline]
 #[target_feature(enable = "neon")]
@@ -4478,43 +4349,6 @@ mod tests {
         assert_eq!(r, e);
     }
 
-    macro_rules! test_vcombine {
-        ($test_id:ident => $fn_id:ident ([$($a:expr),*], [$($b:expr),*])) => {
-            #[allow(unused_assignments)]
-            #[simd_test(enable = "neon")]
-            unsafe fn $test_id() {
-                let a = [$($a),*];
-                let b = [$($b),*];
-                let e = [$($a),* $(, $b)*];
-                let c = $fn_id(transmute(a), transmute(b));
-                let mut d = e;
-                d = transmute(c);
-                assert_eq!(d, e);
-            }
-        }
-    }
-
-    test_vcombine!(test_vcombine_s8 => vcombine_s8([3_i8, -4, 5, -6, 7, 8, 9, 10], [13_i8, -14, 15, -16, 17, 18, 19, 110]));
-    test_vcombine!(test_vcombine_u8 => vcombine_u8([3_u8, 4, 5, 6, 7, 8, 9, 10], [13_u8, 14, 15, 16, 17, 18, 19, 110]));
-    test_vcombine!(test_vcombine_p8 => vcombine_p8([3_u8, 4, 5, 6, 7, 8, 9, 10], [13_u8, 14, 15, 16, 17, 18, 19, 110]));
-
-    test_vcombine!(test_vcombine_s16 => vcombine_s16([3_i16, -4, 5, -6], [13_i16, -14, 15, -16]));
-    test_vcombine!(test_vcombine_u16 => vcombine_u16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16]));
-    test_vcombine!(test_vcombine_p16 => vcombine_p16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16]));
-    // FIXME: 16-bit floats
-    // test_vcombine!(test_vcombine_f16 => vcombine_f16([3_f16, 4., 5., 6.],
-    //                                                  [13_f16, 14., 15., 16.]));
-
-    test_vcombine!(test_vcombine_s32 => vcombine_s32([3_i32, -4], [13_i32, -14]));
-    test_vcombine!(test_vcombine_u32 => vcombine_u32([3_u32, 4], [13_u32, 14]));
-    // note: poly32x4 does not exist, and neither does vcombine_p32
-    test_vcombine!(test_vcombine_f32 => vcombine_f32([3_f32, -4.], [13_f32, -14.]));
-
-    test_vcombine!(test_vcombine_s64 => vcombine_s64([-3_i64], [13_i64]));
-    test_vcombine!(test_vcombine_u64 => vcombine_u64([3_u64], [13_u64]));
-    test_vcombine!(test_vcombine_p64 => vcombine_p64([3_u64], [13_u64]));
-    test_vcombine!(test_vcombine_f64 => vcombine_f64([-3_f64], [13_f64]));
-
     #[simd_test(enable = "neon")]
     unsafe fn test_vdup_n_f64() {
         let a: f64 = 3.3;
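// The vcombine_* hunks above delete aarch64-local copies; the intrinsics
// themselves stay available to users (presumably via the shared arm_shared
// definitions). What they compute, as a usage sketch on an AArch64 target:
//
//     use core::arch::aarch64::*;
//     unsafe {
//         let lo = vdup_n_s8(1);
//         let hi = vdup_n_s8(2);
//         // lanes 0..=7 come from `lo`, lanes 8..=15 from `hi` -- exactly the
//         // 0..=15 shuffle the deleted bodies spelled out.
//         let all = vcombine_s8(lo, hi);
//         assert_eq!(vgetq_lane_s8::<0>(all), 1);
//         assert_eq!(vgetq_lane_s8::<15>(all), 2);
//     }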
diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
index ac2709744..fe473c51e 100644
--- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
+++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs
@@ -17,7 +17,7 @@ use stdarch_test::assert_instr;
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     simd_and(a, b)
 }
@@ -30,7 +30,7 @@ pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"),
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_and(a, b) } @@ -43,7 +43,7 @@ pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_and(a, b) } @@ -56,7 +56,7 @@ pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_and(a, b) } @@ -69,7 +69,7 @@ pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_and(a, b) } @@ -82,7 +82,7 @@ pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_and(a, b) } @@ -95,7 +95,7 @@ pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_and(a, b) } @@ -108,7 +108,7 @@ pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_and(a, b) } @@ -121,7 +121,7 @@ pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_and(a, b) } @@ -134,7 +134,7 @@ pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_and(a, b) } @@ -147,7 +147,7 @@ pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_and(a, b) } @@ -160,7 +160,7 @@ pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_and(a, b) } @@ -173,7 +173,7 @@ pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_and(a, b) } @@ -186,7 +186,7 @@ pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_and(a, b) } @@ -199,7 +199,7 @@ pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", 
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_and(a, b) } @@ -212,7 +212,7 @@ pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_and(a, b) } @@ -225,7 +225,7 @@ pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_or(a, b) } @@ -238,7 +238,7 @@ pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_or(a, b) } @@ -251,7 +251,7 @@ pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_or(a, b) } @@ -264,7 +264,7 @@ pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_or(a, b) } @@ -277,7 +277,7 @@ pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_or(a, b) } @@ -290,7 
+290,7 @@ pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_or(a, b) } @@ -303,7 +303,7 @@ pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_or(a, b) } @@ -316,7 +316,7 @@ pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_or(a, b) } @@ -329,7 +329,7 @@ pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_or(a, b) } @@ -342,7 +342,7 @@ pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_or(a, b) } @@ -355,7 +355,7 @@ pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_or(a, b) } @@ -368,7 +368,7 @@ pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_or(a, b) } @@ -381,7 +381,7 @@ pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_or(a, b) } @@ -394,7 +394,7 @@ pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_or(a, b) } @@ -407,7 +407,7 @@ pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_or(a, b) } @@ -420,7 +420,7 @@ pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_or(a, b) } @@ -433,7 +433,7 @@ pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_xor(a, b) } @@ -446,7 +446,7 @@ pub unsafe fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub 
unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_xor(a, b) } @@ -459,7 +459,7 @@ pub unsafe fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_xor(a, b) } @@ -472,7 +472,7 @@ pub unsafe fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_xor(a, b) } @@ -485,7 +485,7 @@ pub unsafe fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_xor(a, b) } @@ -498,7 +498,7 @@ pub unsafe fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_xor(a, b) } @@ -511,7 +511,7 @@ pub unsafe fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_xor(a, b) } @@ -524,7 +524,7 @@ pub unsafe fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_xor(a, b) } @@ -537,7 +537,7 @@ pub unsafe fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_xor(a, b) } @@ -550,7 +550,7 @@ pub unsafe fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_xor(a, b) } @@ -563,7 +563,7 @@ pub unsafe fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_xor(a, b) } @@ -576,7 +576,7 @@ pub unsafe fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_xor(a, b) } @@ -589,7 +589,7 @@ pub unsafe fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { simd_xor(a, b) } @@ -602,7 +602,7 @@ pub unsafe fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { simd_xor(a, b) } @@ -615,7 +615,7 @@ pub unsafe fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_xor(a, b) } @@ -628,7 +628,7 @@ pub unsafe fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(eor))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { simd_xor(a, b) } @@ -641,7 +641,7 @@ pub unsafe fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -660,7 +660,7 @@ vabd_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -679,7 +679,7 @@ vabdq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -698,7 +698,7 @@ vabd_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -717,7 +717,7 @@ vabdq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -736,7 +736,7 @@ vabd_s32_(a, b) #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -755,7 +755,7 @@ vabdq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -774,7 +774,7 @@ vabd_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -793,7 +793,7 @@ vabdq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -812,7 +812,7 @@ vabd_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -831,7 +831,7 @@ vabdq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -850,7 +850,7 @@ vabd_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -869,7 +869,7 @@ vabdq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -888,7 +888,7 @@ vabd_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -907,7 +907,7 @@ vabdq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { simd_cast(vabd_u8(a, b)) } @@ -920,7 +920,7 @@ pub unsafe fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { simd_cast(vabd_u16(a, b)) } @@ -933,7 +933,7 @@ pub unsafe fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { simd_cast(vabd_u32(a, b)) } @@ -946,7 +946,7 @@ pub unsafe fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { let c: uint8x8_t = simd_cast(vabd_s8(a, b)); simd_cast(c) @@ -960,7 
+960,7 @@ pub unsafe fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { let c: uint16x4_t = simd_cast(vabd_s16(a, b)); simd_cast(c) @@ -974,7 +974,7 @@ pub unsafe fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabdl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let c: uint32x2_t = simd_cast(vabd_s32(a, b)); simd_cast(c) @@ -988,7 +988,7 @@ pub unsafe fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_eq(a, b) } @@ -1001,7 +1001,7 @@ pub unsafe fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_eq(a, b) } @@ -1014,7 +1014,7 @@ pub unsafe fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_eq(a, b) } @@ -1027,7 +1027,7 @@ pub unsafe fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_eq(a, b) } @@ -1040,7 +1040,7 @@ pub unsafe fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_eq(a, b) } @@ -1053,7 +1053,7 @@ pub unsafe fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_eq(a, b) } @@ -1066,7 +1066,7 @@ pub unsafe fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_eq(a, b) } @@ -1079,7 +1079,7 @@ pub unsafe fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_eq(a, b) } @@ -1092,7 +1092,7 @@ pub unsafe fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_eq(a, b) } @@ -1105,7 +1105,7 @@ pub unsafe fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_eq(a, b) } @@ -1118,7 +1118,7 @@ pub unsafe fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_eq(a, b) } @@ -1131,7 +1131,7 @@ pub unsafe fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_eq(a, b) } @@ -1144,7 +1144,7 @@ pub unsafe fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { simd_eq(a, b) } @@ -1157,7 +1157,7 @@ pub unsafe fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { simd_eq(a, b) } @@ -1170,7 +1170,7 @@ pub unsafe fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_eq(a, b) } @@ -1183,7 +1183,7 @@ pub unsafe fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmeq))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_eq(a, b) } @@ -1196,7 +1196,7 @@ pub unsafe fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { let c: int8x8_t = simd_and(a, b); let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1211,7 +1211,7 @@ pub unsafe fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { let c: int8x16_t = simd_and(a, b); let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -1226,7 +1226,7 @@ pub unsafe fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { let c: int16x4_t = simd_and(a, b); let d: i16x4 = i16x4::new(0, 0, 0, 0); @@ -1241,7 +1241,7 @@ pub unsafe fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { let c: int16x8_t = simd_and(a, b); let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1256,7 +1256,7 @@ pub unsafe fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { let c: int32x2_t = simd_and(a, b); let d: i32x2 = i32x2::new(0, 0); @@ -1271,7 +1271,7 @@ pub unsafe fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { let c: int32x4_t = simd_and(a, b); let d: i32x4 = i32x4::new(0, 0, 0, 0); @@ -1286,7 +1286,7 @@ pub unsafe fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { let c: poly8x8_t = simd_and(a, b); let d: i8x8 = i8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1301,7 +1301,7 @@ pub unsafe fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { let c: poly8x16_t = simd_and(a, b); let d: i8x16 = i8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ -1316,7 +1316,7 @@ pub unsafe fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { let c: poly16x4_t = simd_and(a, b); let d: i16x4 = i16x4::new(0, 0, 0, 0); @@ -1331,7 +1331,7 @@ pub unsafe fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { let c: poly16x8_t = simd_and(a, b); let d: i16x8 = i16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1346,7 +1346,7 @@ pub unsafe fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { let c: uint8x8_t = simd_and(a, b); let d: u8x8 = u8x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1361,7 +1361,7 @@ pub unsafe fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { let c: uint8x16_t = simd_and(a, b); let d: u8x16 = u8x16::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); @@ 
-1376,7 +1376,7 @@ pub unsafe fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { let c: uint16x4_t = simd_and(a, b); let d: u16x4 = u16x4::new(0, 0, 0, 0); @@ -1391,7 +1391,7 @@ pub unsafe fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { let c: uint16x8_t = simd_and(a, b); let d: u16x8 = u16x8::new(0, 0, 0, 0, 0, 0, 0, 0); @@ -1406,7 +1406,7 @@ pub unsafe fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { let c: uint32x2_t = simd_and(a, b); let d: u32x2 = u32x2::new(0, 0); @@ -1421,7 +1421,7 @@ pub unsafe fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmtst))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { let c: uint32x4_t = simd_and(a, b); let d: u32x4 = u32x4::new(0, 0, 0, 0); @@ -1436,7 +1436,7 @@ pub unsafe fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { simd_fabs(a) } @@ -1449,7 +1449,7 @@ pub unsafe fn vabs_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { 
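// `vabs*_f32` maps directly to `simd_fabs` (VABS on ARM, FABS on AArch64).
// A usage sketch from user code -- `vdupq_n_f32` and `vabsq_f32` are the
// real `core::arch::aarch64` exports, while the `demo` wrapper is
// hypothetical:
//
//     #[cfg(target_arch = "aarch64")]
//     unsafe fn demo() -> core::arch::aarch64::float32x4_t {
//         use core::arch::aarch64::{vabsq_f32, vdupq_n_f32};
//         let v = vdupq_n_f32(-1.5); // all four lanes = -1.5
//         vabsq_f32(v)               // all four lanes =  1.5
//     }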
simd_fabs(a) } @@ -1462,7 +1462,7 @@ pub unsafe fn vabsq_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_gt(a, b) } @@ -1475,7 +1475,7 @@ pub unsafe fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_gt(a, b) } @@ -1488,7 +1488,7 @@ pub unsafe fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_gt(a, b) } @@ -1501,7 +1501,7 @@ pub unsafe fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_gt(a, b) } @@ -1514,7 +1514,7 @@ pub unsafe fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_gt(a, b) } @@ -1527,7 +1527,7 @@ pub unsafe fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_gt(a, b) } @@ -1540,7 +1540,7 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_gt(a, b) } @@ -1553,7 +1553,7 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_gt(a, b) } @@ -1566,7 +1566,7 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_gt(a, b) } @@ -1579,7 +1579,7 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_gt(a, b) } @@ -1592,7 +1592,7 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_gt(a, b) } @@ -1605,7 +1605,7 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_gt(a, b) } @@ -1618,7 +1618,7 @@ pub unsafe fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_gt(a, b) } @@ -1631,7 +1631,7 @@ pub unsafe fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_gt(a, b) } @@ -1644,7 +1644,7 @@ pub unsafe fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_lt(a, b) } @@ -1657,7 +1657,7 @@ pub unsafe fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_lt(a, b) } @@ -1670,7 +1670,7 @@ pub unsafe fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_lt(a, b) } @@ -1683,7 +1683,7 @@ pub unsafe fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_lt(a, b) } @@ -1696,7 +1696,7 @@ pub unsafe fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_s32(a: int32x2_t, b: 
int32x2_t) -> uint32x2_t { simd_lt(a, b) } @@ -1709,7 +1709,7 @@ pub unsafe fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_lt(a, b) } @@ -1722,7 +1722,7 @@ pub unsafe fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_lt(a, b) } @@ -1735,7 +1735,7 @@ pub unsafe fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_lt(a, b) } @@ -1748,7 +1748,7 @@ pub unsafe fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_lt(a, b) } @@ -1761,7 +1761,7 @@ pub unsafe fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_lt(a, b) } @@ -1774,7 +1774,7 @@ pub unsafe fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_lt(a, b) } @@ -1787,7 +1787,7 @@ pub unsafe fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhi))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_lt(a, b) } @@ -1800,7 +1800,7 @@ pub unsafe fn vcltq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_lt(a, b) } @@ -1813,7 +1813,7 @@ pub unsafe fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_lt(a, b) } @@ -1826,7 +1826,7 @@ pub unsafe fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_le(a, b) } @@ -1839,7 +1839,7 @@ pub unsafe fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_le(a, b) } @@ -1852,7 +1852,7 @@ pub unsafe fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_le(a, b) } @@ -1865,7 +1865,7 @@ pub unsafe fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_le(a, b) } @@ -1878,7 +1878,7 @@ pub unsafe fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_le(a, b) } @@ -1891,7 +1891,7 @@ pub unsafe fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_le(a, b) } @@ -1904,7 +1904,7 @@ pub unsafe fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_le(a, b) } @@ -1917,7 +1917,7 @@ pub unsafe fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_le(a, b) } @@ -1930,7 +1930,7 @@ pub unsafe fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_le(a, b) } @@ -1943,7 +1943,7 @@ pub unsafe fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_le(a, b) } @@ -1956,7 +1956,7 @@ pub unsafe fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_le(a, b) } @@ -1969,7 +1969,7 @@ pub unsafe fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_le(a, b) } @@ -1982,7 +1982,7 @@ pub unsafe fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_le(a, b) } @@ -1995,7 +1995,7 @@ pub unsafe fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_le(a, b) } @@ -2008,7 +2008,7 @@ pub unsafe fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { simd_ge(a, b) } @@ -2021,7 +2021,7 @@ pub unsafe fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { simd_ge(a, b) } @@ -2034,7 +2034,7 @@ pub unsafe fn vcgeq_s8(a: int8x16_t, b: 
int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { simd_ge(a, b) } @@ -2047,7 +2047,7 @@ pub unsafe fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { simd_ge(a, b) } @@ -2060,7 +2060,7 @@ pub unsafe fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { simd_ge(a, b) } @@ -2073,7 +2073,7 @@ pub unsafe fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_ge(a, b) } @@ -2086,7 +2086,7 @@ pub unsafe fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_ge(a, b) } @@ -2099,7 +2099,7 @@ pub unsafe fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_ge(a, b) } @@ -2112,7 +2112,7 @@ pub unsafe fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_ge(a, b) } @@ -2125,7 +2125,7 @@ pub unsafe fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_ge(a, b) } @@ -2138,7 +2138,7 @@ pub unsafe fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_ge(a, b) } @@ -2151,7 +2151,7 @@ pub unsafe fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cmhs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_ge(a, b) } @@ -2164,7 +2164,7 @@ pub unsafe fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { simd_ge(a, b) } @@ -2177,7 +2177,7 @@ pub unsafe fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcmge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { simd_ge(a, b) } @@ -2190,7 +2190,7 @@ pub unsafe fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcls_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2209,7 +2209,7 @@ vcls_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2228,7 +2228,7 @@ vclsq_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcls_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2247,7 +2247,7 @@ vcls_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2266,7 +2266,7 @@ vclsq_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcls_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2285,7 +2285,7 @@ vcls_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2304,7 +2304,7 @@ vclsq_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { transmute(vcls_s8(transmute(a))) } @@ -2317,7 +2317,7 @@ pub unsafe fn vcls_u8(a: uint8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { transmute(vclsq_s8(transmute(a))) } @@ -2330,7 +2330,7 @@ pub unsafe fn vclsq_u8(a: uint8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { transmute(vcls_s16(transmute(a))) } @@ -2343,7 +2343,7 @@ pub unsafe fn vcls_u16(a: uint16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { transmute(vclsq_s16(transmute(a))) } @@ -2356,7 +2356,7 @@ pub unsafe fn vclsq_u16(a: uint16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { transmute(vcls_s32(transmute(a))) } @@ -2369,7 +2369,7 @@ pub unsafe fn vcls_u32(a: uint32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(cls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { transmute(vclsq_s32(transmute(a))) } @@ -2382,7 +2382,7 @@ pub unsafe fn vclsq_u32(a: uint32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { vclz_s8_(a) } @@ -2395,7 +2395,7 @@ pub unsafe fn vclz_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { vclzq_s8_(a) } @@ 
-2408,7 +2408,7 @@ pub unsafe fn vclzq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { vclz_s16_(a) } @@ -2421,7 +2421,7 @@ pub unsafe fn vclz_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { vclzq_s16_(a) } @@ -2434,7 +2434,7 @@ pub unsafe fn vclzq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { vclz_s32_(a) } @@ -2447,7 +2447,7 @@ pub unsafe fn vclz_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { vclzq_s32_(a) } @@ -2460,7 +2460,7 @@ pub unsafe fn vclzq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { transmute(vclz_s8_(transmute(a))) } @@ -2473,7 +2473,7 @@ pub unsafe fn vclz_u8(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { transmute(vclzq_s8_(transmute(a))) } @@ -2486,7 +2486,7 @@ pub unsafe fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { transmute(vclz_s16_(transmute(a))) } @@ -2499,7 +2499,7 @@ pub unsafe fn vclz_u16(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { transmute(vclzq_s16_(transmute(a))) } @@ -2512,7 +2512,7 @@ pub unsafe fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { transmute(vclz_s32_(transmute(a))) } @@ -2525,7 +2525,7 @@ pub unsafe fn vclz_u32(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(clz))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { transmute(vclzq_s32_(transmute(a))) } @@ -2538,7 +2538,7 @@ pub unsafe fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2557,7 +2557,7 @@ vcagt_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -2576,7 +2576,7 @@ vcagtq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { 
    #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -2595,7 +2595,7 @@ vcage_f32_(a, b)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -2614,7 +2614,7 @@ vcageq_f32_(a, b)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
    vcagt_f32(b, a)
}
@@ -2627,7 +2627,7 @@ pub unsafe fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facgt))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
    vcagtq_f32(b, a)
}
@@ -2640,7 +2640,7 @@ pub unsafe fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
    vcage_f32(b, a)
}
@@ -2653,7 +2653,7 @@ pub unsafe fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(facge))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
    vcageq_f32(b, a)
}
@@ -2666,7 +2666,7 @@ pub unsafe fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_s8(a: u64) -> int8x8_t {
    transmute(a)
}
@@ -2679,7 +2679,7 @@ pub unsafe fn vcreate_s8(a: u64) -> int8x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_s16(a: u64) -> int16x4_t {
    transmute(a)
}
@@ -2692,7 +2692,7 @@ pub unsafe fn vcreate_s16(a: u64) -> int16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_s32(a: u64) -> int32x2_t {
    transmute(a)
}
@@ -2705,7 +2705,7 @@ pub unsafe fn vcreate_s32(a: u64) -> int32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_s64(a: u64) -> int64x1_t {
    transmute(a)
}
@@ -2718,7 +2718,7 @@ pub unsafe fn vcreate_s64(a: u64) -> int64x1_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t {
    transmute(a)
}
@@ -2731,7 +2731,7 @@ pub unsafe fn vcreate_u8(a: u64) -> uint8x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t {
    transmute(a)
}
@@ -2744,7 +2744,7 @@ pub unsafe fn vcreate_u16(a: u64) -> uint16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t {
    transmute(a)
}
@@ -2757,7 +2757,7 @@ pub unsafe fn vcreate_u32(a: u64) -> uint32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t {
    transmute(a)
}
@@ -2770,7 +2770,7 @@ pub unsafe fn vcreate_u64(a: u64) -> uint64x1_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t {
    transmute(a)
}
@@ -2783,7 +2783,7 @@ pub unsafe fn vcreate_p8(a: u64) -> poly8x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t {
    transmute(a)
}
@@ -2796,7 +2796,7 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t {
    transmute(a)
}
@@ -2809,7 +2809,7 @@ pub unsafe fn vcreate_p64(a: u64) -> poly64x1_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcreate_f32(a: u64) -> float32x2_t {
    transmute(a)
}
@@ -2822,7 +2822,7 @@ pub unsafe fn vcreate_f32(a: u64) -> float32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t {
    simd_cast(a)
}
@@ -2835,7 +2835,7 @@ pub unsafe fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(scvtf))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t {
    simd_cast(a)
}
@@ -2848,7 +2848,7 @@ pub unsafe fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t {
    simd_cast(a)
}
@@ -2861,7 +2861,7 @@ pub unsafe fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ucvtf))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t {
    simd_cast(a)
}
@@ -3170,7 +3170,7 @@ vcvtq_n_u32_f32_(a, N)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -3189,7 +3189,7 @@ vcvt_s32_f32_(a)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzs))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -3208,7 +3208,7 @@ vcvtq_s32_f32_(a)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -3227,7 +3227,7 @@ vcvt_u32_f32_(a)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fcvtzu))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t {
    #[allow(improper_ctypes)]
    extern "unadjusted" {
@@ -3247,7 +3247,7 @@ vcvtq_u32_f32_(a)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
    static_assert_imm3!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3262,7 +3262,7 @@ pub unsafe fn vdup_lane_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
    static_assert_imm4!(N);
    simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3277,7 +3277,7 @@ pub unsafe fn vdupq_laneq_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
    static_assert_imm2!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3292,7 +3292,7 @@ pub unsafe fn vdup_lane_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
    static_assert_imm3!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3307,7 +3307,7 @@ pub unsafe fn vdupq_laneq_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
    static_assert_imm1!(N);
    simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
@@ -3322,7 +3322,7 @@ pub unsafe fn vdup_lane_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
    static_assert_imm2!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3337,7 +3337,7 @@ pub unsafe fn vdupq_laneq_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s8<const N: i32>(a: int8x16_t) -> int8x8_t {
    static_assert_imm4!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3352,7 +3352,7 @@ pub unsafe fn vdup_laneq_s8<const N: i32>(a: int8x16_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s16<const N: i32>(a: int16x8_t) -> int16x4_t {
    static_assert_imm3!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3367,7 +3367,7 @@ pub unsafe fn vdup_laneq_s16<const N: i32>(a: int16x8_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s32<const N: i32>(a: int32x4_t) -> int32x2_t {
    static_assert_imm2!(N);
    simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
@@ -3382,7 +3382,7 @@ pub unsafe fn vdup_laneq_s32<const N: i32>(a: int32x4_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s8<const N: i32>(a: int8x8_t) -> int8x16_t {
    static_assert_imm3!(N);
    simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3397,7 +3397,7 @@ pub unsafe fn vdupq_lane_s8<const N: i32>(a: int8x8_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s16<const N: i32>(a: int16x4_t) -> int16x8_t {
    static_assert_imm2!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3412,7 +3412,7 @@ pub unsafe fn vdupq_lane_s16<const N: i32>(a: int16x4_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s32<const N: i32>(a: int32x2_t) -> int32x4_t {
    static_assert_imm1!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3427,7 +3427,7 @@ pub unsafe fn vdupq_lane_s32<const N: i32>(a: int32x2_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
    static_assert_imm3!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3442,7 +3442,7 @@ pub unsafe fn vdup_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
    static_assert_imm4!(N);
    simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3457,7 +3457,7 @@ pub unsafe fn vdupq_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
    static_assert_imm2!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3472,7 +3472,7 @@ pub unsafe fn vdup_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
    static_assert_imm3!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3487,7 +3487,7 @@ pub unsafe fn vdupq_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
    static_assert_imm1!(N);
    simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
@@ -3502,7 +3502,7 @@ pub unsafe fn vdup_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
    static_assert_imm2!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3517,7 +3517,7 @@ pub unsafe fn vdupq_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x8_t {
    static_assert_imm4!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3532,7 +3532,7 @@ pub unsafe fn vdup_laneq_u8<const N: i32>(a: uint8x16_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x4_t {
    static_assert_imm3!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3547,7 +3547,7 @@ pub unsafe fn vdup_laneq_u16<const N: i32>(a: uint16x8_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x2_t {
    static_assert_imm2!(N);
    simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
@@ -3562,7 +3562,7 @@ pub unsafe fn vdup_laneq_u32<const N: i32>(a: uint32x4_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x16_t {
    static_assert_imm3!(N);
    simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3577,7 +3577,7 @@ pub unsafe fn vdupq_lane_u8<const N: i32>(a: uint8x8_t) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x8_t {
    static_assert_imm2!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3592,7 +3592,7 @@ pub unsafe fn vdupq_lane_u16<const N: i32>(a: uint16x4_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x4_t {
    static_assert_imm1!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3607,7 +3607,7 @@ pub unsafe fn vdupq_lane_u32<const N: i32>(a: uint32x2_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x8_t {
    static_assert_imm3!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3622,7 +3622,7 @@ pub unsafe fn vdup_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x16_t {
    static_assert_imm4!(N);
    simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3637,7 +3637,7 @@ pub unsafe fn vdupq_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x4_t {
    static_assert_imm2!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3652,7 +3652,7 @@ pub unsafe fn vdup_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x8_t {
    static_assert_imm3!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3667,7 +3667,7 @@ pub unsafe fn vdupq_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 8))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x8_t {
    static_assert_imm4!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3682,7 +3682,7 @@ pub unsafe fn vdup_laneq_p8<const N: i32>(a: poly8x16_t) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x4_t {
    static_assert_imm3!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3697,7 +3697,7 @@ pub unsafe fn vdup_laneq_p16<const N: i32>(a: poly16x8_t) -> poly16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 4))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x16_t {
    static_assert_imm3!(N);
    simd_shuffle16!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3712,7 +3712,7 @@ pub unsafe fn vdupq_lane_p8<const N: i32>(a: poly8x8_t) -> poly8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x8_t {
    static_assert_imm2!(N);
    simd_shuffle8!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32, N as u32])
@@ -3727,7 +3727,7 @@ pub unsafe fn vdupq_lane_p16<const N: i32>(a: poly16x4_t) -> poly16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
    static_assert_imm1!(N);
    simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
@@ -3742,7 +3742,7 @@ pub unsafe fn vdupq_laneq_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_s64<const N: i32>(a: int64x1_t) -> int64x2_t {
    static_assert!(N : i32 where N == 0);
    simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
@@ -3757,7 +3757,7 @@ pub unsafe fn vdupq_lane_s64<const N: i32>(a: int64x1_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
    static_assert_imm1!(N);
    simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
@@ -3772,7 +3772,7 @@ pub unsafe fn vdupq_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 0))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x2_t {
    static_assert!(N : i32 where N == 0);
    simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
@@ -3787,7 +3787,7 @@ pub unsafe fn vdupq_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_f32<const N: i32>(a: float32x2_t) -> float32x2_t {
    static_assert_imm1!(N);
    simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
@@ -3802,7 +3802,7 @@ pub unsafe fn vdup_lane_f32<const N: i32>(a: float32x2_t) -> float32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_laneq_f32<const N: i32>(a: float32x4_t) -> float32x4_t {
    static_assert_imm2!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3817,7 +3817,7 @@ pub unsafe fn vdupq_laneq_f32<const N: i32>(a: float32x4_t) -> float32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_f32<const N: i32>(a: float32x4_t) -> float32x2_t {
    static_assert_imm2!(N);
    simd_shuffle2!(a, a, <const N: i32> [N as u32, N as u32])
@@ -3832,7 +3832,7 @@ pub unsafe fn vdup_laneq_f32<const N: i32>(a: float32x4_t) -> float32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(dup, N = 1))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdupq_lane_f32<const N: i32>(a: float32x2_t) -> float32x4_t {
    static_assert_imm1!(N);
    simd_shuffle4!(a, a, <const N: i32> [N as u32, N as u32, N as u32, N as u32])
@@ -3847,7 +3847,7 @@ pub unsafe fn vdupq_lane_f32<const N: i32>(a: float32x2_t) -> float32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
    static_assert!(N : i32 where N == 0);
    a
@@ -3862,7 +3862,7 @@ pub unsafe fn vdup_lane_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 0))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
    static_assert!(N : i32 where N == 0);
    a
@@ -3877,7 +3877,7 @@ pub unsafe fn vdup_lane_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_s64<const N: i32>(a: int64x2_t) -> int64x1_t {
    static_assert_imm1!(N);
    transmute::<i64, _>(simd_extract(a, N as u32))
@@ -3892,7 +3892,7 @@ pub unsafe fn vdup_laneq_s64<const N: i32>(a: int64x2_t) -> int64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, N = 1))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vdup_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x1_t {
    static_assert_imm1!(N);
    transmute::<u64, _>(simd_extract(a, N as u32))
@@ -3907,7 +3907,7 @@ pub unsafe fn vdup_laneq_u64<const N: i32>(a: uint64x2_t) -> uint64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    static_assert_imm3!(N);
    match N & 0b111 {
@@ -3932,7 +3932,7 @@ pub unsafe fn vext_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
    static_assert_imm4!(N);
    match N & 0b1111 {
@@ -3965,7 +3965,7 @@ pub unsafe fn vextq_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
    static_assert_imm2!(N);
    match N & 0b11 {
@@ -3986,7 +3986,7 @@ pub unsafe fn vext_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
    static_assert_imm3!(N);
    match N & 0b111 {
@@ -4011,7 +4011,7 @@ pub unsafe fn vextq_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
    static_assert_imm1!(N);
    match N & 0b1 {
@@ -4030,7 +4030,7 @@ pub unsafe fn vext_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert_imm2!(N);
    match N & 0b11 {
@@ -4051,7 +4051,7 @@ pub unsafe fn vextq_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert_imm3!(N);
    match N & 0b111 {
@@ -4076,7 +4076,7 @@ pub unsafe fn vext_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert_imm4!(N);
    match N & 0b1111 {
@@ -4109,7 +4109,7 @@ pub unsafe fn vextq_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert_imm2!(N);
    match N & 0b11 {
@@ -4130,7 +4130,7 @@ pub unsafe fn vext_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert_imm3!(N);
    match N & 0b111 {
@@ -4155,7 +4155,7 @@ pub unsafe fn vextq_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert_imm1!(N);
    match N & 0b1 {
@@ -4174,7 +4174,7 @@ pub unsafe fn vext_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert_imm2!(N);
    match N & 0b11 {
@@ -4195,7 +4195,7 @@ pub unsafe fn vextq_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
    static_assert_imm3!(N);
    match N & 0b111 {
@@ -4220,7 +4220,7 @@ pub unsafe fn vext_p8<const N: i32>(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 15))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t {
    static_assert_imm4!(N);
    match N & 0b1111 {
@@ -4253,7 +4253,7 @@ pub unsafe fn vextq_p8<const N: i32>(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t {
    static_assert_imm2!(N);
    match N & 0b11 {
@@ -4274,7 +4274,7 @@ pub unsafe fn vext_p16<const N: i32>(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 7))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t {
    static_assert_imm3!(N);
    match N & 0b111 {
@@ -4299,7 +4299,7 @@ pub unsafe fn vextq_p16<const N: i32>(a: poly16x8_t, b: poly16x8_t) -> poly16x8_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert_imm1!(N);
    match N & 0b1 {
@@ -4318,7 +4318,7 @@ pub unsafe fn vextq_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert_imm1!(N);
    match N & 0b1 {
@@ -4337,7 +4337,7 @@ pub unsafe fn vextq_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 1))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vext_f32<const N: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
    static_assert_imm1!(N);
    match N & 0b1 {
@@ -4356,7 +4356,7 @@ pub unsafe fn vext_f32<const N: i32>(a: float32x2_t, b: float32x2_t) -> float32x
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext, N = 3))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vextq_f32<const N: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
    static_assert_imm2!(N);
    match N & 0b11 {
@@ -4376,7 +4376,7 @@ pub unsafe fn vextq_f32<const N: i32>(a: float32x4_t, b: float32x4_t) -> float32
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4389,7 +4389,7 @@ pub unsafe fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4402,7 +4402,7 @@ pub unsafe fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4415,7 +4415,7 @@ pub unsafe fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4428,7 +4428,7 @@ pub unsafe fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4441,7 +4441,7 @@ pub unsafe fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4454,7 +4454,7 @@ pub unsafe fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4467,7 +4467,7 @@ pub unsafe fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4480,7 +4480,7 @@ pub unsafe fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4493,7 +4493,7 @@ pub unsafe fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4506,7 +4506,7 @@ pub unsafe fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4519,7 +4519,7 @@ pub unsafe fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4532,7 +4532,7 @@ pub unsafe fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4545,7 +4545,7 @@ pub unsafe fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
    simd_add(a, simd_mul(b, c))
}
@@ -4558,7 +4558,7 @@ pub unsafe fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t {
    vmla_s16(a, b, vdup_n_s16(c))
}
@@ -4571,7 +4571,7 @@ pub unsafe fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t {
    vmlaq_s16(a, b, vdupq_n_s16(c))
}
@@ -4584,7 +4584,7 @@ pub unsafe fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t {
    vmla_s32(a, b, vdup_n_s32(c))
}
@@ -4597,7 +4597,7 @@ pub unsafe fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t {
    vmlaq_s32(a, b, vdupq_n_s32(c))
}
@@ -4610,7 +4610,7 @@ pub unsafe fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t {
    vmla_u16(a, b, vdup_n_u16(c))
}
@@ -4623,7 +4623,7 @@ pub unsafe fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t {
    vmlaq_u16(a, b, vdupq_n_u16(c))
}
@@ -4636,7 +4636,7 @@ pub unsafe fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t {
    vmla_u32(a, b, vdup_n_u32(c))
}
@@ -4649,7 +4649,7 @@ pub unsafe fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t {
    vmlaq_u32(a, b, vdupq_n_u32(c))
}
@@ -4662,7 +4662,7
@@ pub unsafe fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vmla_f32(a, b, vdup_n_f32(c)) } @@ -4675,7 +4675,7 @@ pub unsafe fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vmlaq_f32(a, b, vdupq_n_f32(c)) } @@ -4689,7 +4689,7 @@ pub unsafe fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); vmla_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4704,7 +4704,7 @@ pub unsafe fn vmla_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); vmla_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4719,7 +4719,7 @@ pub unsafe fn vmla_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); vmlaq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4734,7 +4734,7 @@ pub unsafe fn vmlaq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); vmlaq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4749,7 +4749,7 @@ pub unsafe fn vmlaq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); vmla_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -4764,7 +4764,7 @@ pub unsafe fn vmla_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); vmla_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -4779,7 +4779,7 @@ pub unsafe fn vmla_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { static_assert_imm1!(LANE); vmlaq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4794,7 +4794,7 @@ pub unsafe fn vmlaq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlaq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4809,7 +4809,7 @@ pub unsafe fn vmlaq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, 
c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); vmla_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4824,7 +4824,7 @@ pub unsafe fn vmla_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { static_assert_imm3!(LANE); vmla_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4839,7 +4839,7 @@ pub unsafe fn vmla_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { static_assert_imm2!(LANE); vmlaq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4854,7 +4854,7 @@ pub unsafe fn vmlaq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); vmlaq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4869,7 +4869,7 @@ pub unsafe fn vmlaq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); vmla_u32(a, b, simd_shuffle2!(c, c, 
<const LANE: i32> [LANE as u32, LANE as u32])) @@ -4884,7 +4884,7 @@ pub unsafe fn vmla_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { static_assert_imm2!(LANE); vmla_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -4899,7 +4899,7 @@ pub unsafe fn vmla_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { static_assert_imm1!(LANE); vmlaq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4914,7 +4914,7 @@ pub unsafe fn vmlaq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mla, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlaq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4929,7 +4929,7 @@ pub unsafe fn vmlaq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); vmla_f32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -4944,7 +4944,7 @@ pub unsafe fn vmla_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_imm2!(LANE); vmla_f32(a, b, simd_shuffle2!(c, c, 
<const LANE: i32> [LANE as u32, LANE as u32])) @@ -4959,7 +4959,7 @@ pub unsafe fn vmla_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert_imm1!(LANE); vmlaq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4974,7 +4974,7 @@ pub unsafe fn vmlaq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); vmlaq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -4988,7 +4988,7 @@ pub unsafe fn vmlaq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { simd_add(a, vmull_s8(b, c)) } @@ -5001,7 +5001,7 @@ pub unsafe fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { simd_add(a, vmull_s16(b, c)) } @@ -5014,7 +5014,7 @@ pub unsafe fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { simd_add(a, vmull_s32(b, c)) } @@ -5027,7 +5027,7 @@ pub unsafe fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] 
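// Every hunk in this file applies one mechanical change: the `stable` marker was
// previously attached only under `target_arch = "aarch64"`, leaving the intrinsics
// unstable everywhere else, and is now attached whenever the target is anything
// other than 32-bit `arm`, where they remain unstable. The two spellings differ
// only on targets that are neither `arm` nor `aarch64`; presumably the negative
// form is used so the gate does not have to enumerate every non-`arm`
// architecture individually. A minimal sketch of the pattern, using the file's
// own staged-api idiom (`my_vmla` is an illustrative name, not an intrinsic here):
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn my_vmla(a: u32, b: u32, c: u32) -> u32 {
    // multiply-accumulate shape shared by the whole family: a + b * c
    a.wrapping_add(b.wrapping_mul(c))
}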
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { simd_add(a, vmull_u8(b, c)) } @@ -5040,7 +5040,7 @@ pub unsafe fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { simd_add(a, vmull_u16(b, c)) } @@ -5053,7 +5053,7 @@ pub unsafe fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { simd_add(a, vmull_u32(b, c)) } @@ -5066,7 +5066,7 @@ pub unsafe fn vmlal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vmlal_s16(a, b, vdup_n_s16(c)) } @@ -5079,7 +5079,7 @@ pub unsafe fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vmlal_s32(a, b, vdup_n_s32(c)) } @@ -5092,7 +5092,7 @@ pub unsafe fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { vmlal_u16(a, b, vdup_n_u16(c)) } @@ -5105,7 +5105,7 @@ pub unsafe fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { vmlal_u32(a, b, vdup_n_u32(c)) } @@ -5119,7 +5119,7 @@ pub unsafe fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlal_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5134,7 +5134,7 @@ pub unsafe fn vmlal_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmlal_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5149,7 +5149,7 @@ pub unsafe fn vmlal_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmlal_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -5164,7 +5164,7 @@ pub unsafe fn vmlal_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { static_assert_imm2!(LANE); vmlal_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -5179,7 +5179,7 @@ pub unsafe fn vmlal_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 
1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlal_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5194,7 +5194,7 @@ pub unsafe fn vmlal_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmlal_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5209,7 +5209,7 @@ pub unsafe fn vmlal_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { static_assert_imm1!(LANE); vmlal_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -5224,7 +5224,7 @@ pub unsafe fn vmlal_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlal, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlal_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { static_assert_imm2!(LANE); vmlal_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -5238,7 +5238,7 @@ pub unsafe fn vmlal_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5251,7 +5251,7 @@ pub unsafe fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { simd_sub(a, simd_mul(b, c)) } @@ -5264,7 +5264,7 @@ pub unsafe fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5277,7 +5277,7 @@ pub unsafe fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5290,7 +5290,7 @@ pub unsafe fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -5303,7 +5303,7 @@ pub unsafe fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5316,7 +5316,7 @@ pub unsafe fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5329,7 +5329,7 @@ pub unsafe fn vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { simd_sub(a, simd_mul(b, c)) } @@ -5342,7 +5342,7 @@ pub unsafe fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5355,7 +5355,7 @@ pub unsafe fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { simd_sub(a, simd_mul(b, c)) } @@ -5368,7 +5368,7 @@ pub unsafe fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -5381,7 +5381,7 @@ pub unsafe fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_ #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5394,7 +5394,7 @@ pub unsafe fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { simd_sub(a, simd_mul(b, c)) } @@ -5407,7 +5407,7 @@ pub unsafe fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { simd_sub(a, simd_mul(b, c)) } @@ -5420,7 +5420,7 @@ pub unsafe fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { vmls_s16(a, b, vdup_n_s16(c)) } @@ -5433,7 +5433,7 @@ pub unsafe fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { vmlsq_s16(a, b, vdupq_n_s16(c)) } @@ -5446,7 +5446,7 @@ pub unsafe fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { vmls_s32(a, b, vdup_n_s32(c)) } @@ -5459,7 +5459,7 @@ pub unsafe fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { vmlsq_s32(a, b, vdupq_n_s32(c)) } @@ -5472,7 +5472,7 @@ pub unsafe fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { vmls_u16(a, b, vdup_n_u16(c)) } @@ -5485,7 +5485,7 @@ pub unsafe fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { vmlsq_u16(a, b, vdupq_n_u16(c)) } @@ -5498,7 +5498,7 @@ pub unsafe fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { vmls_u32(a, b, vdup_n_u32(c)) } @@ -5511,7 +5511,7 @@ pub unsafe fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { vmlsq_u32(a, b, vdupq_n_u32(c)) } @@ -5524,7 +5524,7 @@ pub unsafe fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vmls_f32(a, b, vdup_n_f32(c)) } @@ -5537,7 +5537,7 @@ pub unsafe fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vmlsq_f32(a, b, vdupq_n_f32(c)) } @@ -5551,7 +5551,7 @@ pub unsafe fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); vmls_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as 
u32, LANE as u32])) @@ -5566,7 +5566,7 @@ pub unsafe fn vmls_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); vmls_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5581,7 +5581,7 @@ pub unsafe fn vmls_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); vmlsq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5596,7 +5596,7 @@ pub unsafe fn vmlsq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); vmlsq_s16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5611,7 +5611,7 @@ pub unsafe fn vmlsq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); vmls_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -5626,7 +5626,7 @@ pub unsafe fn vmls_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int32x4_t) -> 
int32x2_t { static_assert_imm2!(LANE); vmls_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -5641,7 +5641,7 @@ pub unsafe fn vmls_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x2_t) -> int32x4_t { static_assert_imm1!(LANE); vmlsq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5656,7 +5656,7 @@ pub unsafe fn vmlsq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlsq_s32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5671,7 +5671,7 @@ pub unsafe fn vmlsq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); vmls_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5686,7 +5686,7 @@ pub unsafe fn vmls_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: uint16x8_t) -> uint16x4_t { static_assert_imm3!(LANE); vmls_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5701,7 +5701,7 @@ pub unsafe fn vmls_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_u16<const LANE: i32>(a: uint16x8_t, 
b: uint16x8_t, c: uint16x4_t) -> uint16x8_t { static_assert_imm2!(LANE); vmlsq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5716,7 +5716,7 @@ pub unsafe fn vmlsq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); vmlsq_u16(a, b, simd_shuffle8!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5731,7 +5731,7 @@ pub unsafe fn vmlsq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); vmls_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -5746,7 +5746,7 @@ pub unsafe fn vmls_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: uint32x4_t) -> uint32x2_t { static_assert_imm2!(LANE); vmls_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -5761,7 +5761,7 @@ pub unsafe fn vmls_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x2_t) -> uint32x4_t { static_assert_imm1!(LANE); vmlsq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5776,7 +5776,7 @@ pub unsafe fn vmlsq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mls, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlsq_u32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5791,7 +5791,7 @@ pub unsafe fn vmlsq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); vmls_f32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -5806,7 +5806,7 @@ pub unsafe fn vmls_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmls_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: float32x4_t) -> float32x2_t { static_assert_imm2!(LANE); vmls_f32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -5821,7 +5821,7 @@ pub unsafe fn vmls_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x2_t) -> float32x4_t { static_assert_imm1!(LANE); vmlsq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5836,7 +5836,7 @@ pub unsafe fn vmlsq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { static_assert_imm2!(LANE); vmlsq_f32(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5850,7 +5850,7 @@ pub unsafe fn vmlsq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t, c #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", 
since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { simd_sub(a, vmull_s8(b, c)) } @@ -5863,7 +5863,7 @@ pub unsafe fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { simd_sub(a, vmull_s16(b, c)) } @@ -5876,7 +5876,7 @@ pub unsafe fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { simd_sub(a, vmull_s32(b, c)) } @@ -5889,7 +5889,7 @@ pub unsafe fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { simd_sub(a, vmull_u8(b, c)) } @@ -5902,7 +5902,7 @@ pub unsafe fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { simd_sub(a, vmull_u16(b, c)) } @@ -5915,7 +5915,7 @@ pub unsafe fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { simd_sub(a, vmull_u32(b, c)) } @@ -5928,7 +5928,7 @@ pub unsafe fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(smlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { vmlsl_s16(a, b, vdup_n_s16(c)) } @@ -5941,7 +5941,7 @@ pub unsafe fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { vmlsl_s32(a, b, vdup_n_s32(c)) } @@ -5954,7 +5954,7 @@ pub unsafe fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { vmlsl_u16(a, b, vdup_n_u16(c)) } @@ -5967,7 +5967,7 @@ pub unsafe fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { vmlsl_u32(a, b, vdup_n_u32(c)) } @@ -5981,7 +5981,7 @@ pub unsafe fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { static_assert_imm2!(LANE); vmlsl_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -5996,7 +5996,7 @@ pub unsafe fn vmlsl_lane_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmlsl_s16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE 
as u32, LANE as u32])) @@ -6011,7 +6011,7 @@ pub unsafe fn vmlsl_laneq_s16<const LANE: i32>(a: int32x4_t, b: int16x4_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmlsl_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -6026,7 +6026,7 @@ pub unsafe fn vmlsl_lane_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: int32x4_t) -> int64x2_t { static_assert_imm2!(LANE); vmlsl_s32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -6041,7 +6041,7 @@ pub unsafe fn vmlsl_laneq_s32<const LANE: i32>(a: int64x2_t, b: int32x2_t, c: in #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmlsl_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6056,7 +6056,7 @@ pub unsafe fn vmlsl_lane_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmlsl_u16(a, b, simd_shuffle4!(c, c, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -6071,7 +6071,7 @@ pub unsafe fn vmlsl_laneq_u16<const LANE: i32>(a: uint32x4_t, b: uint16x4_t, c: #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { static_assert_imm1!(LANE); vmlsl_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE 
as u32, LANE as u32])) @@ -6086,7 +6086,7 @@ pub unsafe fn vmlsl_lane_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: u #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umlsl, LANE = 1))] #[rustc_legacy_const_generics(3)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmlsl_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: uint32x4_t) -> uint64x2_t { static_assert_imm2!(LANE); vmlsl_u32(a, b, simd_shuffle2!(c, c, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -6100,7 +6100,7 @@ pub unsafe fn vmlsl_laneq_u32<const LANE: i32>(a: uint64x2_t, b: uint32x2_t, c: #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { simd_neg(a) } @@ -6113,7 +6113,7 @@ pub unsafe fn vneg_s8(a: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { simd_neg(a) } @@ -6126,7 +6126,7 @@ pub unsafe fn vnegq_s8(a: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { simd_neg(a) } @@ -6139,7 +6139,7 @@ pub unsafe fn vneg_s16(a: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { simd_neg(a) } @@ -6152,7 +6152,7 @@ pub unsafe fn vnegq_s16(a: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { simd_neg(a) } @@ -6165,7 +6165,7 @@ pub unsafe fn vneg_s32(a: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(neg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { simd_neg(a) } @@ -6178,7 +6178,7 @@ pub unsafe fn vnegq_s32(a: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { simd_neg(a) } @@ -6191,7 +6191,7 @@ pub unsafe fn vneg_f32(a: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { simd_neg(a) } @@ -6204,7 +6204,7 @@ pub unsafe fn vnegq_f32(a: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqneg_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6223,7 +6223,7 @@ vqneg_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqnegq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6242,7 +6242,7 @@ vqnegq_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqneg_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6261,7 +6261,7 @@ vqneg_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqnegq_s16(a: int16x8_t) -> int16x8_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -6280,7 +6280,7 @@ vqnegq_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqneg_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6299,7 +6299,7 @@ vqneg_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqneg))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqnegq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6318,7 +6318,7 @@ vqnegq_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6337,7 +6337,7 @@ vqsub_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6356,7 +6356,7 @@ vqsubq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6375,7 +6375,7 @@ vqsub_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6394,7 +6394,7 @@ vqsubq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6413,7 +6413,7 @@ vqsub_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6432,7 +6432,7 @@ vqsubq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6451,7 +6451,7 @@ vqsub_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6470,7 +6470,7 @@ vqsubq_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6489,7 +6489,7 @@ vqsub_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6508,7 +6508,7 @@ vqsubq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -6527,7 +6527,7 @@ vqsub_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6546,7 +6546,7 @@ vqsubq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6565,7 +6565,7 @@ vqsub_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6584,7 +6584,7 @@ vqsubq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6603,7 +6603,7 @@ vqsub_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqsub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6622,7 +6622,7 @@ vqsubq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6641,7 +6641,7 @@ vhadd_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6660,7 +6660,7 @@ vhaddq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6679,7 +6679,7 @@ vhadd_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6698,7 +6698,7 @@ vhaddq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6717,7 +6717,7 @@ vhadd_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6736,7 +6736,7 @@ vhaddq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6755,7 +6755,7 @@ vhadd_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> 
int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6774,7 +6774,7 @@ vhaddq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6793,7 +6793,7 @@ vhadd_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6812,7 +6812,7 @@ vhaddq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6831,7 +6831,7 @@ vhadd_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6850,7 +6850,7 @@ vhaddq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6869,7 +6869,7 @@ vrhadd_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6888,7 +6888,7 @@ vrhaddq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6907,7 +6907,7 @@ vrhadd_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6926,7 +6926,7 @@ vrhaddq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6945,7 +6945,7 @@ vrhadd_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6964,7 +6964,7 @@ vrhaddq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -6983,7 +6983,7 @@ vrhadd_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7002,7 +7002,7 @@ vrhaddq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_s16(a: 
int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7021,7 +7021,7 @@ vrhadd_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7040,7 +7040,7 @@ vrhaddq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7059,7 +7059,7 @@ vrhadd_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srhadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7078,7 +7078,7 @@ vrhaddq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrndn_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7097,7 +7097,7 @@ vrndn_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frintn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrndnq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7116,7 +7116,7 @@ vrndnq_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7135,7 +7135,7 @@ vqadd_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7154,7 +7154,7 @@ vqaddq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7173,7 +7173,7 @@ vqadd_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7192,7 +7192,7 @@ vqaddq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7211,7 +7211,7 @@ vqadd_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7230,7 +7230,7 @@ vqaddq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7249,7 +7249,7 @@ vqadd_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_u64(a: 
uint64x2_t, b: uint64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7268,7 +7268,7 @@ vqaddq_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7287,7 +7287,7 @@ vqadd_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7306,7 +7306,7 @@ vqaddq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7325,7 +7325,7 @@ vqadd_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7344,7 +7344,7 @@ vqaddq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7363,7 +7363,7 @@ vqadd_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7382,7 +7382,7 @@ vqaddq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7401,7 +7401,7 @@ vqadd_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqadd))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7420,7 +7420,7 @@ vqaddq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7439,7 +7439,7 @@ vld1_s8_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7458,7 +7458,7 @@ vld1_s16_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7477,7 +7477,7 @@ vld1_s32_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7496,7 +7496,7 @@ vld1_s64_x2_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld1q_s8_x2(a: *const i8) -> int8x16x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -7515,7 +7515,7 @@ 
vld1q_s8_x2_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7534,7 +7534,7 @@ vld1q_s16_x2_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7553,7 +7553,7 @@ vld1q_s32_x2_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7572,7 +7572,7 @@ vld1q_s64_x2_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7591,7 +7591,7 @@ vld1_s8_x3_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7610,7 +7610,7 @@ vld1_s16_x3_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7629,7 +7629,7 @@ vld1_s32_x3_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7648,7 +7648,7 @@ vld1_s64_x3_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7667,7 +7667,7 @@ vld1q_s8_x3_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7686,7 +7686,7 @@ vld1q_s16_x3_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7705,7 +7705,7 @@ vld1q_s32_x3_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7724,7 +7724,7 @@ vld1q_s64_x3_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7743,7 +7743,7 @@ vld1_s8_x4_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7762,7 +7762,7 @@ vld1_s16_x4_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7781,7 +7781,7 @@ vld1_s32_x4_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7800,7 +7800,7 @@ vld1_s64_x4_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7819,7 +7819,7 @@ vld1q_s8_x4_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7838,7 +7838,7 @@ vld1q_s16_x4_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7857,7 +7857,7 @@ vld1q_s32_x4_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -7876,7 +7876,7 @@ vld1q_s64_x4_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t {
     transmute(vld1_s8_x2(transmute(a)))
 }
@@ -7889,7 +7889,7 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t {
     transmute(vld1_s16_x2(transmute(a)))
 }
@@ -7902,7 +7902,7 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t {
     transmute(vld1_s32_x2(transmute(a)))
 }
@@ -7915,7 +7915,7 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t {
     transmute(vld1_s64_x2(transmute(a)))
 }
@@ -7928,7 +7928,7 @@ pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t {
     transmute(vld1q_s8_x2(transmute(a)))
 }
@@ -7941,7 +7941,7 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t {
     transmute(vld1q_s16_x2(transmute(a)))
 }
@@ -7954,7 +7954,7 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t {
     transmute(vld1q_s32_x2(transmute(a)))
 }
@@ -7967,7 +7967,7 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t {
     transmute(vld1q_s64_x2(transmute(a)))
 }
@@ -7980,7 +7980,7 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t {
     transmute(vld1_s8_x3(transmute(a)))
 }
@@ -7993,7 +7993,7 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t {
     transmute(vld1_s16_x3(transmute(a)))
 }
@@ -8006,7 +8006,7 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t {
     transmute(vld1_s32_x3(transmute(a)))
 }
@@ -8019,7 +8019,7 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t {
     transmute(vld1_s64_x3(transmute(a)))
 }
@@ -8032,7 +8032,7 @@ pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t {
     transmute(vld1q_s8_x3(transmute(a)))
 }
@@ -8045,7 +8045,7 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t {
     transmute(vld1q_s16_x3(transmute(a)))
 }
@@ -8058,7 +8058,7 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t {
     transmute(vld1q_s32_x3(transmute(a)))
 }
@@ -8071,7 +8071,7 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t {
     transmute(vld1q_s64_x3(transmute(a)))
 }
@@ -8084,7 +8084,7 @@ pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t {
     transmute(vld1_s8_x4(transmute(a)))
 }
@@ -8097,7 +8097,7 @@ pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t {
     transmute(vld1_s16_x4(transmute(a)))
 }
@@ -8110,7 +8110,7 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t {
     transmute(vld1_s32_x4(transmute(a)))
 }
@@ -8123,7 +8123,7 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t {
     transmute(vld1_s64_x4(transmute(a)))
 }
@@ -8136,7 +8136,7 @@ pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t {
     transmute(vld1q_s8_x4(transmute(a)))
 }
@@ -8149,7 +8149,7 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t {
     transmute(vld1q_s16_x4(transmute(a)))
 }
@@ -8162,7 +8162,7 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t {
     transmute(vld1q_s32_x4(transmute(a)))
 }
@@ -8175,7 +8175,7 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t {
     transmute(vld1q_s64_x4(transmute(a)))
 }
@@ -8188,7 +8188,7 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t {
     transmute(vld1_s8_x2(transmute(a)))
 }
@@ -8201,7 +8201,7 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t {
     transmute(vld1_s8_x3(transmute(a)))
 }
@@ -8214,7 +8214,7 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t {
     transmute(vld1_s8_x4(transmute(a)))
 }
@@ -8227,7 +8227,7 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t {
     transmute(vld1q_s8_x2(transmute(a)))
 }
@@ -8240,7 +8240,7 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t {
     transmute(vld1q_s8_x3(transmute(a)))
 }
@@ -8253,7 +8253,7 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t {
     transmute(vld1q_s8_x4(transmute(a)))
 }
@@ -8266,7 +8266,7 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t {
     transmute(vld1_s16_x2(transmute(a)))
 }
@@ -8279,7 +8279,7 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t {
     transmute(vld1_s16_x3(transmute(a)))
 }
@@ -8292,7 +8292,7 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t {
     transmute(vld1_s16_x4(transmute(a)))
 }
@@ -8305,7 +8305,7 @@ pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t {
     transmute(vld1q_s16_x2(transmute(a)))
 }
@@ -8318,7 +8318,7 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t {
     transmute(vld1q_s16_x3(transmute(a)))
 }
@@ -8331,7 +8331,7 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t {
     transmute(vld1q_s16_x4(transmute(a)))
 }
@@ -8344,7 +8344,7 @@ pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t {
     transmute(vld1_s64_x2(transmute(a)))
 }
@@ -8357,7 +8357,7 @@ pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t {
     transmute(vld1_s64_x3(transmute(a)))
 }
@@ -8370,7 +8370,7 @@ pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t {
     transmute(vld1_s64_x4(transmute(a)))
 }
@@ -8383,7 +8383,7 @@ pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t {
     transmute(vld1q_s64_x2(transmute(a)))
 }
@@ -8396,7 +8396,7 @@ pub unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t {
     transmute(vld1q_s64_x3(transmute(a)))
 }
@@ -8409,7 +8409,7 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t {
     transmute(vld1q_s64_x4(transmute(a)))
 }
@@ -8422,7 +8422,7 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -8441,7 +8441,7 @@ vld1_f32_x2_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -8460,7 +8460,7 @@ vld1q_f32_x2_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -8479,7 +8479,7 @@ vld1_f32_x3_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -8498,7 +8498,7 @@ vld1q_f32_x3_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -8517,7 +8517,7 @@ vld1_f32_x4_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -8767,7 +8767,7 @@ vld2_s64_(a as _)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t {
     transmute(vld2_s8(transmute(a)))
 }
@@ -8780,7 +8780,7 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t {
     transmute(vld2_s16(transmute(a)))
 }
@@ -8793,7 +8793,7 @@ pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t {
     transmute(vld2_s32(transmute(a)))
 }
@@ -8806,7 +8806,7 @@ pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t {
     transmute(vld2q_s8(transmute(a)))
 }
@@ -8819,7 +8819,7 @@ pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t {
     transmute(vld2q_s16(transmute(a)))
 }
@@ -8832,7 +8832,7 @@ pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t {
     transmute(vld2q_s32(transmute(a)))
 }
@@ -8845,7 +8845,7 @@ pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t {
     transmute(vld2_s8(transmute(a)))
 }
@@ -8858,7 +8858,7 @@ pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t {
     transmute(vld2_s16(transmute(a)))
 }
@@ -8871,7 +8871,7 @@ pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t {
     transmute(vld2q_s8(transmute(a)))
 }
@@ -8884,7 +8884,7 @@ pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t {
     transmute(vld2q_s16(transmute(a)))
 }
@@ -8897,7 +8897,7 @@ pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t {
     transmute(vld2_s64(transmute(a)))
 }
@@ -8910,7 +8910,7 @@ pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t {
     transmute(vld2_s64(transmute(a)))
 }
@@ -9220,7 +9220,7 @@ vld2_dup_s64_(a as _)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t {
     transmute(vld2_dup_s8(transmute(a)))
 }
@@ -9233,7 +9233,7 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t {
     transmute(vld2_dup_s16(transmute(a)))
 }
@@ -9246,7 +9246,7 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t {
     transmute(vld2_dup_s32(transmute(a)))
 }
@@ -9259,7 +9259,7 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t {
     transmute(vld2q_dup_s8(transmute(a)))
 }
@@ -9272,7 +9272,7 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t {
     transmute(vld2q_dup_s16(transmute(a)))
 }
@@ -9285,7 +9285,7 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t {
     transmute(vld2q_dup_s32(transmute(a)))
 }
@@ -9298,7 +9298,7 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t {
     transmute(vld2_dup_s8(transmute(a)))
 }
@@ -9311,7 +9311,7 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t {
     transmute(vld2_dup_s16(transmute(a)))
 }
@@ -9324,7 +9324,7 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t {
     transmute(vld2q_dup_s8(transmute(a)))
 }
@@ -9337,7 +9337,7 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t {
     transmute(vld2q_dup_s16(transmute(a)))
 }
@@ -9350,7 +9350,7 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t {
     transmute(vld2_dup_s64(transmute(a)))
 }
@@ -9363,7 +9363,7 @@ pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t {
     transmute(vld2_dup_s64(transmute(a)))
 }
@@ -9628,7 +9628,7 @@ vld2q_lane_s32_(b.0, b.1, LANE as i64, a as _)
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x2_t) -> uint8x8x2_t {
     static_assert_imm3!(LANE);
     transmute(vld2_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -9643,7 +9643,7 @@ pub unsafe fn vld2_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x2_t) -> uin
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x2_t) -> uint16x4x2_t {
     static_assert_imm2!(LANE);
     transmute(vld2_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -9658,7 +9658,7 @@ pub unsafe fn vld2_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x2_t) -> uint32x2x2_t {
     static_assert_imm1!(LANE);
     transmute(vld2_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -9673,7 +9673,7 @@ pub unsafe fn vld2_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x2_t) -> uint16x8x2_t {
     static_assert_imm3!(LANE);
    transmute(vld2q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -9688,7 +9688,7 @@ pub unsafe fn vld2q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vld2q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x2_t) -> uint32x4x2_t {
static_assert_imm2!(LANE); transmute(vld2q_lane_s32::<LANE>(transmute(a), transmute(b))) @@ -9703,7 +9703,7 @@ pub unsafe fn vld2q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x2_t) -> poly8x8x2_t { static_assert_imm3!(LANE); transmute(vld2_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -9718,7 +9718,7 @@ pub unsafe fn vld2_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x2_t) -> pol #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x2_t) -> poly16x4x2_t { static_assert_imm2!(LANE); transmute(vld2_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -9733,7 +9733,7 @@ pub unsafe fn vld2_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x2_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld2q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x2_t) -> poly16x8x2_t { static_assert_imm3!(LANE); transmute(vld2q_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -10052,7 +10052,7 @@ vld3_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { transmute(vld3_s8(transmute(a))) } @@ -10065,7 +10065,7 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { transmute(vld3_s16(transmute(a))) } @@ -10078,7 +10078,7 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { transmute(vld3_s32(transmute(a))) } @@ -10091,7 +10091,7 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { transmute(vld3q_s8(transmute(a))) } @@ -10104,7 +10104,7 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { transmute(vld3q_s16(transmute(a))) } @@ -10117,7 +10117,7 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { transmute(vld3q_s32(transmute(a))) } @@ -10130,7 +10130,7 @@ pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { transmute(vld3_s8(transmute(a))) } @@ -10143,7 +10143,7 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { transmute(vld3_s16(transmute(a))) } @@ -10156,7 +10156,7 @@ pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { transmute(vld3q_s8(transmute(a))) } @@ -10169,7 +10169,7 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { transmute(vld3q_s16(transmute(a))) } @@ -10182,7 +10182,7 @@ pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { transmute(vld3_s64(transmute(a))) } @@ -10195,7 +10195,7 @@ pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { transmute(vld3_s64(transmute(a))) } @@ -10505,7 +10505,7 @@ vld3_dup_s64_(a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { transmute(vld3_dup_s8(transmute(a))) } @@ -10518,7 +10518,7 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { transmute(vld3_dup_s16(transmute(a))) } @@ -10531,7 +10531,7 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { transmute(vld3_dup_s32(transmute(a))) } @@ -10544,7 +10544,7 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { transmute(vld3q_dup_s8(transmute(a))) } @@ -10557,7 +10557,7 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { transmute(vld3q_dup_s16(transmute(a))) } @@ -10570,7 +10570,7 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { transmute(vld3q_dup_s32(transmute(a))) } @@ -10583,7 +10583,7 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { transmute(vld3_dup_s8(transmute(a))) } @@ -10596,7 +10596,7 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { transmute(vld3_dup_s16(transmute(a))) } @@ -10609,7 +10609,7 @@ pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { transmute(vld3q_dup_s8(transmute(a))) } @@ -10622,7 +10622,7 @@ pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { transmute(vld3q_dup_s16(transmute(a))) } @@ -10635,7 +10635,7 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { transmute(vld3_dup_s64(transmute(a))) } @@ -10648,7 +10648,7 @@ pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3r))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { transmute(vld3_dup_s64(transmute(a))) } @@ -10913,7 +10913,7 @@ vld3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x3_t) -> uint8x8x3_t { static_assert_imm3!(LANE); transmute(vld3_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -10928,7 +10928,7 @@ pub unsafe fn vld3_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x3_t) -> uin #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x3_t) -> uint16x4x3_t { static_assert_imm2!(LANE); transmute(vld3_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -10943,7 +10943,7 @@ pub unsafe fn vld3_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x3_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vld3_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x3_t) -> uint32x2x3_t { static_assert_imm1!(LANE); transmute(vld3_lane_s32::<LANE>(transmute(a), transmute(b))) @@ -10958,7 +10958,7 @@ pub unsafe fn vld3_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x3_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
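Every hunk in this stretch of library/stdarch makes the same one-line change: the stability attribute on these NEON intrinsics, previously attached only when target_arch = "aarch64", is now attached on every target except 32-bit arm, where the intrinsics remain unstable. A minimal, self-contained sketch of the cfg_attr mechanics involved, assuming a hypothetical stand-in function vfoo_u8 and substituting #[must_use] for #[stable] (the latter only works inside #![feature(staged_api)] crates such as core):

    // The attribute after the condition is attached only when the condition
    // holds: on every non-arm target `vfoo_u8` carries `#[must_use]`, on
    // 32-bit arm it does not. The diff uses the same mechanism with
    // `stable(...)` in place of `must_use`.
    #[cfg_attr(not(target_arch = "arm"), must_use)]
    pub fn vfoo_u8(a: &[u8; 8]) -> u8 {
        // Toy body: wrapping sum of the eight lanes.
        a.iter().copied().fold(0, u8::wrapping_add)
    }

    fn main() {
        // On a non-arm target, ignoring the result would trigger the
        // must_use warning, mirroring how the conditional attribute fires.
        let _sum = vfoo_u8(&[1; 8]);
    }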
@@ -10958,7 +10958,7 @@ pub unsafe fn vld3_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x3_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x3_t) -> uint16x8x3_t {
static_assert_imm3!(LANE);
transmute(vld3q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -10973,7 +10973,7 @@ pub unsafe fn vld3q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x3_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x3_t) -> uint32x4x3_t {
static_assert_imm2!(LANE);
transmute(vld3q_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -10988,7 +10988,7 @@ pub unsafe fn vld3q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x3_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x3_t) -> poly8x8x3_t {
static_assert_imm3!(LANE);
transmute(vld3_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -11003,7 +11003,7 @@ pub unsafe fn vld3_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x3_t) -> pol
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x3_t) -> poly16x4x3_t {
static_assert_imm2!(LANE);
transmute(vld3_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -11018,7 +11018,7 @@ pub unsafe fn vld3_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x3_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld3, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld3q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x3_t) -> poly16x8x3_t {
static_assert_imm3!(LANE);
transmute(vld3q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -11337,7 +11337,7 @@ vld4_s64_(a as _)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t {
transmute(vld4_s8(transmute(a)))
}
@@ -11350,7 +11350,7 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t {
transmute(vld4_s16(transmute(a)))
}
@@ -11363,7 +11363,7 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t {
transmute(vld4_s32(transmute(a)))
}
@@ -11376,7 +11376,7 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t {
transmute(vld4q_s8(transmute(a)))
}
@@ -11389,7 +11389,7 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t {
transmute(vld4q_s16(transmute(a)))
}
@@ -11402,7 +11402,7 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t {
transmute(vld4q_s32(transmute(a)))
}
@@ -11415,7 +11415,7 @@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t {
transmute(vld4_s8(transmute(a)))
}
@@ -11428,7 +11428,7 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t {
transmute(vld4_s16(transmute(a)))
}
@@ -11441,7 +11441,7 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t {
transmute(vld4q_s8(transmute(a)))
}
@@ -11454,7 +11454,7 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t {
transmute(vld4q_s16(transmute(a)))
}
@@ -11467,7 +11467,7 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t {
transmute(vld4_s64(transmute(a)))
}
@@ -11480,7 +11480,7 @@ pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t {
transmute(vld4_s64(transmute(a)))
}
@@ -11790,7 +11790,7 @@ vld4_dup_s64_(a as _)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t {
transmute(vld4_dup_s8(transmute(a)))
}
@@ -11803,7 +11803,7 @@ pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t {
transmute(vld4_dup_s16(transmute(a)))
}
@@ -11816,7 +11816,7 @@ pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t {
transmute(vld4_dup_s32(transmute(a)))
}
@@ -11829,7 +11829,7 @@ pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t {
transmute(vld4q_dup_s8(transmute(a)))
}
@@ -11842,7 +11842,7 @@ pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t {
transmute(vld4q_dup_s16(transmute(a)))
}
@@ -11855,7 +11855,7 @@ pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t {
transmute(vld4q_dup_s32(transmute(a)))
}
@@ -11868,7 +11868,7 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t {
transmute(vld4_dup_s8(transmute(a)))
}
@@ -11881,7 +11881,7 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t {
transmute(vld4_dup_s16(transmute(a)))
}
@@ -11894,7 +11894,7 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t {
transmute(vld4q_dup_s8(transmute(a)))
}
@@ -11907,7 +11907,7 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t {
transmute(vld4q_dup_s16(transmute(a)))
}
@@ -11920,7 +11920,7 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t {
transmute(vld4_dup_s64(transmute(a)))
}
@@ -11933,7 +11933,7 @@ pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4r))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t {
transmute(vld4_dup_s64(transmute(a)))
}
@@ -12198,7 +12198,7 @@ vld4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x4_t) -> uint8x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -12213,7 +12213,7 @@ pub unsafe fn vld4_lane_u8<const LANE: i32>(a: *const u8, b: uint8x8x4_t) -> uin
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x4_t) -> uint16x4x4_t {
static_assert_imm2!(LANE);
transmute(vld4_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -12228,7 +12228,7 @@ pub unsafe fn vld4_lane_u16<const LANE: i32>(a: *const u16, b: uint16x4x4_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x4_t) -> uint32x2x4_t {
static_assert_imm1!(LANE);
transmute(vld4_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -12243,7 +12243,7 @@ pub unsafe fn vld4_lane_u32<const LANE: i32>(a: *const u32, b: uint32x2x4_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x4_t) -> uint16x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -12258,7 +12258,7 @@ pub unsafe fn vld4q_lane_u16<const LANE: i32>(a: *const u16, b: uint16x8x4_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x4_t) -> uint32x4x4_t {
static_assert_imm2!(LANE);
transmute(vld4q_lane_s32::<LANE>(transmute(a), transmute(b)))
@@ -12273,7 +12273,7 @@ pub unsafe fn vld4q_lane_u32<const LANE: i32>(a: *const u32, b: uint32x4x4_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x4_t) -> poly8x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4_lane_s8::<LANE>(transmute(a), transmute(b)))
@@ -12288,7 +12288,7 @@ pub unsafe fn vld4_lane_p8<const LANE: i32>(a: *const p8, b: poly8x8x4_t) -> pol
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x4_t) -> poly16x4x4_t {
static_assert_imm2!(LANE);
transmute(vld4_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -12303,7 +12303,7 @@ pub unsafe fn vld4_lane_p16<const LANE: i32>(a: *const p16, b: poly16x4x4_t) ->
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ld4, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vld4q_lane_p16<const LANE: i32>(a: *const p16, b: poly16x8x4_t) -> poly16x8x4_t {
static_assert_imm3!(LANE);
transmute(vld4q_lane_s16::<LANE>(transmute(a), transmute(b)))
@@ -12392,7 +12392,7 @@ vld4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _)
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12407,7 +12407,7 @@ pub unsafe fn vst1_lane_s8<const LANE: i32>(a: *mut i8, b: int8x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12422,7 +12422,7 @@ pub unsafe fn vst1_lane_s16<const LANE: i32>(a: *mut i16, b: int16x4_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12437,7 +12437,7 @@ pub unsafe fn vst1_lane_s32<const LANE: i32>(a: *mut i32, b: int32x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1_t) {
static_assert!(LANE : i32 where LANE == 0);
*a = simd_extract(b, LANE as u32);
@@ -12452,7 +12452,7 @@ pub unsafe fn vst1_lane_s64<const LANE: i32>(a: *mut i64, b: int64x1_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16_t) {
static_assert_imm4!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12467,7 +12467,7 @@ pub unsafe fn vst1q_lane_s8<const LANE: i32>(a: *mut i8, b: int8x16_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12482,7 +12482,7 @@ pub unsafe fn vst1q_lane_s16<const LANE: i32>(a: *mut i16, b: int16x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12497,7 +12497,7 @@ pub unsafe fn vst1q_lane_s32<const LANE: i32>(a: *mut i32, b: int32x4_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12512,7 +12512,7 @@ pub unsafe fn vst1q_lane_s64<const LANE: i32>(a: *mut i64, b: int64x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12527,7 +12527,7 @@ pub unsafe fn vst1_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12542,7 +12542,7 @@ pub unsafe fn vst1_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12557,7 +12557,7 @@ pub unsafe fn vst1_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1_t) {
static_assert!(LANE : i32 where LANE == 0);
*a = simd_extract(b, LANE as u32);
@@ -12572,7 +12572,7 @@ pub unsafe fn vst1_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x1_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16_t) {
static_assert_imm4!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12587,7 +12587,7 @@ pub unsafe fn vst1q_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x16_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12602,7 +12602,7 @@ pub unsafe fn vst1q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12617,7 +12617,7 @@ pub unsafe fn vst1q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12632,7 +12632,7 @@ pub unsafe fn vst1q_lane_u64<const LANE: i32>(a: *mut u64, b: uint64x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12647,7 +12647,7 @@ pub unsafe fn vst1_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12662,7 +12662,7 @@ pub unsafe fn vst1_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16_t) {
static_assert_imm4!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12677,7 +12677,7 @@ pub unsafe fn vst1q_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x16_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8_t) {
static_assert_imm3!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12692,7 +12692,7 @@ pub unsafe fn vst1q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1_t) {
static_assert!(LANE : i32 where LANE == 0);
*a = simd_extract(b, LANE as u32);
@@ -12707,7 +12707,7 @@ pub unsafe fn vst1_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x1_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12722,7 +12722,7 @@ pub unsafe fn vst1q_lane_p64<const LANE: i32>(a: *mut p64, b: poly64x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2_t) {
static_assert_imm1!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -12737,7 +12737,7 @@ pub unsafe fn vst1_lane_f32<const LANE: i32>(a: *mut f32, b: float32x2_t) {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_lane_f32<const LANE: i32>(a: *mut f32, b: float32x4_t) {
static_assert_imm2!(LANE);
*a = simd_extract(b, LANE as u32);
@@ -13543,7 +13543,7 @@ vst1q_s64_x4_(b.0, b.1, b.2, b.3, a)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) {
vst1_s8_x2(transmute(a), transmute(b))
}
@@ -13556,7 +13556,7 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) {
vst1_s16_x2(transmute(a), transmute(b))
}
@@ -13569,7 +13569,7 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) {
vst1_s32_x2(transmute(a), transmute(b))
}
@@ -13582,7 +13582,7 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) {
vst1_s64_x2(transmute(a), transmute(b))
}
@@ -13595,7 +13595,7 @@ pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) {
vst1q_s8_x2(transmute(a), transmute(b))
}
@@ -13608,7 +13608,7 @@ pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) {
vst1q_s16_x2(transmute(a), transmute(b))
}
@@ -13621,7 +13621,7 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) {
vst1q_s32_x2(transmute(a), transmute(b))
}
@@ -13634,7 +13634,7 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) {
vst1q_s64_x2(transmute(a), transmute(b))
}
@@ -13647,7 +13647,7 @@ pub unsafe fn vst1q_u64_x2(a: *mut u64, b: uint64x2x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) {
vst1_s8_x3(transmute(a), transmute(b))
}
@@ -13660,7 +13660,7 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) {
vst1_s16_x3(transmute(a), transmute(b))
}
@@ -13673,7 +13673,7 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) {
vst1_s32_x3(transmute(a), transmute(b))
}
@@ -13686,7 +13686,7 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
"aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { vst1_s64_x3(transmute(a), transmute(b)) } @@ -13699,7 +13699,7 @@ pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { vst1q_s8_x3(transmute(a), transmute(b)) } @@ -13712,7 +13712,7 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { vst1q_s16_x3(transmute(a), transmute(b)) } @@ -13725,7 +13725,7 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { vst1q_s32_x3(transmute(a), transmute(b)) } @@ -13738,7 +13738,7 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { vst1q_s64_x3(transmute(a), transmute(b)) } @@ -13751,7 +13751,7 @@ pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { vst1_s8_x4(transmute(a), transmute(b)) } @@ -13764,7 +13764,7 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { vst1_s16_x4(transmute(a), transmute(b)) } @@ -13777,7 +13777,7 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { vst1_s32_x4(transmute(a), transmute(b)) } @@ -13790,7 +13790,7 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { vst1_s64_x4(transmute(a), transmute(b)) } @@ -13803,7 +13803,7 @@ pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { vst1q_s8_x4(transmute(a), transmute(b)) } @@ -13816,7 +13816,7 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { vst1q_s16_x4(transmute(a), transmute(b)) } @@ -13829,7 +13829,7 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { vst1q_s32_x4(transmute(a), transmute(b)) } @@ -13842,7 +13842,7 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { vst1q_s64_x4(transmute(a), transmute(b)) } @@ 
-13855,7 +13855,7 @@ pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { vst1_s8_x2(transmute(a), transmute(b)) } @@ -13868,7 +13868,7 @@ pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { vst1_s8_x3(transmute(a), transmute(b)) } @@ -13881,7 +13881,7 @@ pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { vst1_s8_x4(transmute(a), transmute(b)) } @@ -13894,7 +13894,7 @@ pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { vst1q_s8_x2(transmute(a), transmute(b)) } @@ -13907,7 +13907,7 @@ pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { vst1q_s8_x3(transmute(a), transmute(b)) } @@ -13920,7 +13920,7 @@ pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { vst1q_s8_x4(transmute(a), transmute(b)) } @@ -13933,7 +13933,7 @@ pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) {
vst1_s16_x2(transmute(a), transmute(b))
}
@@ -13946,7 +13946,7 @@ pub unsafe fn vst1_p16_x2(a: *mut p16, b: poly16x4x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) {
vst1_s16_x3(transmute(a), transmute(b))
}
@@ -13959,7 +13959,7 @@ pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) {
vst1_s16_x4(transmute(a), transmute(b))
}
@@ -13972,7 +13972,7 @@ pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) {
vst1q_s16_x2(transmute(a), transmute(b))
}
@@ -13985,7 +13985,7 @@ pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) {
vst1q_s16_x3(transmute(a), transmute(b))
}
@@ -13998,7 +13998,7 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) {
vst1q_s16_x4(transmute(a), transmute(b))
}
@@ -14011,7 +14011,7 @@ pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) {
vst1_s64_x2(transmute(a), transmute(b))
}
@@ -14024,7 +14024,7 @@ pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) {
vst1_s64_x3(transmute(a), transmute(b))
}
@@ -14037,7 +14037,7 @@ pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) {
vst1_s64_x4(transmute(a), transmute(b))
}
@@ -14050,7 +14050,7 @@ pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) {
vst1q_s64_x2(transmute(a), transmute(b))
}
@@ -14063,7 +14063,7 @@ pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) {
vst1q_s64_x3(transmute(a), transmute(b))
}
@@ -14076,7 +14076,7 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) {
#[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st1))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) {
vst1q_s64_x4(transmute(a), transmute(b))
}
@@ -14518,7 +14518,7 @@ vst2_s64_(b.0, b.1, a as _)
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) {
uint8x8x2_t) { transmute(vst2_s8(transmute(a), transmute(b))) } @@ -14531,7 +14531,7 @@ pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { transmute(vst2_s16(transmute(a), transmute(b))) } @@ -14544,7 +14544,7 @@ pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { transmute(vst2_s32(transmute(a), transmute(b))) } @@ -14557,7 +14557,7 @@ pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { transmute(vst2q_s8(transmute(a), transmute(b))) } @@ -14570,7 +14570,7 @@ pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { transmute(vst2q_s16(transmute(a), transmute(b))) } @@ -14583,7 +14583,7 @@ pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { transmute(vst2q_s32(transmute(a), transmute(b))) } @@ -14596,7 +14596,7 @@ pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { transmute(vst2_s8(transmute(a), transmute(b))) } @@ -14609,7 +14609,7 @@ pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { 
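Every hunk in this run makes the same one-line change: the predicate guarding the stable attribute widens from target_arch = "aarch64" to not(target_arch = "arm"), so stability is declared on every non-arm target rather than on aarch64 alone. A minimal standalone sketch of how cfg_attr behaves under such a predicate; the demo function and the deprecated attribute are illustrative stand-ins, not stdarch items:

// cfg_attr(PRED, ATTR) expands to #[ATTR] only when PRED holds, so
// not(target_arch = "arm") covers aarch64 and any other non-arm target,
// where the old target_arch = "aarch64" covered aarch64 alone.
#[cfg_attr(not(target_arch = "arm"), deprecated(note = "illustration only"))]
pub fn demo() {}

fn main() {
    // Warns on aarch64, x86_64, and so on, where the attribute was applied;
    // on 32-bit arm the attribute is compiled away and no warning fires.
    demo();
}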
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { transmute(vst2_s16(transmute(a), transmute(b))) } @@ -14622,7 +14622,7 @@ pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { transmute(vst2q_s8(transmute(a), transmute(b))) } @@ -14635,7 +14635,7 @@ pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { transmute(vst2q_s16(transmute(a), transmute(b))) } @@ -14648,7 +14648,7 @@ pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { transmute(vst2_s64(transmute(a), transmute(b))) } @@ -14661,7 +14661,7 @@ pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { transmute(vst2_s64(transmute(a), transmute(b))) } @@ -14926,7 +14926,7 @@ vst2q_lane_s32_(b.0, b.1, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x2_t) { static_assert_imm3!(LANE); transmute(vst2_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -14941,7 +14941,7 @@ pub unsafe fn vst2_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x2_t) { static_assert_imm2!(LANE); transmute(vst2_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -14956,7 +14956,7 @@ pub unsafe fn vst2_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x2_t) { static_assert_imm1!(LANE); transmute(vst2_lane_s32::<LANE>(transmute(a), transmute(b))) @@ -14971,7 +14971,7 @@ pub unsafe fn vst2_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x2_t) { static_assert_imm3!(LANE); transmute(vst2q_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -14986,7 +14986,7 @@ pub unsafe fn vst2q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x2_t) { static_assert_imm2!(LANE); transmute(vst2q_lane_s32::<LANE>(transmute(a), transmute(b))) @@ -15001,7 +15001,7 @@ pub unsafe fn vst2q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x2_t) { static_assert_imm3!(LANE); transmute(vst2_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -15016,7 +15016,7 @@ pub unsafe fn vst2_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x2_t) { 
static_assert_imm2!(LANE); transmute(vst2_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -15031,7 +15031,7 @@ pub unsafe fn vst2_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x2_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst2q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x2_t) { static_assert_imm3!(LANE); transmute(vst2q_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -15350,7 +15350,7 @@ vst3_s64_(b.0, b.1, b.2, a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { transmute(vst3_s8(transmute(a), transmute(b))) } @@ -15363,7 +15363,7 @@ pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { transmute(vst3_s16(transmute(a), transmute(b))) } @@ -15376,7 +15376,7 @@ pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { transmute(vst3_s32(transmute(a), transmute(b))) } @@ -15389,7 +15389,7 @@ pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { transmute(vst3q_s8(transmute(a), transmute(b))) } @@ -15402,7 +15402,7 @@ pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { transmute(vst3q_s16(transmute(a), transmute(b))) } @@ -15415,7 +15415,7 @@ 
pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { transmute(vst3q_s32(transmute(a), transmute(b))) } @@ -15428,7 +15428,7 @@ pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { transmute(vst3_s8(transmute(a), transmute(b))) } @@ -15441,7 +15441,7 @@ pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { transmute(vst3_s16(transmute(a), transmute(b))) } @@ -15454,7 +15454,7 @@ pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { transmute(vst3q_s8(transmute(a), transmute(b))) } @@ -15467,7 +15467,7 @@ pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { transmute(vst3q_s16(transmute(a), transmute(b))) } @@ -15480,7 +15480,7 @@ pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { transmute(vst3_s64(transmute(a), transmute(b))) } @@ -15493,7 +15493,7 @@ pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { transmute(vst3_s64(transmute(a), transmute(b))) } @@ -15758,7 +15758,7 @@ vst3q_lane_s32_(b.0, b.1, b.2, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x3_t) { static_assert_imm3!(LANE); transmute(vst3_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -15773,7 +15773,7 @@ pub unsafe fn vst3_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x3_t) { static_assert_imm2!(LANE); transmute(vst3_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -15788,7 +15788,7 @@ pub unsafe fn vst3_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x3_t) { static_assert_imm1!(LANE); transmute(vst3_lane_s32::<LANE>(transmute(a), transmute(b))) @@ -15803,7 +15803,7 @@ pub unsafe fn vst3_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x3_t) { static_assert_imm3!(LANE); transmute(vst3q_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -15818,7 +15818,7 @@ pub unsafe fn vst3q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x3_t) { static_assert_imm2!(LANE); transmute(vst3q_lane_s32::<LANE>(transmute(a), transmute(b))) @@ 
-15833,7 +15833,7 @@ pub unsafe fn vst3q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x3_t) { static_assert_imm3!(LANE); transmute(vst3_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -15848,7 +15848,7 @@ pub unsafe fn vst3_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x3_t) { static_assert_imm2!(LANE); transmute(vst3_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -15863,7 +15863,7 @@ pub unsafe fn vst3_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x3_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst3q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x3_t) { static_assert_imm3!(LANE); transmute(vst3q_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -16182,7 +16182,7 @@ vst4_s64_(b.0, b.1, b.2, b.3, a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { transmute(vst4_s8(transmute(a), transmute(b))) } @@ -16195,7 +16195,7 @@ pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { transmute(vst4_s16(transmute(a), transmute(b))) } @@ -16208,7 +16208,7 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { 
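The body that follows is the delegation pattern used by every unsigned and polynomial store in these hunks: reinterpret the pointer and the register tuple, then call the signed implementation. A standalone sketch of why that is sound when the two types share size and layout (scalar stand-ins, not the real NEON types):

use core::mem::transmute;

// Written once for the signed element type.
unsafe fn store_i8x8(p: *mut i8, v: [i8; 8]) {
    core::ptr::copy_nonoverlapping(v.as_ptr(), p, 8);
}

// The unsigned wrapper only reinterprets: [u8; 8] and [i8; 8] have identical
// size and layout, so the transmute costs nothing at runtime.
unsafe fn store_u8x8(p: *mut u8, v: [u8; 8]) {
    store_i8x8(p as *mut i8, transmute::<[u8; 8], [i8; 8]>(v))
}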
transmute(vst4_s32(transmute(a), transmute(b))) } @@ -16221,7 +16221,7 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { transmute(vst4q_s8(transmute(a), transmute(b))) } @@ -16234,7 +16234,7 @@ pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { transmute(vst4q_s16(transmute(a), transmute(b))) } @@ -16247,7 +16247,7 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { transmute(vst4q_s32(transmute(a), transmute(b))) } @@ -16260,7 +16260,7 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { transmute(vst4_s8(transmute(a), transmute(b))) } @@ -16273,7 +16273,7 @@ pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { transmute(vst4_s16(transmute(a), transmute(b))) } @@ -16286,7 +16286,7 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { transmute(vst4q_s8(transmute(a), transmute(b))) } @@ -16299,7 +16299,7 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { transmute(vst4q_s16(transmute(a), transmute(b))) } @@ -16312,7 +16312,7 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { transmute(vst4_s64(transmute(a), transmute(b))) } @@ -16325,7 +16325,7 @@ pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { transmute(vst4_s64(transmute(a), transmute(b))) } @@ -16590,7 +16590,7 @@ vst4q_lane_s32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x4_t) { static_assert_imm3!(LANE); transmute(vst4_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -16605,7 +16605,7 @@ pub unsafe fn vst4_lane_u8<const LANE: i32>(a: *mut u8, b: uint8x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x4_t) { static_assert_imm2!(LANE); transmute(vst4_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -16620,7 +16620,7 @@ pub unsafe fn vst4_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x2x4_t) { static_assert_imm1!(LANE); transmute(vst4_lane_s32::<LANE>(transmute(a), transmute(b))) @@ -16635,7 +16635,7 @@ pub unsafe fn vst4_lane_u32<const LANE: i32>(a: 
*mut u32, b: uint32x2x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x4_t) { static_assert_imm3!(LANE); transmute(vst4q_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -16650,7 +16650,7 @@ pub unsafe fn vst4q_lane_u16<const LANE: i32>(a: *mut u16, b: uint16x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x4_t) { static_assert_imm2!(LANE); transmute(vst4q_lane_s32::<LANE>(transmute(a), transmute(b))) @@ -16665,7 +16665,7 @@ pub unsafe fn vst4q_lane_u32<const LANE: i32>(a: *mut u32, b: uint32x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x4_t) { static_assert_imm3!(LANE); transmute(vst4_lane_s8::<LANE>(transmute(a), transmute(b))) @@ -16680,7 +16680,7 @@ pub unsafe fn vst4_lane_p8<const LANE: i32>(a: *mut p8, b: poly8x8x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x4_t) { static_assert_imm2!(LANE); transmute(vst4_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -16695,7 +16695,7 @@ pub unsafe fn vst4_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x4x4_t) { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vst4q_lane_p16<const LANE: i32>(a: *mut p16, b: poly16x8x4_t) { static_assert_imm3!(LANE); transmute(vst4q_lane_s16::<LANE>(transmute(a), transmute(b))) @@ -16783,7 +16783,7 @@ vst4q_lane_f32_(b.0, b.1, b.2, b.3, LANE as i64, a as _) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_mul(a, b) } @@ -16796,7 +16796,7 @@ pub unsafe fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_mul(a, b) } @@ -16809,7 +16809,7 @@ pub unsafe fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { simd_mul(a, b) } @@ -16822,7 +16822,7 @@ pub unsafe fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { simd_mul(a, b) } @@ -16835,7 +16835,7 @@ pub unsafe fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { simd_mul(a, b) } @@ -16848,7 +16848,7 @@ pub unsafe fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { simd_mul(a, b) } @@ -16861,7 +16861,7 @@ pub unsafe fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_mul(a, b) } @@ -16874,7 +16874,7 @@ pub unsafe fn vmul_u8(a: uint8x8_t, b: uint8x8_t) 
-> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_mul(a, b) } @@ -16887,7 +16887,7 @@ pub unsafe fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_mul(a, b) } @@ -16900,7 +16900,7 @@ pub unsafe fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_mul(a, b) } @@ -16913,7 +16913,7 @@ pub unsafe fn vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_mul(a, b) } @@ -16926,7 +16926,7 @@ pub unsafe fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { simd_mul(a, b) } @@ -16939,7 +16939,7 @@ pub unsafe fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16958,7 +16958,7 @@ vmul_p8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = 
"aarch64"), assert_instr(pmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -16977,7 +16977,7 @@ vmulq_p8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { simd_mul(a, b) } @@ -16990,7 +16990,7 @@ pub unsafe fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { simd_mul(a, b) } @@ -17003,7 +17003,7 @@ pub unsafe fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { simd_mul(a, vdup_n_s16(b)) } @@ -17016,7 +17016,7 @@ pub unsafe fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { simd_mul(a, vdupq_n_s16(b)) } @@ -17029,7 +17029,7 @@ pub unsafe fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { simd_mul(a, vdup_n_s32(b)) } @@ -17042,7 +17042,7 @@ pub unsafe fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { simd_mul(a, vdupq_n_s32(b)) } @@ -17055,7 +17055,7 @@ pub unsafe fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { simd_mul(a, vdup_n_u16(b)) } @@ -17068,7 +17068,7 @@ pub unsafe fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { simd_mul(a, vdupq_n_u16(b)) } @@ -17081,7 +17081,7 @@ pub unsafe fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { simd_mul(a, vdup_n_u32(b)) } @@ -17094,7 +17094,7 @@ pub unsafe fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { simd_mul(a, vdupq_n_u32(b)) } @@ -17107,7 +17107,7 @@ pub unsafe fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { simd_mul(a, vdup_n_f32(b)) } @@ -17120,7 +17120,7 @@ pub unsafe fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { simd_mul(a, vdupq_n_f32(b)) } @@ -17134,7 +17134,7 @@ pub 
unsafe fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17149,7 +17149,7 @@ pub unsafe fn vmul_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int1 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17164,7 +17164,7 @@ pub unsafe fn vmul_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int16x8_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17179,7 +17179,7 @@ pub unsafe fn vmulq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17194,7 +17194,7 @@ pub unsafe fn vmulq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -17209,7 +17209,7 @@ 
pub unsafe fn vmul_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int3 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -17224,7 +17224,7 @@ pub unsafe fn vmul_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t { static_assert_imm1!(LANE); simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17239,7 +17239,7 @@ pub unsafe fn vmulq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmulq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17254,7 +17254,7 @@ pub unsafe fn vmulq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17269,7 +17269,7 @@ pub unsafe fn vmul_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> ui #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmul_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> uint16x4_t { static_assert_imm3!(LANE); simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17284,7 +17284,7 @@ pub unsafe fn vmul_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> 
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmulq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> uint16x8_t {
     static_assert_imm2!(LANE);
     simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
@@ -17299,7 +17299,7 @@ pub unsafe fn vmulq_lane_u16<const LANE: i32>(a: uint16x8_t, b: uint16x4_t) -> u
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmulq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     static_assert_imm3!(LANE);
     simd_mul(a, simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
@@ -17314,7 +17314,7 @@ pub unsafe fn vmulq_laneq_u16<const LANE: i32>(a: uint16x8_t, b: uint16x8_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmul_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     static_assert_imm1!(LANE);
     simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
@@ -17329,7 +17329,7 @@ pub unsafe fn vmul_lane_u32<const LANE: i32>(a: uint32x2_t, b: uint32x2_t) -> ui
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmul_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) -> uint32x2_t {
     static_assert_imm2!(LANE);
     simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
@@ -17344,7 +17344,7 @@ pub unsafe fn vmul_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) -> u
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmulq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> uint32x4_t {
     static_assert_imm1!(LANE);
     simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
@@ -17359,7 +17359,7 @@ pub unsafe fn vmulq_lane_u32<const LANE: i32>(a: uint32x4_t, b: uint32x2_t) -> u
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mul, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmulq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     static_assert_imm2!(LANE);
     simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
@@ -17374,7 +17374,7 @@ pub unsafe fn vmulq_laneq_u32<const LANE: i32>(a: uint32x4_t, b: uint32x4_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmul_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     static_assert_imm1!(LANE);
     simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
@@ -17389,7 +17389,7 @@ pub unsafe fn vmul_lane_f32<const LANE: i32>(a: float32x2_t, b: float32x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmul_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) -> float32x2_t {
     static_assert_imm2!(LANE);
     simd_mul(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]))
@@ -17404,7 +17404,7 @@ pub unsafe fn vmul_laneq_f32<const LANE: i32>(a: float32x2_t, b: float32x4_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmulq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) -> float32x4_t {
     static_assert_imm1!(LANE);
     simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
@@ -17419,7 +17419,7 @@ pub unsafe fn vmulq_lane_f32<const LANE: i32>(a: float32x4_t, b: float32x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmul, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmulq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     static_assert_imm2!(LANE);
     simd_mul(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
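The by-lane multiplies above broadcast one lane of `b` and lower to a single mul/fmul. A minimal usage sketch, not part of the patch, assuming an aarch64 target where std::arch::aarch64 exposes these intrinsics:

// Illustrative only: multiply every element of `a` by lane 1 of `b`.
#[cfg(target_arch = "aarch64")]
fn mul_by_lane() {
    use std::arch::aarch64::*;
    unsafe {
        let a = vdupq_n_f32(3.0);                           // [3.0; 4]
        let b = vsetq_lane_f32::<1>(2.0, vdupq_n_f32(1.0)); // lane 1 = 2.0
        let r = vmulq_laneq_f32::<1>(a, b);                 // [6.0; 4]
        assert_eq!(vgetq_lane_f32::<0>(r), 6.0);
    }
}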
@@ -17433,7 +17433,7 @@ pub unsafe fn vmulq_laneq_f32<const LANE: i32>(a: float32x4_t, b: float32x4_t) -
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -17452,7 +17452,7 @@ vmull_s8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -17471,7 +17471,7 @@ vmull_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -17490,7 +17490,7 @@ vmull_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -17509,7 +17509,7 @@ vmull_u8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -17528,7 +17528,7 @@ vmull_u16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -17547,7 +17547,7 @@ vmull_u32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(pmull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -17566,7 +17566,7 @@ vmull_p8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t {
     vmull_s16(a, vdup_n_s16(b))
 }
@@ -17579,7 +17579,7 @@ pub unsafe fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t {
     vmull_s32(a, vdup_n_s32(b))
 }
@@ -17592,7 +17592,7 @@ pub unsafe fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t {
     vmull_u16(a, vdup_n_u16(b))
 }
@@ -17605,7 +17605,7 @@ pub unsafe fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t {
     vmull_u32(a, vdup_n_u32(b))
 }
@@ -17619,7 +17619,7 @@ pub unsafe fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmull_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int32x4_t {
     static_assert_imm2!(LANE);
     vmull_s16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]))
@@ -17634,7 +17634,7 @@ pub unsafe fn vmull_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
"1.59.0"))] pub unsafe fn vmull_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int32x4_t { static_assert_imm3!(LANE); vmull_s16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17649,7 +17649,7 @@ pub unsafe fn vmull_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int64x2_t { static_assert_imm1!(LANE); vmull_s32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -17664,7 +17664,7 @@ pub unsafe fn vmull_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int64x2_t { static_assert_imm2!(LANE); vmull_s32(a, simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32])) @@ -17679,7 +17679,7 @@ pub unsafe fn vmull_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> in #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { static_assert_imm2!(LANE); vmull_u16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17694,7 +17694,7 @@ pub unsafe fn vmull_lane_u16<const LANE: i32>(a: uint16x4_t, b: uint16x4_t) -> u #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> uint32x4_t { static_assert_imm3!(LANE); vmull_u16(a, simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32])) @@ -17709,7 +17709,7 @@ pub unsafe fn vmull_laneq_u16<const LANE: i32>(a: uint16x4_t, b: uint16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umull, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmull_lane_u32<const LANE: i32>(a: uint32x2_t, b: 
@@ -17738,7 +17738,7 @@ pub unsafe fn vmull_laneq_u32<const LANE: i32>(a: uint32x2_t, b: uint32x4_t) ->
 #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -17757,7 +17757,7 @@ vfma_f32_(b, c, a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -17776,7 +17776,7 @@ vfmaq_f32_(b, c, a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t {
     vfma_f32(a, b, vdup_n_f32_vfp4(c))
 }
@@ -17789,7 +17789,7 @@ pub unsafe fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t
 #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmla))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t {
     vfmaq_f32(a, b, vdupq_n_f32_vfp4(c))
 }
@@ -17802,7 +17802,7 @@ pub unsafe fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t
 #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
"1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { let b: float32x2_t = simd_neg(b); vfma_f32(a, b, c) @@ -17816,7 +17816,7 @@ pub unsafe fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { let b: float32x4_t = simd_neg(b); vfmaq_f32(a, b, c) @@ -17830,7 +17830,7 @@ pub unsafe fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { vfms_f32(a, b, vdup_n_f32_vfp4(c)) } @@ -17843,7 +17843,7 @@ pub unsafe fn vfms_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmls))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { vfmsq_f32(a, b, vdupq_n_f32_vfp4(c)) } @@ -17856,7 +17856,7 @@ pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { simd_sub(a, b) } @@ -17869,7 +17869,7 @@ pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { simd_sub(a, b) } @@ -17882,7 +17882,7 @@ pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))] -#[cfg_attr(target_arch = "aarch64", 
@@ -17856,7 +17856,7 @@ pub unsafe fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     simd_sub(a, b)
 }
@@ -17869,7 +17869,7 @@ pub unsafe fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     simd_sub(a, b)
 }
@@ -17882,7 +17882,7 @@ pub unsafe fn vsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     simd_sub(a, b)
 }
@@ -17895,7 +17895,7 @@ pub unsafe fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     simd_sub(a, b)
 }
@@ -17908,7 +17908,7 @@ pub unsafe fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     simd_sub(a, b)
 }
@@ -17921,7 +17921,7 @@ pub unsafe fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     simd_sub(a, b)
 }
@@ -17934,7 +17934,7 @@ pub unsafe fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     simd_sub(a, b)
 }
@@ -17947,7 +17947,7 @@ pub unsafe fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     simd_sub(a, b)
 }
@@ -17960,7 +17960,7 @@ pub unsafe fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     simd_sub(a, b)
 }
@@ -17973,7 +17973,7 @@ pub unsafe fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     simd_sub(a, b)
 }
@@ -17986,7 +17986,7 @@ pub unsafe fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     simd_sub(a, b)
 }
@@ -17999,7 +17999,7 @@ pub unsafe fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     simd_sub(a, b)
 }
@@ -18012,7 +18012,7 @@ pub unsafe fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
     simd_sub(a, b)
 }
@@ -18025,7 +18025,7 @@ pub unsafe fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     simd_sub(a, b)
 }
@@ -18038,7 +18038,7 @@ pub unsafe fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
     simd_sub(a, b)
 }
@@ -18051,7 +18051,7 @@ pub unsafe fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
     simd_sub(a, b)
 }
@@ -18064,7 +18064,7 @@ pub unsafe fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     simd_sub(a, b)
 }
@@ -18077,7 +18077,7 @@ pub unsafe fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     simd_sub(a, b)
 }
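The plain vsub forms are lane-wise two's-complement subtraction, so integer results wrap. A minimal sketch, not part of the patch, assuming an aarch64 target:

// Illustrative only: 1 - 2 wraps to 255 in every u8 lane.
#[cfg(target_arch = "aarch64")]
fn lanewise_sub() {
    use std::arch::aarch64::*;
    unsafe {
        let a = vdup_n_u8(1);
        let b = vdup_n_u8(2);
        let r = vsub_u8(a, b);
        assert_eq!(vget_lane_u8::<0>(r), 255);
    }
}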
"aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { simd_xor(a, b) } @@ -18142,7 +18142,7 @@ pub unsafe fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { simd_xor(a, b) } @@ -18155,7 +18155,7 @@ pub unsafe fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { simd_xor(a, b) } @@ -18168,7 +18168,7 @@ pub unsafe fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { a ^ b } @@ -18181,7 +18181,7 @@ pub unsafe fn vaddq_p128(a: p128, b: p128) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { let c: i16x8 = i16x8::new(8, 8, 8, 8, 8, 8, 8, 8); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18195,7 +18195,7 @@ pub unsafe fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { let c: i32x4 = i32x4::new(16, 16, 16, 16); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18209,7 +18209,7 @@ pub unsafe fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = 
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { let c: i64x2 = i64x2::new(32, 32); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18223,7 +18223,7 @@ pub unsafe fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { let c: u16x8 = u16x8::new(8, 8, 8, 8, 8, 8, 8, 8); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18237,7 +18237,7 @@ pub unsafe fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { let c: u32x4 = u32x4::new(16, 16, 16, 16); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18251,7 +18251,7 @@ pub unsafe fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { let c: u64x2 = u64x2::new(32, 32); simd_cast(simd_shr(simd_sub(a, b), transmute(c))) @@ -18265,7 +18265,7 @@ pub unsafe fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { let d: int8x8_t = vsubhn_s16(b, c); simd_shuffle16!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) @@ -18279,7 +18279,7 @@ pub unsafe fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x1 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { let d: int16x4_t = vsubhn_s32(b, c); 
@@ -18293,7 +18293,7 @@ pub unsafe fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t {
     let d: int32x2_t = vsubhn_s64(b, c);
     simd_shuffle4!(a, d, [0, 1, 2, 3])
@@ -18307,7 +18307,7 @@ pub unsafe fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t {
     let d: uint8x8_t = vsubhn_u16(b, c);
     simd_shuffle16!(a, d, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])
@@ -18321,7 +18321,7 @@ pub unsafe fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uin
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_t {
     let d: uint16x4_t = vsubhn_u32(b, c);
     simd_shuffle8!(a, d, [0, 1, 2, 3, 4, 5, 6, 7])
@@ -18335,7 +18335,7 @@ pub unsafe fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> ui
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(subhn2))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_t {
     let d: uint32x2_t = vsubhn_u64(b, c);
     simd_shuffle4!(a, d, [0, 1, 2, 3])
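Per the bodies above, vsubhn subtracts, shifts each difference right by half the element width, and narrows; the high variants pack the narrowed result into the upper half of an existing vector. A minimal sketch, not part of the patch, assuming an aarch64 target:

// Illustrative only: keep the high byte of each 16-bit difference.
#[cfg(target_arch = "aarch64")]
fn subtract_high_narrow() {
    use std::arch::aarch64::*;
    unsafe {
        let a = vdupq_n_s16(0x1200);
        let b = vdupq_n_s16(0x0100);
        let r = vsubhn_s16(a, b); // (0x1200 - 0x0100) >> 8 = 0x11 per lane
        assert_eq!(vget_lane_s8::<0>(r), 0x11);
    }
}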
@@ -18349,7 +18349,7 @@ pub unsafe fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> ui
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18368,7 +18368,7 @@ vhsub_u8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18387,7 +18387,7 @@ vhsubq_u8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18406,7 +18406,7 @@ vhsub_u16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18425,7 +18425,7 @@ vhsubq_u16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18444,7 +18444,7 @@ vhsub_u32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uhsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18463,7 +18463,7 @@ vhsubq_u32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18482,7 +18482,7 @@ vhsub_s8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18501,7 +18501,7 @@ vhsubq_s8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18520,7 +18520,7 @@ vhsub_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18539,7 +18539,7 @@ vhsubq_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18558,7 +18558,7 @@ vhsub_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shsub))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18577,7 +18577,7 @@ vhsubq_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t {
     simd_sub(a, simd_cast(b))
 }
@@ -18590,7 +18590,7 @@ pub unsafe fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t {
     simd_sub(a, simd_cast(b))
 }
@@ -18603,7 +18603,7 @@ pub unsafe fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubw))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t {
     simd_sub(a, simd_cast(b))
 }
@@ -18616,7 +18616,7 @@ pub unsafe fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
     simd_sub(a, simd_cast(b))
 }
@@ -18629,7 +18629,7 @@ pub unsafe fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t {
     simd_sub(a, simd_cast(b))
 }
@@ -18642,7 +18642,7 @@ pub unsafe fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubw))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t {
     simd_sub(a, simd_cast(b))
 }
@@ -18655,7 +18655,7 @@ pub unsafe fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
     let c: int16x8_t = simd_cast(a);
     let d: int16x8_t = simd_cast(b);
@@ -18670,7 +18670,7 @@ pub unsafe fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
     let c: int32x4_t = simd_cast(a);
     let d: int32x4_t = simd_cast(b);
@@ -18685,7 +18685,7 @@ pub unsafe fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssubl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
     let c: int64x2_t = simd_cast(a);
     let d: int64x2_t = simd_cast(b);
target_arch = "aarch64"), assert_instr(ssubl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { let c: int64x2_t = simd_cast(a); let d: int64x2_t = simd_cast(b); @@ -18700,7 +18700,7 @@ pub unsafe fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { let c: uint16x8_t = simd_cast(a); let d: uint16x8_t = simd_cast(b); @@ -18715,7 +18715,7 @@ pub unsafe fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { let c: uint32x4_t = simd_cast(a); let d: uint32x4_t = simd_cast(b); @@ -18730,7 +18730,7 @@ pub unsafe fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usubl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { let c: uint64x2_t = simd_cast(a); let d: uint64x2_t = simd_cast(b); @@ -18745,7 +18745,7 @@ pub unsafe fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18764,7 +18764,7 @@ vmax_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -18783,7 +18783,7 @@ vmaxq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr(all(test, target_arch = "aarch64"), 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18802,7 +18802,7 @@ vmax_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18821,7 +18821,7 @@ vmaxq_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18840,7 +18840,7 @@ vmax_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smax))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18859,7 +18859,7 @@ vmaxq_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18878,7 +18878,7 @@ vmax_u8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18897,7 +18897,7 @@ vmaxq_u8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18916,7 +18916,7 @@ vmax_u16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18935,7 +18935,7 @@ vmaxq_u16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18954,7 +18954,7 @@ vmax_u32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umax))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18973,7 +18973,7 @@ vmaxq_u32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -18992,7 +18992,7 @@ vmax_f32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmax))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19011,7 +19011,7 @@ vmaxq_f32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19030,7 +19030,7 @@ vmaxnm_f32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmaxnm))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19049,7 +19049,7 @@ vmaxnmq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19068,7 +19068,7 @@ vmin_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19087,7 +19087,7 @@ vminq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19106,7 +19106,7 @@ vmin_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19125,7 +19125,7 @@ vminq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19144,7 +19144,7 @@ vmin_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(smin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19163,7 +19163,7 @@ vminq_s32_(a, b) 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19182,7 +19182,7 @@ vmin_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19201,7 +19201,7 @@ vminq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19220,7 +19220,7 @@ vmin_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19239,7 +19239,7 @@ vminq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19258,7 +19258,7 @@ vmin_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(umin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19277,7 +19277,7 @@ vminq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19296,7 +19296,7 @@ vmin_f32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fmin))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19315,7 +19315,7 @@ vminq_f32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19334,7 +19334,7 @@ vminnm_f32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(fminnm))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19353,7 +19353,7 @@ vminnmq_f32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(faddp))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vpadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19372,7 +19372,7 @@ vpadd_f32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19391,7 +19391,7 @@ vqdmull_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
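The `vmax*`/`vmin*` hunks above all receive the same stability-gate fix. As a usage sketch (assuming an aarch64 target, where these intrinsics have been stable through `std::arch::aarch64` since 1.59.0):

```rust
#[cfg(target_arch = "aarch64")]
fn main() {
    use std::arch::aarch64::{vdupq_n_f32, vgetq_lane_f32, vmaxq_f32, vminq_f32};
    // SAFETY: NEON is a baseline feature on aarch64, so these intrinsics
    // are always available there.
    unsafe {
        let a = vdupq_n_f32(1.5); // all four lanes = 1.5
        let b = vdupq_n_f32(-2.0); // all four lanes = -2.0
        let hi = vmaxq_f32(a, b); // lane-wise maximum
        let lo = vminq_f32(a, b); // lane-wise minimum
        assert_eq!(vgetq_lane_f32::<0>(hi), 1.5);
        assert_eq!(vgetq_lane_f32::<0>(lo), -2.0);
    }
}

#[cfg(not(target_arch = "aarch64"))]
fn main() {}
```

The `vmaxnm*`/`vminnm*` variants differ only in NaN handling: they lower to FMAXNM/FMINNM, which follow the IEEE maxNum/minNum rules instead of propagating NaN.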
@@ -19410,7 +19410,7 @@ vqdmull_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t {
     vqdmull_s16(a, vdup_n_s16(b))
 }
@@ -19423,7 +19423,7 @@ pub unsafe fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t {
     vqdmull_s32(a, vdup_n_s32(b))
 }
@@ -19437,7 +19437,7 @@ pub unsafe fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull, N = 2))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmull_lane_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int32x4_t {
     static_assert_imm2!(N);
     let b: int16x4_t = simd_shuffle4!(b, b, <const N: i32> [N as u32, N as u32, N as u32, N as u32]);
@@ -19453,7 +19453,7 @@ pub unsafe fn vqdmull_lane_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int3
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmull, N = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmull_lane_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int64x2_t {
     static_assert_imm1!(N);
     let b: int32x2_t = simd_shuffle2!(b, b, <const N: i32> [N as u32, N as u32]);
@@ -19468,7 +19468,7 @@ pub unsafe fn vqdmull_lane_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int6
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
     vqaddq_s32(a, vqdmull_s16(b, c))
 }
@@ -19481,7 +19481,7 @@ pub unsafe fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
     vqaddq_s64(a, vqdmull_s32(b, c))
 }
@@ -19494,7 +19494,7 @@ pub unsafe fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
     vqaddq_s32(a, vqdmull_n_s16(b, c))
 }
@@ -19507,7 +19507,7 @@ pub unsafe fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
     vqaddq_s64(a, vqdmull_n_s32(b, c))
 }
@@ -19521,7 +19521,7 @@ pub unsafe fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal, N = 2))]
 #[rustc_legacy_const_generics(3)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlal_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
     static_assert_imm2!(N);
     vqaddq_s32(a, vqdmull_lane_s16::<N>(b, c))
@@ -19536,7 +19536,7 @@ pub unsafe fn vqdmlal_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int1
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlal, N = 1))]
 #[rustc_legacy_const_generics(3)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlal_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
     static_assert_imm1!(N);
     vqaddq_s64(a, vqdmull_lane_s32::<N>(b, c))
@@ -19550,7 +19550,7 @@ pub unsafe fn vqdmlal_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int3
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
     vqsubq_s32(a, vqdmull_s16(b, c))
 }
@@ -19563,7 +19563,7 @@ pub unsafe fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
     vqsubq_s64(a, vqdmull_s32(b, c))
 }
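The `vqdmull`/`vqdmlal`/`vqdmlsl` hunks cover the saturating doubling-multiply family: `vqdmull` computes `2 * a * b` per lane into a widened vector, and `vqdmlal`/`vqdmlsl` accumulate into or subtract from a third operand, as the wrapper bodies above show. A small sketch (assuming an aarch64 target):

```rust
#[cfg(target_arch = "aarch64")]
unsafe fn doubling_mul_demo() {
    use std::arch::aarch64::{vdup_n_s16, vdupq_n_s32, vgetq_lane_s32, vqdmlal_s16, vqdmull_s16};
    let a = vdup_n_s16(1_000);
    let b = vdup_n_s16(2_000);
    // Widening doubling multiply: each i32 lane is 2 * 1000 * 2000.
    let prod = vqdmull_s16(a, b);
    assert_eq!(vgetq_lane_s32::<0>(prod), 4_000_000);
    // Multiply-accumulate: acc + 2 * a * b, saturating on overflow.
    let acc = vdupq_n_s32(1);
    let mla = vqdmlal_s16(acc, a, b);
    assert_eq!(vgetq_lane_s32::<0>(mla), 4_000_001);
}
```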
@@ -19576,7 +19576,7 @@ pub unsafe fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
     vqsubq_s32(a, vqdmull_n_s16(b, c))
 }
@@ -19589,7 +19589,7 @@ pub unsafe fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
     vqsubq_s64(a, vqdmull_n_s32(b, c))
 }
@@ -19603,7 +19603,7 @@ pub unsafe fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl, N = 2))]
 #[rustc_legacy_const_generics(3)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlsl_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t {
     static_assert_imm2!(N);
     vqsubq_s32(a, vqdmull_lane_s16::<N>(b, c))
@@ -19618,7 +19618,7 @@ pub unsafe fn vqdmlsl_lane_s16<const N: i32>(a: int32x4_t, b: int16x4_t, c: int1
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmlsl, N = 1))]
 #[rustc_legacy_const_generics(3)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmlsl_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t {
     static_assert_imm1!(N);
     vqsubq_s64(a, vqdmull_lane_s32::<N>(b, c))
@@ -19632,7 +19632,7 @@ pub unsafe fn vqdmlsl_lane_s32<const N: i32>(a: int64x2_t, b: int32x2_t, c: int3
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19651,7 +19651,7 @@ vqdmulh_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19670,7 +19670,7 @@ vqdmulhq_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19689,7 +19689,7 @@ vqdmulh_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19708,7 +19708,7 @@ vqdmulhq_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t {
     let b: int16x4_t = vdup_n_s16(b);
     vqdmulh_s16(a, b)
@@ -19722,7 +19722,7 @@ pub unsafe fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t {
     let b: int32x2_t = vdup_n_s32(b);
     vqdmulh_s32(a, b)
@@ -19736,7 +19736,7 @@ pub unsafe fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t {
     let b: int16x8_t = vdupq_n_s16(b);
     vqdmulhq_s16(a, b)
@@ -19750,7 +19750,7 @@ pub unsafe fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
     let b: int32x4_t = vdupq_n_s32(b);
     vqdmulhq_s32(a, b)
@@ -19765,7 +19765,7 @@ pub unsafe fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     static_assert_imm3!(LANE);
     vqdmulhq_s16(a, vdupq_n_s16(simd_extract(b, LANE as u32)))
@@ -19780,7 +19780,7 @@ pub unsafe fn vqdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t {
     static_assert_imm3!(LANE);
     vqdmulh_s16(a, vdup_n_s16(simd_extract(b, LANE as u32)))
@@ -19795,7 +19795,7 @@ pub unsafe fn vqdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     static_assert_imm2!(LANE);
     vqdmulhq_s32(a, vdupq_n_s32(simd_extract(b, LANE as u32)))
@@ -19810,7 +19810,7 @@ pub unsafe fn vqdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqdmulh, LANE = 0))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
     static_assert_imm2!(LANE);
     vqdmulh_s32(a, vdup_n_s32(simd_extract(b, LANE as u32)))
@@ -19824,7 +19824,7 @@ pub unsafe fn vqdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) ->
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqmovn_s16(a: int16x8_t) -> int8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19843,7 +19843,7 @@ vqmovn_s16_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqmovn_s32(a: int32x4_t) -> int16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19862,7 +19862,7 @@ vqmovn_s32_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqmovn_s64(a: int64x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19881,7 +19881,7 @@ vqmovn_s64_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19900,7 +19900,7 @@ vqmovn_u16_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19919,7 +19919,7 @@ vqmovn_u32_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqxtn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19938,7 +19938,7 @@ vqmovn_u64_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqmovun_s16(a: int16x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -19957,7 +19957,7 @@ vqmovun_s16_(a)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
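The `vqmovn*` (saturating narrow) and `vqmovun*` (saturating narrow from signed to unsigned) hunks clamp each lane into the smaller type's range rather than truncating. A sketch (assuming an aarch64 target):

```rust
#[cfg(target_arch = "aarch64")]
unsafe fn narrow_demo() {
    use std::arch::aarch64::{vdupq_n_s16, vget_lane_s8, vget_lane_u8, vqmovn_s16, vqmovun_s16};
    let wide = vdupq_n_s16(300); // 300 does not fit in an 8-bit lane
    // Signed saturating narrow: 300 clamps to i8::MAX = 127.
    assert_eq!(vget_lane_s8::<0>(vqmovn_s16(wide)), 127);
    // Signed-to-unsigned saturating narrow: 300 clamps to u8::MAX = 255.
    assert_eq!(vget_lane_u8::<0>(vqmovun_s16(wide)), 255);
}
```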
since = "1.59.0"))] pub unsafe fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19976,7 +19976,7 @@ vqmovun_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqxtun))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -19995,7 +19995,7 @@ vqmovun_s64_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20014,7 +20014,7 @@ vqrdmulh_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20033,7 +20033,7 @@ vqrdmulhq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20052,7 +20052,7 @@ vqrdmulh_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20071,7 +20071,7 @@ vqrdmulhq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { vqrdmulh_s16(a, vdup_n_s16(b)) } @@ -20084,7 +20084,7 @@ pub unsafe fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { vqrdmulhq_s16(a, vdupq_n_s16(b)) } @@ -20097,7 +20097,7 @@ pub unsafe fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { vqrdmulh_s32(a, vdup_n_s32(b)) } @@ -20110,7 +20110,7 @@ pub unsafe fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { vqrdmulhq_s32(a, vdupq_n_s32(b)) } @@ -20124,7 +20124,7 @@ pub unsafe fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); let b: int16x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20140,7 +20140,7 @@ pub unsafe fn vqrdmulh_lane_s16<const LANE: i32>(a: int16x4_t, b: int16x4_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> int16x4_t { static_assert_imm3!(LANE); let b: int16x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -20156,7 +20156,7 @@ pub unsafe fn vqrdmulh_laneq_s16<const LANE: i32>(a: int16x4_t, b: int16x8_t) -> #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrdmulhq_lane_s16<const LANE: 
     static_assert_imm2!(LANE);
     let b: int16x8_t = simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
@@ -20172,7 +20172,7 @@ pub unsafe fn vqrdmulhq_lane_s16<const LANE: i32>(a: int16x8_t, b: int16x4_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     static_assert_imm3!(LANE);
     let b: int16x8_t = simd_shuffle8!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
@@ -20188,7 +20188,7 @@ pub unsafe fn vqrdmulhq_laneq_s16<const LANE: i32>(a: int16x8_t, b: int16x8_t) -
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     static_assert_imm1!(LANE);
     let b: int32x2_t = simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]);
@@ -20204,7 +20204,7 @@ pub unsafe fn vqrdmulh_lane_s32<const LANE: i32>(a: int32x2_t, b: int32x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) -> int32x2_t {
     static_assert_imm2!(LANE);
     let b: int32x2_t = simd_shuffle2!(b, b, <const LANE: i32> [LANE as u32, LANE as u32]);
@@ -20220,7 +20220,7 @@ pub unsafe fn vqrdmulh_laneq_s32<const LANE: i32>(a: int32x2_t, b: int32x4_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) -> int32x4_t {
     static_assert_imm1!(LANE);
     let b: int32x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
@@ -20236,7 +20236,7 @@ pub unsafe fn vqrdmulhq_lane_s32<const LANE: i32>(a: int32x4_t, b: int32x2_t) ->
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrdmulh, LANE = 1))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     static_assert_imm2!(LANE);
     let b: int32x4_t = simd_shuffle4!(b, b, <const LANE: i32> [LANE as u32, LANE as u32, LANE as u32, LANE as u32]);
@@ -20251,7 +20251,7 @@ pub unsafe fn vqrdmulhq_laneq_s32<const LANE: i32>(a: int32x4_t, b: int32x4_t) -
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20270,7 +20270,7 @@ vqrshl_s8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20289,7 +20289,7 @@ vqrshlq_s8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20308,7 +20308,7 @@ vqrshl_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20327,7 +20327,7 @@ vqrshlq_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20346,7 +20346,7 @@ vqrshl_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20365,7 +20365,7 @@ vqrshlq_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20384,7 +20384,7 @@ vqrshl_s64_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20403,7 +20403,7 @@ vqrshlq_s64_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20422,7 +20422,7 @@ vqrshl_u8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20441,7 +20441,7 @@ vqrshlq_u8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20460,7 +20460,7 @@ vqrshl_u16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -20479,7 +20479,7 @@ vqrshlq_u16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))]
target_arch = "aarch64"), assert_instr(uqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20498,7 +20498,7 @@ vqrshl_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20517,7 +20517,7 @@ vqrshlq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20536,7 +20536,7 @@ vqrshl_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqrshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20888,7 +20888,7 @@ vqrshrun_n_s64_(a, N) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20907,7 +20907,7 @@ vqshl_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20926,7 +20926,7 @@ vqshlq_s8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s16(a: int16x4_t, b: 
int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20945,7 +20945,7 @@ vqshl_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20964,7 +20964,7 @@ vqshlq_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -20983,7 +20983,7 @@ vqshl_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21002,7 +21002,7 @@ vqshlq_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21021,7 +21021,7 @@ vqshl_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21040,7 +21040,7 @@ vqshlq_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21059,7 +21059,7 @@ vqshl_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21078,7 +21078,7 @@ vqshlq_u8_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21097,7 +21097,7 @@ vqshl_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21116,7 +21116,7 @@ vqshlq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21135,7 +21135,7 @@ vqshl_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21154,7 +21154,7 @@ vqshlq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -21173,7 +21173,7 @@ vqshl_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { 
#[allow(improper_ctypes)] extern "unadjusted" { @@ -21193,7 +21193,7 @@ vqshlq_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t { static_assert_imm3!(N); vqshl_s8(a, vdup_n_s8(N as _)) @@ -21208,7 +21208,7 @@ pub unsafe fn vqshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t { static_assert_imm3!(N); vqshlq_s8(a, vdupq_n_s8(N as _)) @@ -21223,7 +21223,7 @@ pub unsafe fn vqshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t { static_assert_imm4!(N); vqshl_s16(a, vdup_n_s16(N as _)) @@ -21238,7 +21238,7 @@ pub unsafe fn vqshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t { static_assert_imm4!(N); vqshlq_s16(a, vdupq_n_s16(N as _)) @@ -21253,7 +21253,7 @@ pub unsafe fn vqshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t { static_assert_imm5!(N); vqshl_s32(a, vdup_n_s32(N as _)) @@ -21268,7 +21268,7 @@ pub unsafe fn vqshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t { static_assert_imm5!(N); vqshlq_s32(a, vdupq_n_s32(N as _)) @@ -21283,7 +21283,7 @@ pub 
unsafe fn vqshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t { static_assert_imm6!(N); vqshl_s64(a, vdup_n_s64(N as _)) @@ -21298,7 +21298,7 @@ pub unsafe fn vqshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t { static_assert_imm6!(N); vqshlq_s64(a, vdupq_n_s64(N as _)) @@ -21313,7 +21313,7 @@ pub unsafe fn vqshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t { static_assert_imm3!(N); vqshl_u8(a, vdup_n_s8(N as _)) @@ -21328,7 +21328,7 @@ pub unsafe fn vqshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t { static_assert_imm3!(N); vqshlq_u8(a, vdupq_n_s8(N as _)) @@ -21343,7 +21343,7 @@ pub unsafe fn vqshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t { static_assert_imm4!(N); vqshl_u16(a, vdup_n_s16(N as _)) @@ -21358,7 +21358,7 @@ pub unsafe fn vqshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t { static_assert_imm4!(N); vqshlq_u16(a, vdupq_n_s16(N as _)) @@ -21373,7 +21373,7 @@ pub unsafe fn 
vqshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t { static_assert_imm5!(N); vqshl_u32(a, vdup_n_s32(N as _)) @@ -21388,7 +21388,7 @@ pub unsafe fn vqshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t { static_assert_imm5!(N); vqshlq_u32(a, vdupq_n_s32(N as _)) @@ -21403,7 +21403,7 @@ pub unsafe fn vqshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t { static_assert_imm6!(N); vqshl_u64(a, vdup_n_s64(N as _)) @@ -21418,7 +21418,7 @@ pub unsafe fn vqshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t { static_assert_imm6!(N); vqshlq_u64(a, vdupq_n_s64(N as _)) @@ -22061,7 +22061,7 @@ vqshrun_n_s64_(a, N) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22080,7 +22080,7 @@ vrsqrte_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrte))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22099,7 +22099,7 @@ vrsqrteq_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch 
= "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursqrte))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22118,7 +22118,7 @@ vrsqrte_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ursqrte))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22137,7 +22137,7 @@ vrsqrteq_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrts))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22156,7 +22156,7 @@ vrsqrts_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frsqrts))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22175,7 +22175,7 @@ vrsqrtsq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpe_f32(a: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22194,7 +22194,7 @@ vrecpe_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecpe))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22213,7 +22213,7 @@ vrecpeq_f32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urecpe))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vrecpe_u32(a: uint32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22232,7 +22232,7 @@ vrecpe_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urecpe))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22251,7 +22251,7 @@ vrecpeq_u32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecps))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22270,7 +22270,7 @@ vrecps_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(frecps))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -22289,7 +22289,7 @@ vrecpsq_f32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { transmute(a) } @@ -22302,7 +22302,7 @@ pub unsafe fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { transmute(a) } @@ -22315,7 +22315,7 @@ pub unsafe fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { transmute(a) } @@ -22328,7 +22328,7 @@ pub unsafe fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, 
target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { transmute(a) } @@ -22341,7 +22341,7 @@ pub unsafe fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { transmute(a) } @@ -22354,7 +22354,7 @@ pub unsafe fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { transmute(a) } @@ -22367,7 +22367,7 @@ pub unsafe fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { transmute(a) } @@ -22380,7 +22380,7 @@ pub unsafe fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { transmute(a) } @@ -22393,7 +22393,7 @@ pub unsafe fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { transmute(a) } @@ -22406,7 +22406,7 @@ pub unsafe fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { transmute(a) } @@ -22419,7 +22419,7 @@ pub unsafe fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { transmute(a) } @@ -22432,7 +22432,7 @@ pub unsafe fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { transmute(a) } @@ -22445,7 +22445,7 @@ pub unsafe fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { transmute(a) } @@ -22458,7 +22458,7 @@ pub unsafe fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { transmute(a) } @@ -22471,7 +22471,7 @@ pub unsafe fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { transmute(a) } @@ -22484,7 +22484,7 @@ pub unsafe fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { transmute(a) } @@ -22497,7 +22497,7 @@ pub unsafe fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { transmute(a) } @@ -22510,7 +22510,7 @@ pub unsafe fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { transmute(a) } @@ -22523,7 +22523,7 @@ pub unsafe fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { transmute(a) } @@ -22536,7 +22536,7 @@ pub unsafe fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { transmute(a) } @@ -22549,7 +22549,7 @@ pub unsafe fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { transmute(a) } @@ -22562,7 +22562,7 @@ pub unsafe fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { transmute(a) } @@ -22575,7 +22575,7 @@ pub unsafe fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = 
"neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { transmute(a) } @@ -22588,7 +22588,7 @@ pub unsafe fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { transmute(a) } @@ -22601,7 +22601,7 @@ pub unsafe fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { transmute(a) } @@ -22614,7 +22614,7 @@ pub unsafe fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { transmute(a) } @@ -22627,7 +22627,7 @@ pub unsafe fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { transmute(a) } @@ -22640,7 +22640,7 @@ pub unsafe fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { transmute(a) } @@ -22653,7 +22653,7 @@ pub unsafe fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { transmute(a) } @@ -22666,7 +22666,7 @@ pub unsafe fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { transmute(a) } @@ -22679,7 +22679,7 @@ pub unsafe fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { transmute(a) } @@ -22692,7 +22692,7 @@ pub unsafe fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { transmute(a) } @@ -22705,7 +22705,7 @@ pub unsafe fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { transmute(a) } @@ -22718,7 +22718,7 @@ pub unsafe fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { transmute(a) } @@ -22731,7 +22731,7 @@ pub unsafe fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { transmute(a) } @@ -22744,7 +22744,7 @@ pub unsafe fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { transmute(a) } @@ -22757,7 +22757,7 @@ pub unsafe fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { transmute(a) } @@ -22770,7 +22770,7 @@ pub unsafe fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { transmute(a) } @@ -22783,7 +22783,7 @@ pub unsafe fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { transmute(a) } @@ -22796,7 +22796,7 @@ pub unsafe fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { transmute(a) } @@ -22809,7 +22809,7 @@ pub unsafe fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { transmute(a) } @@ -22822,7 +22822,7 @@ pub unsafe fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { transmute(a) } @@ -22835,7 +22835,7 @@ pub unsafe fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { transmute(a) } @@ -22848,7 +22848,7 @@ pub unsafe fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { transmute(a) } @@ -22861,7 +22861,7 @@ pub unsafe fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { transmute(a) } @@ -22874,7 +22874,7 @@ pub unsafe fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { transmute(a) } @@ -22887,7 +22887,7 @@ pub unsafe fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { transmute(a) } @@ -22900,7 +22900,7 @@ pub unsafe fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { transmute(a) } @@ -22913,7 +22913,7 @@ pub unsafe fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { transmute(a) } @@ -22926,7 +22926,7 @@ pub unsafe fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { transmute(a) } @@ -22939,7 +22939,7 @@ pub unsafe fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { transmute(a) } @@ -22952,7 +22952,7 @@ pub unsafe fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { transmute(a) } @@ -22965,7 +22965,7 @@ pub unsafe fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { transmute(a) } @@ -22978,7 +22978,7 @@ pub unsafe fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { transmute(a) } @@ -22991,7 +22991,7 @@ pub unsafe fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { transmute(a) } @@ -23004,7 +23004,7 @@ pub unsafe fn vreinterpretq_u8_s16(a: 
int16x8_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { transmute(a) } @@ -23017,7 +23017,7 @@ pub unsafe fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { transmute(a) } @@ -23030,7 +23030,7 @@ pub unsafe fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { transmute(a) } @@ -23043,7 +23043,7 @@ pub unsafe fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { transmute(a) } @@ -23056,7 +23056,7 @@ pub unsafe fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { transmute(a) } @@ -23069,7 +23069,7 @@ pub unsafe fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { transmute(a) } @@ -23082,7 +23082,7 @@ pub unsafe fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { transmute(a) } @@ -23095,7 +23095,7 @@ pub unsafe fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { transmute(a) } @@ -23108,7 +23108,7 @@ pub unsafe fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { transmute(a) } @@ -23121,7 +23121,7 @@ pub unsafe fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { transmute(a) } @@ -23134,7 +23134,7 @@ pub unsafe fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { transmute(a) } @@ -23147,7 +23147,7 @@ pub unsafe fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { transmute(a) } @@ -23160,7 +23160,7 @@ pub unsafe fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { 
transmute(a) } @@ -23173,7 +23173,7 @@ pub unsafe fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { transmute(a) } @@ -23186,7 +23186,7 @@ pub unsafe fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { transmute(a) } @@ -23199,7 +23199,7 @@ pub unsafe fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { transmute(a) } @@ -23212,7 +23212,7 @@ pub unsafe fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { transmute(a) } @@ -23225,7 +23225,7 @@ pub unsafe fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { transmute(a) } @@ -23238,7 +23238,7 @@ pub unsafe fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { transmute(a) } @@ -23251,7 +23251,7 @@ pub unsafe fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { transmute(a) } @@ -23264,7 +23264,7 @@ pub unsafe fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { transmute(a) } @@ -23277,7 +23277,7 @@ pub unsafe fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { transmute(a) } @@ -23290,7 +23290,7 @@ pub unsafe fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { transmute(a) } @@ -23303,7 +23303,7 @@ pub unsafe fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { transmute(a) } @@ -23316,7 +23316,7 @@ pub unsafe fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { transmute(a) } @@ -23329,7 +23329,7 @@ pub unsafe fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub 
unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { transmute(a) } @@ -23342,7 +23342,7 @@ pub unsafe fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { transmute(a) } @@ -23355,7 +23355,7 @@ pub unsafe fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { transmute(a) } @@ -23368,7 +23368,7 @@ pub unsafe fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { transmute(a) } @@ -23381,7 +23381,7 @@ pub unsafe fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { transmute(a) } @@ -23394,7 +23394,7 @@ pub unsafe fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { transmute(a) } @@ -23407,7 +23407,7 @@ pub unsafe fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { transmute(a) } @@ -23420,7 +23420,7 @@ pub unsafe fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { transmute(a) } @@ -23433,7 +23433,7 @@ pub unsafe fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { transmute(a) } @@ -23446,7 +23446,7 @@ pub unsafe fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { transmute(a) } @@ -23459,7 +23459,7 @@ pub unsafe fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { transmute(a) } @@ -23472,7 +23472,7 @@ pub unsafe fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { transmute(a) } @@ -23485,7 +23485,7 @@ pub unsafe fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { transmute(a) } @@ -23498,7 +23498,7 @@ pub unsafe fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), 
stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { transmute(a) } @@ -23511,7 +23511,7 @@ pub unsafe fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { transmute(a) } @@ -23524,7 +23524,7 @@ pub unsafe fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { transmute(a) } @@ -23537,7 +23537,7 @@ pub unsafe fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { transmute(a) } @@ -23550,7 +23550,7 @@ pub unsafe fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { transmute(a) } @@ -23563,7 +23563,7 @@ pub unsafe fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { transmute(a) } @@ -23576,7 +23576,7 @@ pub unsafe fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { transmute(a) } @@ -23589,7 +23589,7 @@ pub unsafe fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { transmute(a) } @@ -23602,7 +23602,7 @@ pub unsafe fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { transmute(a) } @@ -23615,7 +23615,7 @@ pub unsafe fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { transmute(a) } @@ -23628,7 +23628,7 @@ pub unsafe fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { transmute(a) } @@ -23641,7 +23641,7 @@ pub unsafe fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { transmute(a) } @@ -23654,7 +23654,7 @@ pub unsafe fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { transmute(a) } @@ -23667,7 +23667,7 @@ pub unsafe fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = 
"1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { transmute(a) } @@ -23680,7 +23680,7 @@ pub unsafe fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { transmute(a) } @@ -23693,7 +23693,7 @@ pub unsafe fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { transmute(a) } @@ -23706,7 +23706,7 @@ pub unsafe fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { transmute(a) } @@ -23719,7 +23719,7 @@ pub unsafe fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { transmute(a) } @@ -23732,7 +23732,7 @@ pub unsafe fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { transmute(a) } @@ -23745,7 +23745,7 @@ pub unsafe fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { transmute(a) } @@ -23758,7 +23758,7 @@ pub unsafe fn vreinterpretq_p16_p8(a: 
poly8x16_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { transmute(a) } @@ -23771,7 +23771,7 @@ pub unsafe fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { transmute(a) } @@ -23784,7 +23784,7 @@ pub unsafe fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { transmute(a) } @@ -23797,7 +23797,7 @@ pub unsafe fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { transmute(a) } @@ -23810,7 +23810,7 @@ pub unsafe fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { transmute(a) } @@ -23823,7 +23823,7 @@ pub unsafe fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { transmute(a) } @@ -23836,7 +23836,7 @@ pub unsafe fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] 
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { transmute(a) } @@ -23849,7 +23849,7 @@ pub unsafe fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { transmute(a) } @@ -23862,7 +23862,7 @@ pub unsafe fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { transmute(a) } @@ -23875,7 +23875,7 @@ pub unsafe fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { transmute(a) } @@ -23888,7 +23888,7 @@ pub unsafe fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { transmute(a) } @@ -23901,7 +23901,7 @@ pub unsafe fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { transmute(a) } @@ -23914,7 +23914,7 @@ pub unsafe fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { transmute(a) } @@ -23927,7 
+23927,7 @@ pub unsafe fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { transmute(a) } @@ -23940,7 +23940,7 @@ pub unsafe fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { transmute(a) } @@ -23953,7 +23953,7 @@ pub unsafe fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { transmute(a) } @@ -23966,7 +23966,7 @@ pub unsafe fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { transmute(a) } @@ -23979,7 +23979,7 @@ pub unsafe fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { transmute(a) } @@ -23992,7 +23992,7 @@ pub unsafe fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { transmute(a) } @@ -24005,7 +24005,7 @@ pub unsafe fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { transmute(a) } @@ -24018,7 +24018,7 @@ pub unsafe fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { transmute(a) } @@ -24031,7 +24031,7 @@ pub unsafe fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { transmute(a) } @@ -24044,7 +24044,7 @@ pub unsafe fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { transmute(a) } @@ -24057,7 +24057,7 @@ pub unsafe fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { transmute(a) } @@ -24070,7 +24070,7 @@ pub unsafe fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { transmute(a) } @@ -24083,7 +24083,7 @@ pub unsafe fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s32(a: 
int32x2_t) -> poly8x8_t { transmute(a) } @@ -24096,7 +24096,7 @@ pub unsafe fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { transmute(a) } @@ -24109,7 +24109,7 @@ pub unsafe fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { transmute(a) } @@ -24122,7 +24122,7 @@ pub unsafe fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { transmute(a) } @@ -24135,7 +24135,7 @@ pub unsafe fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { transmute(a) } @@ -24148,7 +24148,7 @@ pub unsafe fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { transmute(a) } @@ -24161,7 +24161,7 @@ pub unsafe fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { transmute(a) } @@ -24174,7 +24174,7 @@ pub unsafe fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { transmute(a) } @@ -24187,7 +24187,7 @@ pub unsafe fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { transmute(a) } @@ -24200,7 +24200,7 @@ pub unsafe fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { transmute(a) } @@ -24213,7 +24213,7 @@ pub unsafe fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { transmute(a) } @@ -24226,7 +24226,7 @@ pub unsafe fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { transmute(a) } @@ -24239,7 +24239,7 @@ pub unsafe fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { transmute(a) } @@ -24252,7 +24252,7 @@ pub unsafe fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = 
"arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { transmute(a) } @@ -24265,7 +24265,7 @@ pub unsafe fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { transmute(a) } @@ -24278,7 +24278,7 @@ pub unsafe fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { transmute(a) } @@ -24291,7 +24291,7 @@ pub unsafe fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { transmute(a) } @@ -24304,7 +24304,7 @@ pub unsafe fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { transmute(a) } @@ -24317,7 +24317,7 @@ pub unsafe fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { transmute(a) } @@ -24330,7 +24330,7 @@ pub unsafe fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { transmute(a) } @@ -24343,7 +24343,7 @@ pub unsafe fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { transmute(a) } @@ -24356,7 +24356,7 @@ pub unsafe fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { transmute(a) } @@ -24369,7 +24369,7 @@ pub unsafe fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { transmute(a) } @@ -24382,7 +24382,7 @@ pub unsafe fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { transmute(a) } @@ -24395,7 +24395,7 @@ pub unsafe fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { transmute(a) } @@ -24408,7 +24408,7 @@ pub unsafe fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { transmute(a) } @@ -24421,7 +24421,7 @@ pub unsafe fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { transmute(a) } @@ -24434,7 +24434,7 @@ pub unsafe fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { transmute(a) } @@ -24447,7 +24447,7 @@ pub unsafe fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { transmute(a) } @@ -24460,7 +24460,7 @@ pub unsafe fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { transmute(a) } @@ -24473,7 +24473,7 @@ pub unsafe fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { transmute(a) } @@ -24486,7 +24486,7 @@ pub unsafe fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { transmute(a) } @@ -24499,7 +24499,7 @@ pub unsafe fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { transmute(a) } @@ -24512,7 +24512,7 @@ pub unsafe fn vreinterpret_u64_s16(a: int16x4_t) -> 
uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { transmute(a) } @@ -24525,7 +24525,7 @@ pub unsafe fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { transmute(a) } @@ -24538,7 +24538,7 @@ pub unsafe fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { transmute(a) } @@ -24551,7 +24551,7 @@ pub unsafe fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { transmute(a) } @@ -24564,7 +24564,7 @@ pub unsafe fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { transmute(a) } @@ -24577,7 +24577,7 @@ pub unsafe fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { transmute(a) } @@ -24590,7 +24590,7 @@ pub unsafe fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = 
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { transmute(a) } @@ -24603,7 +24603,7 @@ pub unsafe fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { transmute(a) } @@ -24616,7 +24616,7 @@ pub unsafe fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { transmute(a) } @@ -24629,7 +24629,7 @@ pub unsafe fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { transmute(a) } @@ -24642,7 +24642,7 @@ pub unsafe fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { transmute(a) } @@ -24655,7 +24655,7 @@ pub unsafe fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { transmute(a) } @@ -24668,7 +24668,7 @@ pub unsafe fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> 
@@ -24681,7 +24681,7 @@ pub unsafe fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 {
     transmute(a)
 }
@@ -24694,7 +24694,7 @@ pub unsafe fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 {
     transmute(a)
 }
@@ -24707,7 +24707,7 @@ pub unsafe fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t {
     transmute(a)
 }
@@ -24720,7 +24720,7 @@ pub unsafe fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t {
     transmute(a)
 }
@@ -24733,7 +24733,7 @@ pub unsafe fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t {
     transmute(a)
 }
@@ -24746,7 +24746,7 @@ pub unsafe fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t {
     transmute(a)
 }
@@ -24759,7 +24759,7 @@ pub unsafe fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t {
     transmute(a)
 }
target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { transmute(a) } @@ -24772,7 +24772,7 @@ pub unsafe fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { transmute(a) } @@ -24785,7 +24785,7 @@ pub unsafe fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { transmute(a) } @@ -24798,7 +24798,7 @@ pub unsafe fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { transmute(a) } @@ -24811,7 +24811,7 @@ pub unsafe fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { transmute(a) } @@ -24824,7 +24824,7 @@ pub unsafe fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { transmute(a) } @@ -24837,7 +24837,7 @@ pub unsafe fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
@@ -24850,7 +24850,7 @@ pub unsafe fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t {
     transmute(a)
 }
@@ -24863,7 +24863,7 @@ pub unsafe fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t {
     transmute(a)
 }
@@ -24876,7 +24876,7 @@ pub unsafe fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t {
     transmute(a)
 }
@@ -24889,7 +24889,7 @@ pub unsafe fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t {
     transmute(a)
 }
@@ -24902,7 +24902,7 @@ pub unsafe fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t {
     transmute(a)
 }
@@ -24915,7 +24915,7 @@ pub unsafe fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t {
     transmute(a)
 }
@@ -24928,7 +24928,7 @@ pub unsafe fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t {
     transmute(a)
 }
@@ -24941,7 +24941,7 @@ pub unsafe fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t {
     transmute(a)
 }
@@ -24954,7 +24954,7 @@ pub unsafe fn vreinterpretq_s16_p128(a: p128) -> int16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t {
     transmute(a)
 }
@@ -24967,7 +24967,7 @@ pub unsafe fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t {
     transmute(a)
 }
@@ -24980,7 +24980,7 @@ pub unsafe fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t {
     transmute(a)
 }
@@ -24993,7 +24993,7 @@ pub unsafe fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t {
     transmute(a)
 }
@@ -25006,7 +25006,7 @@ pub unsafe fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t {
     transmute(a)
 }
@@ -25019,7 +25019,7 @@ pub unsafe fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t {
     transmute(a)
 }
@@ -25032,7 +25032,7 @@ pub unsafe fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t {
     transmute(a)
 }
@@ -25045,7 +25045,7 @@ pub unsafe fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t {
     transmute(a)
 }
@@ -25058,7 +25058,7 @@ pub unsafe fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t {
     transmute(a)
 }
@@ -25071,7 +25071,7 @@ pub unsafe fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t {
     transmute(a)
 }
@@ -25084,7 +25084,7 @@ pub unsafe fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t {
     transmute(a)
 }
@@ -25097,7 +25097,7 @@ pub unsafe fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t {
     transmute(a)
 }
@@ -25110,7 +25110,7 @@ pub unsafe fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t {
     transmute(a)
 }
@@ -25123,7 +25123,7 @@ pub unsafe fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t {
     transmute(a)
 }
@@ -25136,7 +25136,7 @@ pub unsafe fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t {
     transmute(a)
 }
@@ -25149,7 +25149,7 @@ pub unsafe fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t {
     transmute(a)
 }
@@ -25162,7 +25162,7 @@ pub unsafe fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t {
     transmute(a)
 }
@@ -25175,7 +25175,7 @@ pub unsafe fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t {
     transmute(a)
 }
"1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { transmute(a) } @@ -25188,7 +25188,7 @@ pub unsafe fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { transmute(a) } @@ -25201,7 +25201,7 @@ pub unsafe fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { transmute(a) } @@ -25214,7 +25214,7 @@ pub unsafe fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { transmute(a) } @@ -25227,7 +25227,7 @@ pub unsafe fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { transmute(a) } @@ -25240,7 +25240,7 @@ pub unsafe fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { transmute(a) } @@ -25253,7 +25253,7 @@ pub unsafe fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { transmute(a) } @@ -25266,7 +25266,7 @@ pub unsafe fn vreinterpretq_p128_s8(a: 
@@ -25279,7 +25279,7 @@ pub unsafe fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 {
     transmute(a)
 }
@@ -25292,7 +25292,7 @@ pub unsafe fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t {
     transmute(a)
 }
@@ -25305,7 +25305,7 @@ pub unsafe fn vreinterpretq_s8_p128(a: p128) -> int8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t {
     transmute(a)
 }
@@ -25318,7 +25318,7 @@ pub unsafe fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "aes,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t {
     transmute(a)
 }
@@ -25331,7 +25331,7 @@ pub unsafe fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t {
     transmute(a)
 }
@@ -25344,7 +25344,7 @@ pub unsafe fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t {
     transmute(a)
 }
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { transmute(a) } @@ -25357,7 +25357,7 @@ pub unsafe fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { transmute(a) } @@ -25370,7 +25370,7 @@ pub unsafe fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { transmute(a) } @@ -25383,7 +25383,7 @@ pub unsafe fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { transmute(a) } @@ -25396,7 +25396,7 @@ pub unsafe fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { transmute(a) } @@ -25409,7 +25409,7 @@ pub unsafe fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { transmute(a) } @@ -25422,7 +25422,7 @@ pub unsafe fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { transmute(a) } @@ -25435,7 +25435,7 @@ 
@@ -25448,7 +25448,7 @@ pub unsafe fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t {
     transmute(a)
 }
@@ -25461,7 +25461,7 @@ pub unsafe fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t {
     transmute(a)
 }
@@ -25474,7 +25474,7 @@ pub unsafe fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t {
     transmute(a)
 }
@@ -25487,7 +25487,7 @@ pub unsafe fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t {
     transmute(a)
 }
@@ -25500,7 +25500,7 @@ pub unsafe fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t {
     transmute(a)
 }
@@ -25513,7 +25513,7 @@ pub unsafe fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t {
     transmute(a)
 }
"aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { transmute(a) } @@ -25526,7 +25526,7 @@ pub unsafe fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { transmute(a) } @@ -25539,7 +25539,7 @@ pub unsafe fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { transmute(a) } @@ -25552,7 +25552,7 @@ pub unsafe fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { transmute(a) } @@ -25565,7 +25565,7 @@ pub unsafe fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { transmute(a) } @@ -25578,7 +25578,7 @@ pub unsafe fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { transmute(a) } @@ -25591,7 +25591,7 @@ pub unsafe fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub 
@@ -25604,7 +25604,7 @@ pub unsafe fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t {
     transmute(a)
 }
@@ -25617,7 +25617,7 @@ pub unsafe fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t {
     transmute(a)
 }
@@ -25630,7 +25630,7 @@ pub unsafe fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t {
     transmute(a)
 }
@@ -25643,7 +25643,7 @@ pub unsafe fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t {
     transmute(a)
 }
@@ -25656,7 +25656,7 @@ pub unsafe fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t {
     transmute(a)
 }
@@ -25669,7 +25669,7 @@ pub unsafe fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t {
     transmute(a)
 }
@@ -25682,7 +25682,7 @@ pub unsafe fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t {
     transmute(a)
 }
@@ -25695,7 +25695,7 @@ pub unsafe fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t {
     transmute(a)
 }
@@ -25708,7 +25708,7 @@ pub unsafe fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t {
     transmute(a)
 }
@@ -25721,7 +25721,7 @@ pub unsafe fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t {
     transmute(a)
 }
@@ -25734,7 +25734,7 @@ pub unsafe fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t {
     transmute(a)
 }
@@ -25747,7 +25747,7 @@ pub unsafe fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t {
     transmute(a)
 }
@@ -25760,7 +25760,7 @@ pub unsafe fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t {
     transmute(a)
 }
@@ -25773,7 +25773,7 @@ pub unsafe fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t {
     transmute(a)
 }
@@ -25786,7 +25786,7 @@ pub unsafe fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t {
     transmute(a)
 }
@@ -25799,7 +25799,7 @@ pub unsafe fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t {
     transmute(a)
 }
@@ -25812,7 +25812,7 @@ pub unsafe fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t {
     transmute(a)
 }
@@ -25825,7 +25825,7 @@ pub unsafe fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t {
     transmute(a)
 }
@@ -25838,7 +25838,7 @@ pub unsafe fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t {
     transmute(a)
 }
@@ -25851,7 +25851,7 @@ pub unsafe fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t {
     transmute(a)
 }
@@ -25864,7 +25864,7 @@ pub unsafe fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t {
     transmute(a)
 }
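All of the `vreinterpret*` intrinsics above are pure bit-pattern casts between equally sized vector types, implemented as `transmute(a)` and asserted to compile to no instruction at all (`assert_instr(nop)`). A portable sketch of the same idea without the NEON types, reinterpreting two `f32` lanes as two `u32` lanes the way `vreinterpret_u32_f32` does:

    fn main() {
        let floats: [f32; 2] = [1.0, -2.5];
        // SAFETY: [f32; 2] and [u32; 2] have identical size and alignment,
        // and u32 has no invalid bit patterns, so this transmute is sound.
        let lanes: [u32; 2] = unsafe { core::mem::transmute(floats) };
        assert_eq!(lanes[0], 1.0f32.to_bits());
        assert_eq!(lanes[1], (-2.5f32).to_bits());
    }
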
@@ -25877,7 +25877,7 @@ pub unsafe fn vreinterpretq_f32_p128(a: p128) -> float32x4_t {
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -25896,7 +25896,7 @@ vrshl_s8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -25915,7 +25915,7 @@ vrshlq_s8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -25934,7 +25934,7 @@ vrshl_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -25953,7 +25953,7 @@ vrshlq_s16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -25972,7 +25972,7 @@ vrshl_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -25991,7 +25991,7 @@ vrshlq_s32_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -26010,7 +26010,7 @@ vrshl_s64_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -26029,7 +26029,7 @@ vrshlq_s64_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -26048,7 +26048,7 @@ vrshl_u8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -26067,7 +26067,7 @@ vrshlq_u8_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
@@ -26086,7 +26086,7 @@ vrshl_u16_(a, b)
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26105,7 +26105,7 @@ vrshlq_u16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26124,7 +26124,7 @@ vrshl_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26143,7 +26143,7 @@ vrshlq_u32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26162,7 +26162,7 @@ vrshl_u64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(urshl))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26182,7 +26182,7 @@ vrshlq_u64_(a, b) #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); vrshl_s8(a, vdup_n_s8((-N) as _)) @@ -26197,7 +26197,7 @@ pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", 
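Every hunk in this stretch makes the same edit: the stability attribute's cfg gate widens from target_arch = "aarch64" to not(target_arch = "arm"), so the stable(feature = "neon_intrinsics") marker now applies on any non-arm target instead of naming aarch64 alone; the intrinsic bodies are untouched. The regated vrshl_* functions are ARM's rounding variable shifts (SRSHL/URSHL): each lane of a is shifted by the corresponding signed lane of b, left for non-negative counts and right with rounding for negative ones, which is why vrshr_n_s8 above reduces to vrshl_s8 with a splatted -N. A scalar sketch of one lane (my own illustrative model, not code from this diff; it assumes |b| stays below the lane width and ignores the out-of-range counts the architecture also defines):

    // One lane of vrshl_s8 / SRSHL, modeled in plain Rust (illustrative only).
    fn rounding_shift_s8(a: i8, b: i8) -> i8 {
        if b >= 0 {
            ((a as i32) << b) as i8 // non-negative count: plain left shift
        } else {
            let n = -(b as i32); // negative count: right shift, rounding to nearest
            (((a as i32) + (1 << (n - 1))) >> n) as i8
        }
    }

    fn main() {
        assert_eq!(rounding_shift_s8(7, -2), 2); // (7 + 2) >> 2
        assert_eq!(rounding_shift_s8(3, 1), 6);
    }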
@@ -26197,7 +26197,7 @@ pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srshr, N = 2))]
 #[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
     static_assert!(N : i32 where N >= 1 && N <= 8);
     vrshlq_s8(a, vdupq_n_s8((-N) as _))
[... same one-line stability-gate change in each of:]
@@ -26212,7 +26212,7 @@ pub unsafe fn vrshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t
@@ -26227,7 +26227,7 @@ pub unsafe fn vrshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t
@@ -26242,7 +26242,7 @@ pub unsafe fn vrshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t
@@ -26257,7 +26257,7 @@ pub unsafe fn vrshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t
@@ -26272,7 +26272,7 @@ pub unsafe fn vrshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t
@@ -26287,7 +26287,7 @@ pub unsafe fn vrshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t
@@ -26302,7 +26302,7 @@ pub unsafe fn vrshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t
@@ -26317,7 +26317,7 @@ pub unsafe fn vrshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t
@@ -26332,7 +26332,7 @@ pub unsafe fn vrshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t
@@ -26347,7 +26347,7 @@ pub unsafe fn vrshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t
@@ -26362,7 +26362,7 @@ pub unsafe fn vrshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t
@@ -26377,7 +26377,7 @@ pub unsafe fn vrshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t
@@ -26392,7 +26392,7 @@ pub unsafe fn vrshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t
@@ -26407,7 +26407,7 @@ pub unsafe fn vrshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t
@@ -26533,7 +26533,7 @@ vrshrn_n_s64_(a, N)
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rshrn, N = 2))]
 #[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t {
     static_assert!(N : i32 where N >= 1 && N <= 8);
     transmute(vrshrn_n_s16::<N>(transmute(a)))
[... and likewise in:]
@@ -26548,7 +26548,7 @@ pub unsafe fn vrshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t
@@ -26563,7 +26563,7 @@ pub unsafe fn vrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t
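The vrshr_n_*/vrshrn_n_* hunks above all hinge on one rounding rule: add 1 << (N - 1) before shifting right by N. The narrowing variants then keep only the low half of each lane, and the unsigned ones are thin transmute wrappers over the signed implementation, as the diff shows; #[rustc_legacy_const_generics(1)] additionally keeps the original C-style call shape, accepting N in ordinary argument position. A self-contained model of one vrshrn_n_u16 lane (an illustrative sketch mirroring the static_assert! bounds in the diff, not the intrinsic itself):

    fn rshrn_u16<const N: u32>(a: u16) -> u8 {
        assert!(N >= 1 && N <= 8); // mirrors the diff's static_assert! bound
        ((a as u32 + (1 << (N - 1))) >> N) as u8 // rounded shift, keep the low byte
    }

    fn main() {
        assert_eq!(rshrn_u16::<2>(7), 2);         // (7 + 2) >> 2
        assert_eq!(rshrn_u16::<8>(0x1234), 0x12); // high byte of the rounded value
    }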
@@ -26578,7 +26578,7 @@ pub unsafe fn vrshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t {
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(srsra, N = 2))]
 #[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     static_assert!(N : i32 where N >= 1 && N <= 8);
     simd_add(a, vrshr_n_s8::<N>(b))
[... same one-line stability-gate change in each of:]
@@ -26593,7 +26593,7 @@ pub unsafe fn vrsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t
@@ -26608,7 +26608,7 @@ pub unsafe fn vrsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t
@@ -26623,7 +26623,7 @@ pub unsafe fn vrsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t
@@ -26638,7 +26638,7 @@ pub unsafe fn vrsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
@@ -26653,7 +26653,7 @@ pub unsafe fn vrsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
@@ -26668,7 +26668,7 @@ pub unsafe fn vrsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
@@ -26683,7 +26683,7 @@ pub unsafe fn vrsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
@@ -26698,7 +26698,7 @@ pub unsafe fn vrsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t
@@ -26713,7 +26713,7 @@ pub unsafe fn vrsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t
@@ -26728,7 +26728,7 @@ pub unsafe fn vrsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t
@@ -26743,7 +26743,7 @@ pub unsafe fn vrsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t
@@ -26758,7 +26758,7 @@ pub unsafe fn vrsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t
@@ -26773,7 +26773,7 @@ pub unsafe fn vrsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t
@@ -26788,7 +26788,7 @@ pub unsafe fn vrsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t
@@ -26803,7 +26803,7 @@ pub unsafe fn vrsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t
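As the hunks above spell out, vrsra_n_* is simply simd_add(a, vrshr_n_*::<N>(b)): a rounded right shift folded into an accumulator. A runnable check against the stable core::arch::aarch64 API (assumes an AArch64 host, where NEON is a baseline feature; vdup_n_u8 and vget_lane_u8 are the ordinary splat and extract intrinsics):

    #[cfg(target_arch = "aarch64")]
    fn main() {
        use core::arch::aarch64::*;
        unsafe {
            let acc = vdup_n_u8(10);
            let x = vdup_n_u8(7);
            let r = vrsra_n_u8::<2>(acc, x); // 10 + ((7 + 2) >> 2) in every lane
            assert_eq!(vget_lane_u8::<0>(r), 12);
        }
    }

    #[cfg(not(target_arch = "aarch64"))]
    fn main() {}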
"aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26836,7 +26836,7 @@ vrsubhn_s16_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26855,7 +26855,7 @@ vrsubhn_s32_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -26874,7 +26874,7 @@ vrsubhn_s64_(a, b) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { transmute(vrsubhn_s16(transmute(a), transmute(b))) } @@ -26887,7 +26887,7 @@ pub unsafe fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { transmute(vrsubhn_s32(transmute(a), transmute(b))) } @@ -26900,7 +26900,7 @@ pub unsafe fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(rsubhn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { transmute(vrsubhn_s64(transmute(a), transmute(b))) } @@ -26914,7 +26914,7 @@ pub unsafe fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = 
"neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s8<const LANE: i32>(a: i8, b: int8x8_t) -> int8x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -26929,7 +26929,7 @@ pub unsafe fn vset_lane_s8<const LANE: i32>(a: i8, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> int16x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -26944,7 +26944,7 @@ pub unsafe fn vset_lane_s16<const LANE: i32>(a: i16, b: int16x4_t) -> int16x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> int32x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -26959,7 +26959,7 @@ pub unsafe fn vset_lane_s32<const LANE: i32>(a: i32, b: int32x2_t) -> int32x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_s64<const LANE: i32>(a: i64, b: int64x1_t) -> int64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(b, LANE as u32, a) @@ -26974,7 +26974,7 @@ pub unsafe fn vset_lane_s64<const LANE: i32>(a: i64, b: int64x1_t) -> int64x1_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u8<const LANE: i32>(a: u8, b: uint8x8_t) -> uint8x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -26989,7 +26989,7 @@ pub unsafe fn vset_lane_u8<const LANE: i32>(a: u8, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u16<const LANE: i32>(a: u16, b: uint16x4_t) -> uint16x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -27004,7 +27004,7 @@ pub unsafe fn vset_lane_u16<const LANE: i32>(a: u16, b: uint16x4_t) -> uint16x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] 
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u32<const LANE: i32>(a: u32, b: uint32x2_t) -> uint32x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -27019,7 +27019,7 @@ pub unsafe fn vset_lane_u32<const LANE: i32>(a: u32, b: uint32x2_t) -> uint32x2_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_u64<const LANE: i32>(a: u64, b: uint64x1_t) -> uint64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(b, LANE as u32, a) @@ -27034,7 +27034,7 @@ pub unsafe fn vset_lane_u64<const LANE: i32>(a: u64, b: uint64x1_t) -> uint64x1_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p8<const LANE: i32>(a: p8, b: poly8x8_t) -> poly8x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -27049,7 +27049,7 @@ pub unsafe fn vset_lane_p8<const LANE: i32>(a: p8, b: poly8x8_t) -> poly8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p16<const LANE: i32>(a: p16, b: poly16x4_t) -> poly16x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -27064,7 +27064,7 @@ pub unsafe fn vset_lane_p16<const LANE: i32>(a: p16, b: poly16x4_t) -> poly16x4_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_p64<const LANE: i32>(a: p64, b: poly64x1_t) -> poly64x1_t { static_assert!(LANE : i32 where LANE == 0); simd_insert(b, LANE as u32, a) @@ -27079,7 +27079,7 @@ pub unsafe fn vset_lane_p64<const LANE: i32>(a: p64, b: poly64x1_t) -> poly64x1_ #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s8<const LANE: i32>(a: i8, b: int8x16_t) -> int8x16_t { static_assert_imm4!(LANE); simd_insert(b, LANE as u32, 
a) @@ -27094,7 +27094,7 @@ pub unsafe fn vsetq_lane_s8<const LANE: i32>(a: i8, b: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s16<const LANE: i32>(a: i16, b: int16x8_t) -> int16x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -27109,7 +27109,7 @@ pub unsafe fn vsetq_lane_s16<const LANE: i32>(a: i16, b: int16x8_t) -> int16x8_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s32<const LANE: i32>(a: i32, b: int32x4_t) -> int32x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -27124,7 +27124,7 @@ pub unsafe fn vsetq_lane_s32<const LANE: i32>(a: i32, b: int32x4_t) -> int32x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_s64<const LANE: i32>(a: i64, b: int64x2_t) -> int64x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -27139,7 +27139,7 @@ pub unsafe fn vsetq_lane_s64<const LANE: i32>(a: i64, b: int64x2_t) -> int64x2_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u8<const LANE: i32>(a: u8, b: uint8x16_t) -> uint8x16_t { static_assert_imm4!(LANE); simd_insert(b, LANE as u32, a) @@ -27154,7 +27154,7 @@ pub unsafe fn vsetq_lane_u8<const LANE: i32>(a: u8, b: uint8x16_t) -> uint8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u16<const LANE: i32>(a: u16, b: uint16x8_t) -> uint16x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -27169,7 +27169,7 @@ pub unsafe fn vsetq_lane_u16<const LANE: i32>(a: u16, b: uint16x8_t) -> uint16x8 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = 
"1.59.0"))] pub unsafe fn vsetq_lane_u32<const LANE: i32>(a: u32, b: uint32x4_t) -> uint32x4_t { static_assert_imm2!(LANE); simd_insert(b, LANE as u32, a) @@ -27184,7 +27184,7 @@ pub unsafe fn vsetq_lane_u32<const LANE: i32>(a: u32, b: uint32x4_t) -> uint32x4 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_u64<const LANE: i32>(a: u64, b: uint64x2_t) -> uint64x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -27199,7 +27199,7 @@ pub unsafe fn vsetq_lane_u64<const LANE: i32>(a: u64, b: uint64x2_t) -> uint64x2 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p8<const LANE: i32>(a: p8, b: poly8x16_t) -> poly8x16_t { static_assert_imm4!(LANE); simd_insert(b, LANE as u32, a) @@ -27214,7 +27214,7 @@ pub unsafe fn vsetq_lane_p8<const LANE: i32>(a: p8, b: poly8x16_t) -> poly8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p16<const LANE: i32>(a: p16, b: poly16x8_t) -> poly16x8_t { static_assert_imm3!(LANE); simd_insert(b, LANE as u32, a) @@ -27229,7 +27229,7 @@ pub unsafe fn vsetq_lane_p16<const LANE: i32>(a: p16, b: poly16x8_t) -> poly16x8 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsetq_lane_p64<const LANE: i32>(a: p64, b: poly64x2_t) -> poly64x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -27244,7 +27244,7 @@ pub unsafe fn vsetq_lane_p64<const LANE: i32>(a: p64, b: poly64x2_t) -> poly64x2 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vset_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> float32x2_t { static_assert_imm1!(LANE); simd_insert(b, LANE as u32, a) @@ -27259,7 +27259,7 @@ pub unsafe fn vset_lane_f32<const LANE: i32>(a: f32, b: float32x2_t) -> float32x #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] 
@@ -27273,7 +27273,7 @@ pub unsafe fn vsetq_lane_f32<const LANE: i32>(a: f32, b: float32x4_t) -> float32
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshl))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
     #[allow(improper_ctypes)]
     extern "unadjusted" {
[... same one-line stability-gate change in each of:]
@@ -27292,7 +27292,7 @@ pub unsafe fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t
@@ -27311,7 +27311,7 @@ pub unsafe fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t
@@ -27330,7 +27330,7 @@ pub unsafe fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t
@@ -27349,7 +27349,7 @@ pub unsafe fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t
@@ -27368,7 +27368,7 @@ pub unsafe fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t
@@ -27387,7 +27387,7 @@ pub unsafe fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t
@@ -27406,7 +27406,7 @@ pub unsafe fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t
@@ -27425,7 +27425,7 @@ pub unsafe fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t
@@ -27444,7 +27444,7 @@ pub unsafe fn vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t
@@ -27463,7 +27463,7 @@ pub unsafe fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t
@@ -27482,7 +27482,7 @@ pub unsafe fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t
@@ -27501,7 +27501,7 @@ pub unsafe fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t
@@ -27520,7 +27520,7 @@ pub unsafe fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t
@@ -27539,7 +27539,7 @@ pub unsafe fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t
@@ -27558,7 +27558,7 @@ pub unsafe fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t
@@ -27578,7 +27578,7 @@ vshlq_u64_(a, b)
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))]
 #[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vshl_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
     static_assert_imm3!(N);
     simd_shl(a, vdup_n_s8(N as _))
[... and likewise in:]
@@ -27593,7 +27593,7 @@ pub unsafe fn vshlq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t
[... the same one-line stability-gate change continues through the rest of the vshl_n hunks:]
@@ -27608,7 +27608,7 @@ pub unsafe fn vshl_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t
@@ -27623,7 +27623,7 @@ pub unsafe fn vshlq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t
@@ -27638,7 +27638,7 @@ pub unsafe fn vshl_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t
@@ -27653,7 +27653,7 @@ pub unsafe fn vshlq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t
@@ -27668,7 +27668,7 @@ pub unsafe fn vshl_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t
@@ -27683,7 +27683,7 @@ pub unsafe fn vshlq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t
@@ -27698,7 +27698,7 @@ pub unsafe fn vshl_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t
@@ -27713,7 +27713,7 @@ pub unsafe fn vshlq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t
@@ -27728,7 +27728,7 @@ pub unsafe fn vshl_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t
@@ -27743,7 +27743,7 @@ pub unsafe fn vshlq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t
@@ -27758,7 +27758,7 @@ pub unsafe fn vshl_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t
@@ -27773,7 +27773,7 @@ pub unsafe fn vshlq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t
@@ -27788,7 +27788,7 @@ pub unsafe fn vshl_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t
@@ -27803,7 +27803,7 @@ pub unsafe fn vshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t
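The vshll_n_* hunks below are the widening shifts: each lane is simd_cast to double width before the shift, which is why their bound is 0 <= N <= the source lane width while the non-widening vshl_n_* forms stop one bit short of it. A small check (stable API, AArch64 host assumed; vgetq_lane_u16 is the standard quad-register extract):

    #[cfg(target_arch = "aarch64")]
    fn main() {
        use core::arch::aarch64::*;
        unsafe {
            // Widen u8 lanes to u16, then shift by the full 8 source bits:
            // 200 << 8 = 51200 only fits because of the widening.
            let r = vshll_n_u8::<8>(vdup_n_u8(200));
            assert_eq!(vgetq_lane_u16::<0>(r), 51200);
        }
    }

    #[cfg(not(target_arch = "aarch64"))]
    fn main() {}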
i32>(a: uint64x1_t) -> uint64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shl, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t { static_assert_imm6!(N); simd_shl(a, vdupq_n_u64(N as _)) @@ -27818,7 +27818,7 @@ pub unsafe fn vshlq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_s8<const N: i32>(a: int8x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_s16(N as _)) @@ -27833,7 +27833,7 @@ pub unsafe fn vshll_n_s8<const N: i32>(a: int8x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_s16<const N: i32>(a: int16x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_s32(N as _)) @@ -27848,7 +27848,7 @@ pub unsafe fn vshll_n_s16<const N: i32>(a: int16x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_s32<const N: i32>(a: int32x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_s64(N as _)) @@ -27863,7 +27863,7 @@ pub unsafe fn vshll_n_s32<const N: i32>(a: int32x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u8<const N: i32>(a: uint8x8_t) -> uint16x8_t { static_assert!(N : i32 where N >= 0 && N <= 8); simd_shl(simd_cast(a), vdupq_n_u16(N as _)) @@ -27878,7 +27878,7 @@ pub unsafe fn vshll_n_u8<const N: i32>(a: uint8x8_t) -> uint16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u16<const N: i32>(a: 
uint16x4_t) -> uint32x4_t { static_assert!(N : i32 where N >= 0 && N <= 16); simd_shl(simd_cast(a), vdupq_n_u32(N as _)) @@ -27893,7 +27893,7 @@ pub unsafe fn vshll_n_u16<const N: i32>(a: uint16x4_t) -> uint32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushll, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshll_n_u32<const N: i32>(a: uint32x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 0 && N <= 32); simd_shl(simd_cast(a), vdupq_n_u64(N as _)) @@ -27908,7 +27908,7 @@ pub unsafe fn vshll_n_u32<const N: i32>(a: uint32x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { 7 } else { N }; @@ -27924,7 +27924,7 @@ pub unsafe fn vshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { 7 } else { N }; @@ -27940,7 +27940,7 @@ pub unsafe fn vshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); let n: i32 = if N == 16 { 15 } else { N }; @@ -27956,7 +27956,7 @@ pub unsafe fn vshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); let n: i32 = if N == 16 { 15 } else { N }; @@ -27972,7 +27972,7 @@ pub unsafe fn vshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = 
"aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); let n: i32 = if N == 32 { 31 } else { N }; @@ -27988,7 +27988,7 @@ pub unsafe fn vshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t { static_assert!(N : i32 where N >= 1 && N <= 32); let n: i32 = if N == 32 { 31 } else { N }; @@ -28004,7 +28004,7 @@ pub unsafe fn vshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; @@ -28020,7 +28020,7 @@ pub unsafe fn vshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sshr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { 63 } else { N }; @@ -28036,7 +28036,7 @@ pub unsafe fn vshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { return vdup_n_u8(0); } else { N }; @@ -28052,7 +28052,7 @@ pub unsafe fn vshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); let n: i32 = if N == 8 { return vdupq_n_u8(0); } else { N }; @@ -28068,7 +28068,7 @@ pub unsafe fn vshrq_n_u8<const N: i32>(a: 
uint8x16_t) -> uint8x16_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
    static_assert!(N : i32 where N >= 1 && N <= 16);
    let n: i32 = if N == 16 { return vdup_n_u16(0); } else { N };
@@ -28084,7 +28084,7 @@ pub unsafe fn vshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
    static_assert!(N : i32 where N >= 1 && N <= 16);
    let n: i32 = if N == 16 { return vdupq_n_u16(0); } else { N };
@@ -28100,7 +28100,7 @@ pub unsafe fn vshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
    static_assert!(N : i32 where N >= 1 && N <= 32);
    let n: i32 = if N == 32 { return vdup_n_u32(0); } else { N };
@@ -28116,7 +28116,7 @@ pub unsafe fn vshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
    static_assert!(N : i32 where N >= 1 && N <= 32);
    let n: i32 = if N == 32 { return vdupq_n_u32(0); } else { N };
@@ -28132,7 +28132,7 @@ pub unsafe fn vshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
    static_assert!(N : i32 where N >= 1 && N <= 64);
    let n: i32 = if N == 64 { return vdup_n_u64(0); } else { N };
@@ -28148,7 +28148,7 @@ pub unsafe fn vshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ushr, N = 2))]
#[rustc_legacy_const_generics(1)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
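
This is the hunk pattern repeated for every intrinsic in the file: the stability marker was previously attached only under `target_arch = "aarch64"` and is now attached under `not(target_arch = "arm")`, so it also reaches any non-arm target beyond literal aarch64, while the intrinsics stay unstable on 32-bit arm. A minimal sketch of the `cfg_attr` mechanics, with `#[must_use]` standing in for `#[stable(...)]`, since the latter only compiles inside a `#![feature(staged_api)]` crate such as core:

// Illustration only, not from the patch: `cfg_attr` attaches its attribute
// whenever the predicate holds, so flipping the predicate from naming one
// architecture to excluding one widens where the attribute applies.
#[cfg_attr(not(target_arch = "arm"), must_use)] // old form: target_arch = "aarch64"
fn gated() -> u32 {
    42
}

fn main() {
    let _ = gated();
}
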
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t { static_assert!(N : i32 where N >= 1 && N <= 64); let n: i32 = if N == 64 { return vdupq_n_u64(0); } else { N }; @@ -28164,7 +28164,7 @@ pub unsafe fn vshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_s16(N as _))) @@ -28179,7 +28179,7 @@ pub unsafe fn vshrn_n_s16<const N: i32>(a: int16x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_s32(N as _))) @@ -28194,7 +28194,7 @@ pub unsafe fn vshrn_n_s32<const N: i32>(a: int32x4_t) -> int16x4_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_s64(N as _))) @@ -28209,7 +28209,7 @@ pub unsafe fn vshrn_n_s64<const N: i32>(a: int64x2_t) -> int32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_cast(simd_shr(a, vdupq_n_u16(N as _))) @@ -28224,7 +28224,7 @@ pub unsafe fn vshrn_n_u16<const N: i32>(a: uint16x8_t) -> uint8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_cast(simd_shr(a, vdupq_n_u32(N as _))) @@ -28239,7 +28239,7 @@ pub unsafe fn vshrn_n_u32<const N: i32>(a: uint32x4_t) -> uint16x4_t { #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vshrn.i64", N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(shrn, N = 2))] #[rustc_legacy_const_generics(1)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t { static_assert!(N : i32 where N >= 1 && N <= 32); simd_cast(simd_shr(a, vdupq_n_u64(N as _))) @@ -28254,7 +28254,7 @@ pub unsafe fn vshrn_n_u64<const N: i32>(a: uint64x2_t) -> uint32x2_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vshr_n_s8::<N>(b)) @@ -28269,7 +28269,7 @@ pub unsafe fn vsra_n_s8<const N: i32>(a: int8x8_t, b: int8x8_t) -> int8x8_t { #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t { static_assert!(N : i32 where N >= 1 && N <= 8); simd_add(a, vshrq_n_s8::<N>(b)) @@ -28284,7 +28284,7 @@ pub unsafe fn vsraq_n_s8<const N: i32>(a: int8x16_t, b: int8x16_t) -> int8x16_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vshr_n_s16::<N>(b)) @@ -28299,7 +28299,7 @@ pub unsafe fn vsra_n_s16<const N: i32>(a: int16x4_t, b: int16x4_t) -> int16x4_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t { static_assert!(N : i32 where N >= 1 && N <= 16); simd_add(a, vshrq_n_s16::<N>(b)) @@ -28314,7 +28314,7 @@ pub unsafe fn vsraq_n_s16<const N: i32>(a: int16x8_t, b: int16x8_t) -> int16x8_t #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))] #[rustc_legacy_const_generics(2)] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vsra_n_s32<const N: i32>(a: int32x2_t, b: 
int32x2_t) -> int32x2_t {
    static_assert!(N : i32 where N >= 1 && N <= 32);
    simd_add(a, vshr_n_s32::<N>(b))
@@ -28329,7 +28329,7 @@ pub unsafe fn vsra_n_s32<const N: i32>(a: int32x2_t, b: int32x2_t) -> int32x2_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t {
    static_assert!(N : i32 where N >= 1 && N <= 32);
    simd_add(a, vshrq_n_s32::<N>(b))
@@ -28344,7 +28344,7 @@ pub unsafe fn vsraq_n_s32<const N: i32>(a: int32x4_t, b: int32x4_t) -> int32x4_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t {
    static_assert!(N : i32 where N >= 1 && N <= 64);
    simd_add(a, vshr_n_s64::<N>(b))
@@ -28359,7 +28359,7 @@ pub unsafe fn vsra_n_s64<const N: i32>(a: int64x1_t, b: int64x1_t) -> int64x1_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ssra, N = 2))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t {
    static_assert!(N : i32 where N >= 1 && N <= 64);
    simd_add(a, vshrq_n_s64::<N>(b))
@@ -28374,7 +28374,7 @@ pub unsafe fn vsraq_n_s64<const N: i32>(a: int64x2_t, b: int64x2_t) -> int64x2_t
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
    static_assert!(N : i32 where N >= 1 && N <= 8);
    simd_add(a, vshr_n_u8::<N>(b))
@@ -28389,7 +28389,7 @@ pub unsafe fn vsra_n_u8<const N: i32>(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
    static_assert!(N : i32 where N >= 1 && N <= 8);
    simd_add(a, vshrq_n_u8::<N>(b))
@@ -28404,7 +28404,7 @@ pub unsafe fn vsraq_n_u8<const N: i32>(a: uint8x16_t, b: uint8x16_t) -> uint8x16
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
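
Every `vsra`/`ssra`/`usra` intrinsic in this stretch is a shift-right fused with an add: `vsra_n_u16::<N>(a, b)` computes `a + (b >> N)` per lane via `simd_add(a, vshr_n_u16::<N>(b))`. The delegated unsigned `vshr_n_*` returns an all-zero vector when `N` equals the lane width (visible in the hunks above), while the signed variants clamp the shift to width - 1 to keep the sign fill. A scalar model of one unsigned lane, for illustration:

// Scalar sketch of one vsra_n_u16 lane (not the real intrinsic):
// shift the addend right by a constant, then accumulate.
fn vsra_lane<const N: u32>(a: u16, b: u16) -> u16 {
    a.wrapping_add(b >> N)
}

fn main() {
    // 0b1100 >> 2 == 0b11; accumulated onto 1 this gives 4,
    // matching simd_add(a, vshr_n_u16::<N>(b)) per lane.
    assert_eq!(vsra_lane::<2>(1, 0b1100), 4);
}
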
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
    static_assert!(N : i32 where N >= 1 && N <= 16);
    simd_add(a, vshr_n_u16::<N>(b))
@@ -28419,7 +28419,7 @@ pub unsafe fn vsra_n_u16<const N: i32>(a: uint16x4_t, b: uint16x4_t) -> uint16x4
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
    static_assert!(N : i32 where N >= 1 && N <= 16);
    simd_add(a, vshrq_n_u16::<N>(b))
@@ -28434,7 +28434,7 @@ pub unsafe fn vsraq_n_u16<const N: i32>(a: uint16x8_t, b: uint16x8_t) -> uint16x
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
    static_assert!(N : i32 where N >= 1 && N <= 32);
    simd_add(a, vshr_n_u32::<N>(b))
@@ -28449,7 +28449,7 @@ pub unsafe fn vsra_n_u32<const N: i32>(a: uint32x2_t, b: uint32x2_t) -> uint32x2
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
    static_assert!(N : i32 where N >= 1 && N <= 32);
    simd_add(a, vshrq_n_u32::<N>(b))
@@ -28464,7 +28464,7 @@ pub unsafe fn vsraq_n_u32<const N: i32>(a: uint32x4_t, b: uint32x4_t) -> uint32x
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
    static_assert!(N : i32 where N >= 1 && N <= 64);
    simd_add(a, vshr_n_u64::<N>(b))
@@ -28479,7 +28479,7 @@ pub unsafe fn vsra_n_u64<const N: i32>(a: uint64x1_t, b: uint64x1_t) -> uint64x1
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(usra, N = 2))]
#[rustc_legacy_const_generics(2)]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
    static_assert!(N : i32 where N >= 1 && N <= 64);
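
`static_assert!` is stdarch's internal macro for rejecting an out-of-range shift amount at compile time, and `rustc_legacy_const_generics(2)` lets the C-shaped call `vsraq_n_u64(a, b, n)` be rewritten into the const-generic form. On stable Rust a comparable monomorphization-time bound check can be sketched with a const-evaluated associated constant (hypothetical helper, not the macro's actual expansion):

// Hypothetical stand-in for static_assert!(N : i32 where N >= 1 && N <= 64):
struct Bounds<const N: i32>;

impl<const N: i32> Bounds<N> {
    // Evaluated when the function below is monomorphized;
    // an out-of-range N aborts compilation.
    const OK: () = assert!(N >= 1 && N <= 64, "shift amount out of range");
}

fn shr64<const N: i32>(x: u64) -> u64 {
    let _ = Bounds::<N>::OK;
    x >> N
}

fn main() {
    assert_eq!(shr64::<4>(1 << 8), 1 << 4);
    // shr64::<65>(0) would fail to compile.
}
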
simd_add(a, vshrq_n_u64::<N>(b)) @@ -28493,7 +28493,7 @@ pub unsafe fn vsraq_n_u64<const N: i32>(a: uint64x2_t, b: uint64x2_t) -> uint64x #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let a1: int8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: int8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28508,7 +28508,7 @@ pub unsafe fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let a1: int16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: int16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -28523,7 +28523,7 @@ pub unsafe fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { let a1: int8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]); let b1: int8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]); @@ -28538,7 +28538,7 @@ pub unsafe fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let a1: int16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: int16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28553,7 +28553,7 @@ pub unsafe fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a1: int32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: int32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -28568,7 +28568,7 @@ pub unsafe fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> 
int32x4x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
    let a1: uint8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
    let b1: uint8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
@@ -28583,7 +28583,7 @@ pub unsafe fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
    let a1: uint16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
    let b1: uint16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
@@ -28598,7 +28598,7 @@ pub unsafe fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
    let a1: uint8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]);
    let b1: uint8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]);
@@ -28613,7 +28613,7 @@ pub unsafe fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
    let a1: uint16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]);
    let b1: uint16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]);
@@ -28628,7 +28628,7 @@ pub unsafe fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))]
-#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))]
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
    let a1: uint32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]);
    let b1: uint32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]);
@@ -28643,7 +28643,7 @@ pub unsafe fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t {
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
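
Each `vtrn` above lowers to two `simd_shuffle` invocations whose index patterns ([0, 8, 2, 10, ...] and [1, 9, 3, 11, ...]) transpose the even and odd lanes of the two inputs. The same permutation written out on plain arrays, as an illustration:

// Plain-array model of vtrn_u8's two shuffles (illustration, not the intrinsic).
fn vtrn8(a: [u8; 8], b: [u8; 8]) -> ([u8; 8], [u8; 8]) {
    let mut even = [0u8; 8];
    let mut odd = [0u8; 8];
    for i in 0..4 {
        even[2 * i] = a[2 * i];        // shuffle index 2i      (from a)
        even[2 * i + 1] = b[2 * i];    // shuffle index 8 + 2i  (from b)
        odd[2 * i] = a[2 * i + 1];     // shuffle index 2i + 1
        odd[2 * i + 1] = b[2 * i + 1]; // shuffle index 8 + 2i + 1
    }
    (even, odd)
}

fn main() {
    let (x, y) = vtrn8([0, 1, 2, 3, 4, 5, 6, 7], [10, 11, 12, 13, 14, 15, 16, 17]);
    assert_eq!(x, [0, 10, 2, 12, 4, 14, 6, 16]);
    assert_eq!(y, [1, 11, 3, 13, 5, 15, 7, 17]);
}
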
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a1: poly8x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: poly8x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28658,7 +28658,7 @@ pub unsafe fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let a1: poly16x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: poly16x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -28673,7 +28673,7 @@ pub unsafe fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a1: poly8x16_t = simd_shuffle16!(a, b, [0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]); let b1: poly8x16_t = simd_shuffle16!(a, b, [1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]); @@ -28688,7 +28688,7 @@ pub unsafe fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a1: poly16x8_t = simd_shuffle8!(a, b, [0, 8, 2, 10, 4, 12, 6, 14]); let b1: poly16x8_t = simd_shuffle8!(a, b, [1, 9, 3, 11, 5, 13, 7, 15]); @@ -28703,7 +28703,7 @@ pub unsafe fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a1: int32x2_t = simd_shuffle2!(a, b, [0, 2]); let b1: int32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -28718,7 +28718,7 @@ pub unsafe fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), 
assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a1: uint32x2_t = simd_shuffle2!(a, b, [0, 2]); let b1: uint32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -28733,7 +28733,7 @@ pub unsafe fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let a1: float32x2_t = simd_shuffle2!(a, b, [0, 2]); let b1: float32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -28748,7 +28748,7 @@ pub unsafe fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(trn))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let a1: float32x4_t = simd_shuffle4!(a, b, [0, 4, 2, 6]); let b1: float32x4_t = simd_shuffle4!(a, b, [1, 5, 3, 7]); @@ -28763,7 +28763,7 @@ pub unsafe fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let a0: int8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: int8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -28778,7 +28778,7 @@ pub unsafe fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let a0: int16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: int16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -28793,7 +28793,7 @@ pub unsafe fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_u8(a: uint8x8_t, b: 
uint8x8_t) -> uint8x8x2_t { let a0: uint8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: uint8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -28808,7 +28808,7 @@ pub unsafe fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let a0: uint16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: uint16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -28823,7 +28823,7 @@ pub unsafe fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a0: poly8x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: poly8x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -28838,7 +28838,7 @@ pub unsafe fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let a0: poly16x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: poly16x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -28853,7 +28853,7 @@ pub unsafe fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a0: int32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: int32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -28868,7 +28868,7 @@ pub unsafe fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a0: uint32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: uint32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -28883,7 +28883,7 @@ pub unsafe fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> 
uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { let a0: int8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: int8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); @@ -28898,7 +28898,7 @@ pub unsafe fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let a0: int16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: int16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -28913,7 +28913,7 @@ pub unsafe fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a0: int32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: int32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -28928,7 +28928,7 @@ pub unsafe fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { let a0: uint8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: uint8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); @@ -28943,7 +28943,7 @@ pub unsafe fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let a0: uint16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: uint16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -28958,7 +28958,7 @@ pub unsafe fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> 
uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let a0: uint32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: uint32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -28973,7 +28973,7 @@ pub unsafe fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a0: poly8x16_t = simd_shuffle16!(a, b, [0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]); let b0: poly8x16_t = simd_shuffle16!(a, b, [8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]); @@ -28988,7 +28988,7 @@ pub unsafe fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a0: poly16x8_t = simd_shuffle8!(a, b, [0, 8, 1, 9, 2, 10, 3, 11]); let b0: poly16x8_t = simd_shuffle8!(a, b, [4, 12, 5, 13, 6, 14, 7, 15]); @@ -29003,7 +29003,7 @@ pub unsafe fn vzipq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let a0: float32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: float32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -29018,7 +29018,7 @@ pub unsafe fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(ext))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let a0: float32x4_t = simd_shuffle4!(a, b, [0, 4, 1, 5]); let b0: float32x4_t = simd_shuffle4!(a, b, [2, 6, 3, 7]); @@ -29033,7 +29033,7 @@ pub unsafe fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { let a0: int8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: int8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29048,7 +29048,7 @@ pub unsafe fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { let a0: int16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: int16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29063,7 +29063,7 @@ pub unsafe fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { let a0: int8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: int8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -29078,7 +29078,7 @@ pub unsafe fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { let a0: int16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: int16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29093,7 +29093,7 @@ pub unsafe fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { let a0: int32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: int32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29108,7 +29108,7 @@ pub unsafe fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch 
= "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { let a0: uint8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: uint8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29123,7 +29123,7 @@ pub unsafe fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { let a0: uint16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: uint16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29138,7 +29138,7 @@ pub unsafe fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { let a0: uint8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: uint8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -29153,7 +29153,7 @@ pub unsafe fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { let a0: uint16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: uint16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29168,7 +29168,7 @@ pub unsafe fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { let a0: uint32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: uint32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29183,7 +29183,7 @@ pub unsafe fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] 
+#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { let a0: poly8x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: poly8x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29198,7 +29198,7 @@ pub unsafe fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { let a0: poly16x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: poly16x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29213,7 +29213,7 @@ pub unsafe fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { let a0: poly8x16_t = simd_shuffle16!(a, b, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]); let b0: poly8x16_t = simd_shuffle16!(a, b, [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]); @@ -29228,7 +29228,7 @@ pub unsafe fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { let a0: poly16x8_t = simd_shuffle8!(a, b, [0, 2, 4, 6, 8, 10, 12, 14]); let b0: poly16x8_t = simd_shuffle8!(a, b, [1, 3, 5, 7, 9, 11, 13, 15]); @@ -29243,7 +29243,7 @@ pub unsafe fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { let a0: int32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: int32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -29258,7 +29258,7 @@ pub unsafe fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn 
vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { let a0: uint32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: uint32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -29273,7 +29273,7 @@ pub unsafe fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(zip))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { let a0: float32x2_t = simd_shuffle2!(a, b, [0, 2]); let b0: float32x2_t = simd_shuffle2!(a, b, [1, 3]); @@ -29288,7 +29288,7 @@ pub unsafe fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uzp))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { let a0: float32x4_t = simd_shuffle4!(a, b, [0, 2, 4, 6]); let b0: float32x4_t = simd_shuffle4!(a, b, [1, 3, 5, 7]); @@ -29303,7 +29303,7 @@ pub unsafe fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { let d: uint8x8_t = vabd_u8(b, c); simd_add(a, simd_cast(d)) @@ -29317,7 +29317,7 @@ pub unsafe fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { let d: uint16x4_t = vabd_u16(b, c); simd_add(a, simd_cast(d)) @@ -29331,7 +29331,7 @@ pub unsafe fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(uabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { let d: uint32x2_t = vabd_u32(b, c); simd_add(a, simd_cast(d)) @@ -29345,7 +29345,7 @@ pub unsafe fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2 #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { let d: int8x8_t = vabd_s8(b, c); let e: uint8x8_t = simd_cast(d); @@ -29360,7 +29360,7 @@ pub unsafe fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { let d: int16x4_t = vabd_s16(b, c); let e: uint16x4_t = simd_cast(d); @@ -29375,7 +29375,7 @@ pub unsafe fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sabal))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { let d: int32x2_t = vabd_s32(b, c); let e: uint32x2_t = simd_cast(d); @@ -29390,7 +29390,7 @@ pub unsafe fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabs_s8(a: int8x8_t) -> int8x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29409,7 +29409,7 @@ vqabs_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabsq_s8(a: int8x16_t) -> int8x16_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29428,7 +29428,7 @@ vqabsq_s8_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabs_s16(a: int16x4_t) -> int16x4_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29447,7 +29447,7 @@ vqabs_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabsq_s16(a: int16x8_t) -> int16x8_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29466,7 +29466,7 @@ vqabsq_s16_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabs_s32(a: int32x2_t) -> int32x2_t { #[allow(improper_ctypes)] extern "unadjusted" { @@ -29485,7 +29485,7 @@ vqabs_s32_(a) #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(sqabs))] -#[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] +#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub unsafe fn vqabsq_s32(a: int32x4_t) -> int32x4_t { #[allow(improper_ctypes)] extern "unadjusted" { diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs index 0559aea83..31e924b84 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs @@ -18,90 +18,90 @@ pub(crate) type p128 = u128; types! { /// ARM-specific 64-bit wide vector of eight packed `i8`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int8x8_t(pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8); /// ARM-specific 64-bit wide vector of eight packed `u8`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint8x8_t(pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8); /// ARM-specific 64-bit wide polynomial vector of eight packed `p8`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly8x8_t(pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8); /// ARM-specific 64-bit wide vector of four packed `i16`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int16x4_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16); /// ARM-specific 64-bit wide vector of four packed `u16`. 
- #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint16x4_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16); // FIXME: ARM-specific 64-bit wide vector of four packed `f16`. // pub struct float16x4_t(f16, f16, f16, f16); /// ARM-specific 64-bit wide vector of four packed `p16`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly16x4_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16); /// ARM-specific 64-bit wide vector of two packed `i32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int32x2_t(pub(crate) i32, pub(crate) i32); /// ARM-specific 64-bit wide vector of two packed `u32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint32x2_t(pub(crate) u32, pub(crate) u32); /// ARM-specific 64-bit wide vector of two packed `f32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct float32x2_t(pub(crate) f32, pub(crate) f32); /// ARM-specific 64-bit wide vector of one packed `i64`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int64x1_t(pub(crate) i64); /// ARM-specific 64-bit wide vector of one packed `u64`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint64x1_t(pub(crate) u64); /// ARM-specific 64-bit wide vector of one packed `p64`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly64x1_t(pub(crate) p64); /// ARM-specific 128-bit wide vector of sixteen packed `i8`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int8x16_t( pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8 , pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8 , pub(crate) i8, pub(crate) i8, ); /// ARM-specific 128-bit wide vector of sixteen packed `u8`. 
- #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint8x16_t( pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, ); /// ARM-specific 128-bit wide vector of sixteen packed `p8`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly8x16_t( pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, ); /// ARM-specific 128-bit wide vector of eight packed `i16`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int16x8_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16); /// ARM-specific 128-bit wide vector of eight packed `u16`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint16x8_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16); // FIXME: ARM-specific 128-bit wide vector of eight packed `f16`. // pub struct float16x8_t(f16, f16, f16, f16, f16, f16, f16); /// ARM-specific 128-bit wide vector of eight packed `p16`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly16x8_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16); /// ARM-specific 128-bit wide vector of four packed `i32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int32x4_t(pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32); /// ARM-specific 128-bit wide vector of four packed `u32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint32x4_t(pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32); /// ARM-specific 128-bit wide vector of four packed `f32`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct float32x4_t(pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32); /// ARM-specific 128-bit wide vector of two packed `i64`. 
- #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct int64x2_t(pub(crate) i64, pub(crate) i64); /// ARM-specific 128-bit wide vector of two packed `u64`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct uint64x2_t(pub(crate) u64, pub(crate) u64); /// ARM-specific 128-bit wide vector of two packed `p64`. - #[cfg_attr(target_arch = "aarch64", stable(feature = "neon_intrinsics", since = "1.59.0"))] + #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] pub struct poly64x2_t(pub(crate) p64, pub(crate) p64); } @@ -6915,6 +6915,177 @@ pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4 vusmmlaq_s32_(a, b, c) } +/* FIXME: 16-bit float +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +pub unsafe fn vcombine_f16 ( low: float16x4_t, high: float16x4_t) -> float16x8_t { + simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} +*/ + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t { + simd_shuffle4!(low, high, [0, 1, 2, 3]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t { + simd_shuffle16!( + low, + high, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + ) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[stable(feature = "neon_intrinsics", since = "1.59.0")] +pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t { + simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t { + simd_shuffle16!( + low, + high, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + ) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t { + simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = 
"1.59.0") +)] +pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t { + simd_shuffle4!(low, high, [0, 1, 2, 3]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t { + simd_shuffle2!(low, high, [0, 1]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t { + simd_shuffle16!( + low, + high, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + ) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t { + simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] +#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t { + simd_shuffle4!(low, high, [0, 1, 2, 3]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t { + simd_shuffle2!(low, high, [0, 1]) +} + +/// Vector combine +#[inline] +#[target_feature(enable = "neon")] +#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] +#[cfg_attr(test, assert_instr(nop))] +#[cfg_attr( + target_arch = "aarch64", + stable(feature = "neon_intrinsics", since = "1.59.0") +)] +pub unsafe fn vcombine_p64(low: poly64x1_t, high: poly64x1_t) -> poly64x2_t { + simd_shuffle2!(low, high, [0, 1]) +} + #[cfg(test)] mod tests { use super::*; @@ -12488,6 +12659,44 @@ mod tests { let r: i32x4 = transmute(vusmmlaq_s32(transmute(a), transmute(b), transmute(c))); assert_eq!(r, e); } + + macro_rules! 
test_vcombine { + ($test_id:ident => $fn_id:ident ([$($a:expr),*], [$($b:expr),*])) => { + #[allow(unused_assignments)] + #[simd_test(enable = "neon")] + unsafe fn $test_id() { + let a = [$($a),*]; + let b = [$($b),*]; + let e = [$($a),* $(, $b)*]; + let c = $fn_id(transmute(a), transmute(b)); + let mut d = e; + d = transmute(c); + assert_eq!(d, e); + } + } + } + + test_vcombine!(test_vcombine_s8 => vcombine_s8([3_i8, -4, 5, -6, 7, 8, 9, 10], [13_i8, -14, 15, -16, 17, 18, 19, 110])); + test_vcombine!(test_vcombine_u8 => vcombine_u8([3_u8, 4, 5, 6, 7, 8, 9, 10], [13_u8, 14, 15, 16, 17, 18, 19, 110])); + test_vcombine!(test_vcombine_p8 => vcombine_p8([3_u8, 4, 5, 6, 7, 8, 9, 10], [13_u8, 14, 15, 16, 17, 18, 19, 110])); + + test_vcombine!(test_vcombine_s16 => vcombine_s16([3_i16, -4, 5, -6], [13_i16, -14, 15, -16])); + test_vcombine!(test_vcombine_u16 => vcombine_u16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); + test_vcombine!(test_vcombine_p16 => vcombine_p16([3_u16, 4, 5, 6], [13_u16, 14, 15, 16])); + // FIXME: 16-bit floats + // test_vcombine!(test_vcombine_f16 => vcombine_f16([3_f16, 4., 5., 6.], + // [13_f16, 14., 15., 16.])); + + test_vcombine!(test_vcombine_s32 => vcombine_s32([3_i32, -4], [13_i32, -14])); + test_vcombine!(test_vcombine_u32 => vcombine_u32([3_u32, 4], [13_u32, 14])); + // note: poly32x4 does not exist, and neither does vcombine_p32 + test_vcombine!(test_vcombine_f32 => vcombine_f32([3_f32, -4.], [13_f32, -14.])); + + test_vcombine!(test_vcombine_s64 => vcombine_s64([-3_i64], [13_i64])); + test_vcombine!(test_vcombine_u64 => vcombine_u64([3_u64], [13_u64])); + test_vcombine!(test_vcombine_p64 => vcombine_p64([3_u64], [13_u64])); + #[cfg(target_arch = "aarch64")] + test_vcombine!(test_vcombine_f64 => vcombine_f64([-3_f64], [13_f64])); } #[cfg(all(test, target_arch = "arm", target_endian = "little"))] diff --git a/library/stdarch/crates/core_arch/src/mips/msa.rs b/library/stdarch/crates/core_arch/src/mips/msa.rs index 85ed30d18..cded73a54 100644 --- a/library/stdarch/crates/core_arch/src/mips/msa.rs +++ b/library/stdarch/crates/core_arch/src/mips/msa.rs @@ -2208,7 +2208,7 @@ pub unsafe fn __msa_bmnz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { /// Immediate Bit Move If Not Zero /// /// Copy to destination vector `a` (sixteen unsigned 8-bit integer numbers) all bits from source vector -/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from from immediate `imm8` +/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from immediate `imm8` /// are 1 and leaves unchanged all destination bits for which the corresponding target bits are 0. /// #[inline] @@ -2237,7 +2237,7 @@ pub unsafe fn __msa_bmz_v(a: v16u8, b: v16u8, c: v16u8) -> v16u8 { /// Immediate Bit Move If Zero /// /// Copy to destination vector `a` (sixteen unsigned 8-bit integer numbers) all bits from source vector -/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from from immediate `imm8` +/// `b` (sixteen unsigned 8-bit integer numbers) for which the corresponding bits from immediate `imm8` /// are 0 and leaves unchanged all destination bits for which the corresponding immediate bits are 1. /// #[inline] diff --git a/library/stdarch/crates/core_arch/src/mod.rs b/library/stdarch/crates/core_arch/src/mod.rs index 2f7af22cb..231d89e33 100644 --- a/library/stdarch/crates/core_arch/src/mod.rs +++ b/library/stdarch/crates/core_arch/src/mod.rs @@ -155,7 +155,7 @@ pub mod arch { /// which support SIMD, or it will not have SIMD at all. 
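Looking back at the `vcombine_*` intrinsics added in the NEON module above: each one concatenates two 64-bit vectors into a single 128-bit vector, low half first, exactly as the `simd_shuffle` index lists read, and the `test_vcombine!` macro checks that layout by transmuting the result back to an array. A hedged usage sketch, assuming an aarch64 target:

    #[cfg(target_arch = "aarch64")]
    unsafe fn combine_halves() -> core::arch::aarch64::uint32x4_t {
        use core::arch::aarch64::*;
        let lo = vdup_n_u32(1); // [1, 1]
        let hi = vdup_n_u32(2); // [2, 2]
        vcombine_u32(lo, hi) // [1, 1, 2, 2]: the low half occupies the low lanes
    }
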
For compatibility /// the standard library itself does not use any SIMD internally. /// Determining how best to ship your WebAssembly binary with SIMD is - /// largely left up to you as it can can be pretty nuanced depending on + /// largely left up to you as it can be pretty nuanced depending on /// your situation. /// /// [condsections]: https://github.com/webassembly/conditional-sections diff --git a/library/stdarch/crates/core_arch/src/x86/avx.rs b/library/stdarch/crates/core_arch/src/x86/avx.rs index ad9e68db6..f8e83a35b 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx.rs @@ -2450,7 +2450,7 @@ pub unsafe fn _mm256_set1_epi8(a: i8) -> __m256i { ) } -/// Broadcasts 16-bit integer `a` to all all elements of returned vector. +/// Broadcasts 16-bit integer `a` to all elements of returned vector. /// This intrinsic may generate the `vpbroadcastw`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_epi16) diff --git a/library/stdarch/crates/core_arch/src/x86/avx2.rs b/library/stdarch/crates/core_arch/src/x86/avx2.rs index 16add3dbb..8638b3136 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx2.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx2.rs @@ -1857,7 +1857,9 @@ pub unsafe fn _mm256_maskstore_epi64(mem_addr: *mut i64, mask: __m256i, a: __m25 #[cfg_attr(test, assert_instr(vpmaxsw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epi16(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxsw(a.as_i16x16(), b.as_i16x16())) + let a = a.as_i16x16(); + let b = b.as_i16x16(); + transmute(simd_select::<i16x16, _>(simd_gt(a, b), a, b)) } /// Compares packed 32-bit integers in `a` and `b`, and returns the packed @@ -1869,7 +1871,9 @@ pub unsafe fn _mm256_max_epi16(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmaxsd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epi32(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxsd(a.as_i32x8(), b.as_i32x8())) + let a = a.as_i32x8(); + let b = b.as_i32x8(); + transmute(simd_select::<i32x8, _>(simd_gt(a, b), a, b)) } /// Compares packed 8-bit integers in `a` and `b`, and returns the packed @@ -1881,7 +1885,9 @@ pub unsafe fn _mm256_max_epi32(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmaxsb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epi8(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxsb(a.as_i8x32(), b.as_i8x32())) + let a = a.as_i8x32(); + let b = b.as_i8x32(); + transmute(simd_select::<i8x32, _>(simd_gt(a, b), a, b)) } /// Compares packed unsigned 16-bit integers in `a` and `b`, and returns @@ -1893,7 +1899,9 @@ pub unsafe fn _mm256_max_epi8(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmaxuw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epu16(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxuw(a.as_u16x16(), b.as_u16x16())) + let a = a.as_u16x16(); + let b = b.as_u16x16(); + transmute(simd_select::<i16x16, _>(simd_gt(a, b), a, b)) } /// Compares packed unsigned 32-bit integers in `a` and `b`, and returns @@ -1905,7 +1913,9 @@ pub unsafe fn _mm256_max_epu16(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmaxud))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epu32(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxud(a.as_u32x8(), b.as_u32x8())) + let a = a.as_u32x8(); 
+ let b = b.as_u32x8(); + transmute(simd_select::<i32x8, _>(simd_gt(a, b), a, b)) } /// Compares packed unsigned 8-bit integers in `a` and `b`, and returns @@ -1917,7 +1927,9 @@ pub unsafe fn _mm256_max_epu32(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmaxub))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_max_epu8(a: __m256i, b: __m256i) -> __m256i { - transmute(pmaxub(a.as_u8x32(), b.as_u8x32())) + let a = a.as_u8x32(); + let b = b.as_u8x32(); + transmute(simd_select::<i8x32, _>(simd_gt(a, b), a, b)) } /// Compares packed 16-bit integers in `a` and `b`, and returns the packed @@ -1929,7 +1941,9 @@ pub unsafe fn _mm256_max_epu8(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminsw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epi16(a: __m256i, b: __m256i) -> __m256i { - transmute(pminsw(a.as_i16x16(), b.as_i16x16())) + let a = a.as_i16x16(); + let b = b.as_i16x16(); + transmute(simd_select::<i16x16, _>(simd_lt(a, b), a, b)) } /// Compares packed 32-bit integers in `a` and `b`, and returns the packed @@ -1941,7 +1955,9 @@ pub unsafe fn _mm256_min_epi16(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminsd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epi32(a: __m256i, b: __m256i) -> __m256i { - transmute(pminsd(a.as_i32x8(), b.as_i32x8())) + let a = a.as_i32x8(); + let b = b.as_i32x8(); + transmute(simd_select::<i32x8, _>(simd_lt(a, b), a, b)) } /// Compares packed 8-bit integers in `a` and `b`, and returns the packed @@ -1953,7 +1969,9 @@ pub unsafe fn _mm256_min_epi32(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminsb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epi8(a: __m256i, b: __m256i) -> __m256i { - transmute(pminsb(a.as_i8x32(), b.as_i8x32())) + let a = a.as_i8x32(); + let b = b.as_i8x32(); + transmute(simd_select::<i8x32, _>(simd_lt(a, b), a, b)) } /// Compares packed unsigned 16-bit integers in `a` and `b`, and returns @@ -1965,7 +1983,9 @@ pub unsafe fn _mm256_min_epi8(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminuw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epu16(a: __m256i, b: __m256i) -> __m256i { - transmute(pminuw(a.as_u16x16(), b.as_u16x16())) + let a = a.as_u16x16(); + let b = b.as_u16x16(); + transmute(simd_select::<i16x16, _>(simd_lt(a, b), a, b)) } /// Compares packed unsigned 32-bit integers in `a` and `b`, and returns @@ -1977,7 +1997,9 @@ pub unsafe fn _mm256_min_epu16(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminud))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epu32(a: __m256i, b: __m256i) -> __m256i { - transmute(pminud(a.as_u32x8(), b.as_u32x8())) + let a = a.as_u32x8(); + let b = b.as_u32x8(); + transmute(simd_select::<i32x8, _>(simd_lt(a, b), a, b)) } /// Compares packed unsigned 8-bit integers in `a` and `b`, and returns @@ -1989,7 +2011,9 @@ pub unsafe fn _mm256_min_epu32(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpminub))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_min_epu8(a: __m256i, b: __m256i) -> __m256i { - transmute(pminub(a.as_u8x32(), b.as_u8x32())) + let a = a.as_u8x32(); + let b = b.as_u8x32(); + transmute(simd_select::<i8x32, _>(simd_lt(a, b), a, b)) } /// Creates mask from the most significant bit of each 8-bit element in `a`, @@ -2001,7 +2025,9 @@ pub unsafe fn 
_mm256_min_epu8(a: __m256i, b: __m256i) -> __m256i { #[cfg_attr(test, assert_instr(vpmovmskb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_movemask_epi8(a: __m256i) -> i32 { - simd_bitmask::<_, u32>(a.as_i8x32()) as i32 + let z = i8x32::splat(0); + let m: i8x32 = simd_lt(a.as_i8x32(), z); + simd_bitmask::<_, u32>(m) as i32 } /// Computes the sum of absolute differences (SADs) of quadruplets of unsigned @@ -3618,30 +3644,6 @@ extern "C" { fn maskstoreq(mem_addr: *mut i8, mask: i64x2, a: i64x2); #[link_name = "llvm.x86.avx2.maskstore.q.256"] fn maskstoreq256(mem_addr: *mut i8, mask: i64x4, a: i64x4); - #[link_name = "llvm.x86.avx2.pmaxs.w"] - fn pmaxsw(a: i16x16, b: i16x16) -> i16x16; - #[link_name = "llvm.x86.avx2.pmaxs.d"] - fn pmaxsd(a: i32x8, b: i32x8) -> i32x8; - #[link_name = "llvm.x86.avx2.pmaxs.b"] - fn pmaxsb(a: i8x32, b: i8x32) -> i8x32; - #[link_name = "llvm.x86.avx2.pmaxu.w"] - fn pmaxuw(a: u16x16, b: u16x16) -> u16x16; - #[link_name = "llvm.x86.avx2.pmaxu.d"] - fn pmaxud(a: u32x8, b: u32x8) -> u32x8; - #[link_name = "llvm.x86.avx2.pmaxu.b"] - fn pmaxub(a: u8x32, b: u8x32) -> u8x32; - #[link_name = "llvm.x86.avx2.pmins.w"] - fn pminsw(a: i16x16, b: i16x16) -> i16x16; - #[link_name = "llvm.x86.avx2.pmins.d"] - fn pminsd(a: i32x8, b: i32x8) -> i32x8; - #[link_name = "llvm.x86.avx2.pmins.b"] - fn pminsb(a: i8x32, b: i8x32) -> i8x32; - #[link_name = "llvm.x86.avx2.pminu.w"] - fn pminuw(a: u16x16, b: u16x16) -> u16x16; - #[link_name = "llvm.x86.avx2.pminu.d"] - fn pminud(a: u32x8, b: u32x8) -> u32x8; - #[link_name = "llvm.x86.avx2.pminu.b"] - fn pminub(a: u8x32, b: u8x32) -> u8x32; #[link_name = "llvm.x86.avx2.mpsadbw"] fn mpsadbw(a: u8x32, b: u8x32, imm8: i32) -> u16x16; #[link_name = "llvm.x86.avx2.pmulhu.w"] diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs b/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs index e9977e018..b21ededab 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs @@ -80,7 +80,7 @@ pub unsafe fn _mm256_cvtne2ps_pbh(a: __m256, b: __m256) -> __m256bh { } /// Convert packed single-precision (32-bit) floating-point elements in two vectors a and b -/// to packed BF16 (16-bit) floating-point elements and and store the results in single vector +/// to packed BF16 (16-bit) floating-point elements and store the results in a single vector /// dst using writemask k (elements are copied from src when the corresponding mask bit is not set). /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769,1651,1654&avx512techs=AVX512_BF16&text=_mm256_mask_cvtne2ps_pbh) #[inline] diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs b/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs index 3c9df3912..1099ee2cb 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs @@ -303,7 +303,7 @@ pub unsafe fn _mm_mask_popcnt_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __ } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c` as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. 
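The AVX2 hunks above replace the LLVM-specific `pmax*`/`pmin*` declarations with generic lane-wise selects and re-derive `movemask` from a signed compare against zero; the `bitshuffle` intrinsics documented around this point pick single bits out of 64-bit lanes. Scalar models of the three patterns (illustrative only; `simd_gt`, `simd_lt`, `simd_select`, and `simd_bitmask` are internal compiler intrinsics, not public API):

    // Lane-wise max as the diff now expresses it: select(a > b, a, b).
    fn max_epi16_model(a: [i16; 16], b: [i16; 16]) -> [i16; 16] {
        core::array::from_fn(|i| if a[i] > b[i] { a[i] } else { b[i] })
    }

    // movemask_epi8 as a sign test: a lane is < 0 exactly when its top bit is set.
    fn movemask_epi8_model(a: [i8; 32]) -> i32 {
        a.iter().enumerate().fold(0u32, |m, (i, &x)| m | (((x < 0) as u32) << i)) as i32
    }

    // One 64-bit lane of bitshuffle: byte i of `c` picks bit (c[i] % 64) of `b`;
    // the eight selected bits pack into one byte of the output mask.
    fn bitshuffle_lane(b: u64, c: [u8; 8]) -> u8 {
        let mut m = 0u8;
        for (i, &idx) in c.iter().enumerate() {
            m |= (((b >> (idx & 63)) & 1) as u8) << i;
        }
        m
    }
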
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_bitshuffle_epi64_mask) @@ -315,7 +315,7 @@ pub unsafe fn _mm512_bitshuffle_epi64_mask(b: __m512i, c: __m512i) -> __mmask64 } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c` as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. /// /// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. @@ -330,7 +330,7 @@ pub unsafe fn _mm512_mask_bitshuffle_epi64_mask(k: __mmask64, b: __m512i, c: __m } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c` as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_bitshuffle_epi64_mask) @@ -342,7 +342,7 @@ pub unsafe fn _mm256_bitshuffle_epi64_mask(b: __m256i, c: __m256i) -> __mmask32 } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c` as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. /// /// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. @@ -357,7 +357,7 @@ pub unsafe fn _mm256_mask_bitshuffle_epi64_mask(k: __mmask32, b: __m256i, c: __m } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c` as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_bitshuffle_epi64_mask) @@ -369,7 +369,7 @@ pub unsafe fn _mm_bitshuffle_epi64_mask(b: __m128i, c: __m128i) -> __mmask16 { } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. -/// Then groups 8 8-bit values from `c`as indices into the the bits of the corresponding 64-bit integer. +/// Then groups 8 8-bit values from `c` as indices into the bits of the corresponding 64-bit integer. /// It then selects these bits and packs them into the output. /// /// Uses the writemask in k - elements are zeroed in the result if the corresponding mask bit is not set. diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs index 49d78ed60..fbf71dfc4 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512bw.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512bw.rs @@ -7450,7 +7450,7 @@ pub unsafe fn _mm_maskz_set1_epi8(k: __mmask16, a: i8) -> __m128i { transmute(simd_select_bitmask(k, r, zero)) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. 
Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst. +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shufflelo_epi16&expand=5221) #[inline] @@ -7501,7 +7501,7 @@ pub unsafe fn _mm512_shufflelo_epi16<const IMM8: i32>(a: __m512i) -> __m512i { transmute(r) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shufflelo_epi16&expand=5219) #[inline] @@ -7518,7 +7518,7 @@ pub unsafe fn _mm512_mask_shufflelo_epi16<const IMM8: i32>( transmute(simd_select_bitmask(k, r.as_i16x32(), src.as_i16x32())) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shufflelo_epi16&expand=5220) #[inline] @@ -7532,7 +7532,7 @@ pub unsafe fn _mm512_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask32, a: __m transmute(simd_select_bitmask(k, r.as_i16x32(), zero)) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shufflelo_epi16&expand=5216) #[inline] @@ -7549,7 +7549,7 @@ pub unsafe fn _mm256_mask_shufflelo_epi16<const IMM8: i32>( transmute(simd_select_bitmask(k, shuffle.as_i16x16(), src.as_i16x16())) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. 
Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shufflelo_epi16&expand=5217) #[inline] @@ -7563,7 +7563,7 @@ pub unsafe fn _mm256_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask16, a: __m transmute(simd_select_bitmask(k, shuffle.as_i16x16(), zero)) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shufflelo_epi16&expand=5213) #[inline] @@ -7580,7 +7580,7 @@ pub unsafe fn _mm_mask_shufflelo_epi16<const IMM8: i32>( transmute(simd_select_bitmask(k, shuffle.as_i16x8(), src.as_i16x8())) } -/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the low 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the low 64 bits of 128-bit lanes of dst, with the high 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shufflelo_epi16&expand=5214) #[inline] @@ -7594,7 +7594,7 @@ pub unsafe fn _mm_maskz_shufflelo_epi16<const IMM8: i32>(k: __mmask8, a: __m128i transmute(simd_select_bitmask(k, shuffle.as_i16x8(), zero)) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst. +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst. 
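The shuffle immediate in these `shufflehi`/`shufflelo` docs encodes four 2-bit source-lane selectors. A hedged sketch of the high-half variant (assumes an x86_64 target and a nightly toolchain, since the AVX-512 intrinsics were still unstable at this point; the function name is illustrative):

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "avx512bw")]
    unsafe fn reverse_high_words(a: core::arch::x86_64::__m512i) -> core::arch::x86_64::__m512i {
        // imm8 = 0b00_01_10_11 selects source lanes [3, 2, 1, 0]: the high four
        // 16-bit elements of each 128-bit lane are reversed; the low four copy through.
        core::arch::x86_64::_mm512_shufflehi_epi16::<0b00_01_10_11>(a)
    }
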
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_shufflehi_epi16&expand=5212) #[inline] @@ -7645,7 +7645,7 @@ pub unsafe fn _mm512_shufflehi_epi16<const IMM8: i32>(a: __m512i) -> __m512i { transmute(r) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_shufflehi_epi16&expand=5210) #[inline] @@ -7662,7 +7662,7 @@ pub unsafe fn _mm512_mask_shufflehi_epi16<const IMM8: i32>( transmute(simd_select_bitmask(k, r.as_i16x32(), src.as_i16x32())) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_shufflehi_epi16&expand=5211) #[inline] @@ -7676,7 +7676,7 @@ pub unsafe fn _mm512_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask32, a: __m transmute(simd_select_bitmask(k, r.as_i16x32(), zero)) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_shufflehi_epi16&expand=5207) #[inline] @@ -7693,7 +7693,7 @@ pub unsafe fn _mm256_mask_shufflehi_epi16<const IMM8: i32>( transmute(simd_select_bitmask(k, shuffle.as_i16x16(), src.as_i16x16())) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. 
Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_shufflehi_epi16&expand=5208) #[inline] @@ -7707,7 +7707,7 @@ pub unsafe fn _mm256_maskz_shufflehi_epi16<const IMM8: i32>(k: __mmask16, a: __m transmute(simd_select_bitmask(k, shuffle.as_i16x16(), zero)) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using writemask k (elements are copied from src when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_shufflehi_epi16&expand=5204) #[inline] @@ -7724,7 +7724,7 @@ pub unsafe fn _mm_mask_shufflehi_epi16<const IMM8: i32>( transmute(simd_select_bitmask(k, shuffle.as_i16x8(), src.as_i16x8())) } -/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). +/// Shuffle 16-bit integers in the high 64 bits of 128-bit lanes of a using the control in imm8. Store the results in the high 64 bits of 128-bit lanes of dst, with the low 64 bits of 128-bit lanes being copied from a to dst, using zeromask k (elements are zeroed out when the corresponding mask bit is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_shufflehi_epi16&expand=5205) #[inline] diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs index f70a28466..0ddb51283 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs @@ -26268,7 +26268,7 @@ pub unsafe fn _mm512_set1_epi8(a: i8) -> __m512i { transmute(i8x64::splat(a)) } -/// Broadcast the low packed 16-bit integer from a to all all elements of dst. +/// Broadcast the low packed 16-bit integer from a to all elements of dst. 
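The `_mm512_set1_epi16` doc fix here is purely editorial; the intrinsic broadcasts one 16-bit value across all thirty-two lanes. A minimal sketch under the same nightly/AVX-512 assumption (function name illustrative):

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "avx512f")]
    unsafe fn splat_epi16() -> core::arch::x86_64::__m512i {
        core::arch::x86_64::_mm512_set1_epi16(7) // 32 copies of 7 in one 512-bit register
    }
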
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_set1_epi16&expand=4944) #[inline] diff --git a/library/stdarch/crates/core_arch/src/x86/avx512gfni.rs b/library/stdarch/crates/core_arch/src/x86/gfni.rs index 66fd1c2e1..679b2548a 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512gfni.rs +++ b/library/stdarch/crates/core_arch/src/x86/gfni.rs @@ -65,7 +65,7 @@ extern "C" { /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8mul_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] +#[target_feature(enable = "gfni,avx512bw,avx512f")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm512_gf2p8mul_epi8(a: __m512i, b: __m512i) -> __m512i { transmute(vgf2p8mulb_512(a.as_i8x64(), b.as_i8x64())) @@ -80,7 +80,7 @@ pub unsafe fn _mm512_gf2p8mul_epi8(a: __m512i, b: __m512i) -> __m512i { /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8mul_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] +#[target_feature(enable = "gfni,avx512bw,avx512f")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm512_mask_gf2p8mul_epi8( src: __m512i, @@ -104,7 +104,7 @@ pub unsafe fn _mm512_mask_gf2p8mul_epi8( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8mul_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] +#[target_feature(enable = "gfni,avx512bw,avx512f")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm512_maskz_gf2p8mul_epi8(k: __mmask64, a: __m512i, b: __m512i) -> __m512i { let zero = _mm512_setzero_si512().as_i8x64(); @@ -121,7 +121,7 @@ pub unsafe fn _mm512_maskz_gf2p8mul_epi8(k: __mmask64, a: __m512i, b: __m512i) - /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8mul_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm256_gf2p8mul_epi8(a: __m256i, b: __m256i) -> __m256i { transmute(vgf2p8mulb_256(a.as_i8x32(), b.as_i8x32())) @@ -136,7 +136,7 @@ pub unsafe fn _mm256_gf2p8mul_epi8(a: __m256i, b: __m256i) -> __m256i { /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8mul_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm256_mask_gf2p8mul_epi8( src: __m256i, @@ -160,7 +160,7 @@ pub unsafe fn _mm256_mask_gf2p8mul_epi8( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8mul_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm256_maskz_gf2p8mul_epi8(k: __mmask32, a: __m256i, b: __m256i) -> __m256i { let zero = _mm256_setzero_si256().as_i8x32(); @@ -177,8 +177,8 @@ pub unsafe fn _mm256_maskz_gf2p8mul_epi8(k: __mmask32, a: __m256i, b: __m256i) - /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8mul_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8mulb))] 
+#[target_feature(enable = "gfni")] +#[cfg_attr(test, assert_instr(gf2p8mulb))] pub unsafe fn _mm_gf2p8mul_epi8(a: __m128i, b: __m128i) -> __m128i { transmute(vgf2p8mulb_128(a.as_i8x16(), b.as_i8x16())) } @@ -192,7 +192,7 @@ pub unsafe fn _mm_gf2p8mul_epi8(a: __m128i, b: __m128i) -> __m128i { /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8mul_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm_mask_gf2p8mul_epi8( src: __m128i, @@ -216,7 +216,7 @@ pub unsafe fn _mm_mask_gf2p8mul_epi8( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8mul_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8mulb))] pub unsafe fn _mm_maskz_gf2p8mul_epi8(k: __mmask16, a: __m128i, b: __m128i) -> __m128i { let zero = _mm_setzero_si128().as_i8x16(); @@ -234,7 +234,7 @@ pub unsafe fn _mm_maskz_gf2p8mul_epi8(k: __mmask16, a: __m128i, b: __m128i) -> _ /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8affine_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] +#[target_feature(enable = "gfni,avx512bw,avx512f")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_gf2p8affine_epi64_epi8<const B: i32>(x: __m512i, a: __m512i) -> __m512i { @@ -256,7 +256,7 @@ pub unsafe fn _mm512_gf2p8affine_epi64_epi8<const B: i32>(x: __m512i, a: __m512i /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8affine_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] +#[target_feature(enable = "gfni,avx512bw,avx512f")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_gf2p8affine_epi64_epi8<const B: i32>( @@ -283,7 +283,7 @@ pub unsafe fn _mm512_maskz_gf2p8affine_epi64_epi8<const B: i32>( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8affine_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] +#[target_feature(enable = "gfni,avx512bw,avx512f")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_gf2p8affine_epi64_epi8<const B: i32>( @@ -307,7 +307,7 @@ pub unsafe fn _mm512_mask_gf2p8affine_epi64_epi8<const B: i32>( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8affine_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_gf2p8affine_epi64_epi8<const B: i32>(x: __m256i, a: __m256i) -> __m256i { @@ -329,7 +329,7 @@ pub unsafe fn _mm256_gf2p8affine_epi64_epi8<const B: i32>(x: __m256i, a: __m256i /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8affine_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 
0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_gf2p8affine_epi64_epi8<const B: i32>( @@ -356,7 +356,7 @@ pub unsafe fn _mm256_maskz_gf2p8affine_epi64_epi8<const B: i32>( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8affine_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_gf2p8affine_epi64_epi8<const B: i32>( @@ -380,8 +380,8 @@ pub unsafe fn _mm256_mask_gf2p8affine_epi64_epi8<const B: i32>( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8affine_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] +#[target_feature(enable = "gfni")] +#[cfg_attr(test, assert_instr(gf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_gf2p8affine_epi64_epi8<const B: i32>(x: __m128i, a: __m128i) -> __m128i { static_assert_imm8!(B); @@ -402,7 +402,7 @@ pub unsafe fn _mm_gf2p8affine_epi64_epi8<const B: i32>(x: __m128i, a: __m128i) - /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8affine_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_gf2p8affine_epi64_epi8<const B: i32>( @@ -429,7 +429,7 @@ pub unsafe fn _mm_maskz_gf2p8affine_epi64_epi8<const B: i32>( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8affine_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8affineqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_gf2p8affine_epi64_epi8<const B: i32>( @@ -455,7 +455,7 @@ pub unsafe fn _mm_mask_gf2p8affine_epi64_epi8<const B: i32>( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_gf2p8affineinv_epi64_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] +#[target_feature(enable = "gfni,avx512bw,avx512f")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm512_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m512i, a: __m512i) -> __m512i { @@ -479,7 +479,7 @@ pub unsafe fn _mm512_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m512i, a: __m5 /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_maskz_gf2p8affineinv_epi64_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] +#[target_feature(enable = "gfni,avx512bw,avx512f")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm512_maskz_gf2p8affineinv_epi64_epi8<const B: i32>( @@ -508,7 +508,7 @@ pub unsafe fn _mm512_maskz_gf2p8affineinv_epi64_epi8<const B: i32>( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_mask_gf2p8affineinv_epi64_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512f")] +#[target_feature(enable = "gfni,avx512bw,avx512f")] 
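The `avx512gfni` → `gfni` renames above (together with the matching `std_detect` changes later in this diff) mean user code now gates on the plain `gfni`/`vaes`/`vpclmulqdq` names. A minimal sketch, mirroring the `cpu-detection.rs` test further down:

// Illustrative sketch, not part of the diff: runtime detection uses the new
// feature names; the old `avx512`-prefixed strings still match but are
// deprecated (see the std_detect changes below).
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn print_gfni_family_support() {
    println!("gfni: {:?}", is_x86_feature_detected!("gfni"));
    println!("vaes: {:?}", is_x86_feature_detected!("vaes"));
    println!("vpclmulqdq: {:?}", is_x86_feature_detected!("vpclmulqdq"));
}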
#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm512_mask_gf2p8affineinv_epi64_epi8<const B: i32>( @@ -534,7 +534,7 @@ pub unsafe fn _mm512_mask_gf2p8affineinv_epi64_epi8<const B: i32>( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_gf2p8affineinv_epi64_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m256i, a: __m256i) -> __m256i { @@ -558,7 +558,7 @@ pub unsafe fn _mm256_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m256i, a: __m2 /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskz_gf2p8affineinv_epi64_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm256_maskz_gf2p8affineinv_epi64_epi8<const B: i32>( @@ -587,7 +587,7 @@ pub unsafe fn _mm256_maskz_gf2p8affineinv_epi64_epi8<const B: i32>( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mask_gf2p8affineinv_epi64_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm256_mask_gf2p8affineinv_epi64_epi8<const B: i32>( @@ -613,8 +613,8 @@ pub unsafe fn _mm256_mask_gf2p8affineinv_epi64_epi8<const B: i32>( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_gf2p8affineinv_epi64_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] -#[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] +#[target_feature(enable = "gfni")] +#[cfg_attr(test, assert_instr(gf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m128i, a: __m128i) -> __m128i { static_assert_imm8!(B); @@ -637,7 +637,7 @@ pub unsafe fn _mm_gf2p8affineinv_epi64_epi8<const B: i32>(x: __m128i, a: __m128i /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskz_gf2p8affineinv_epi64_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(3)] pub unsafe fn _mm_maskz_gf2p8affineinv_epi64_epi8<const B: i32>( @@ -666,7 +666,7 @@ pub unsafe fn _mm_maskz_gf2p8affineinv_epi64_epi8<const B: i32>( /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_mask_gf2p8affineinv_epi64_epi8) #[inline] -#[target_feature(enable = "avx512gfni,avx512bw,avx512vl")] +#[target_feature(enable = "gfni,avx512bw,avx512vl")] #[cfg_attr(test, assert_instr(vgf2p8affineinvqb, B = 0))] #[rustc_legacy_const_generics(4)] pub unsafe fn _mm_mask_gf2p8affineinv_epi64_epi8<const B: i32>( @@ -847,7 +847,7 @@ mod tests { _mm512_loadu_si512(black_box(pointer)) } - #[simd_test(enable = "avx512gfni,avx512bw")] + #[simd_test(enable = "gfni,avx512bw")] unsafe fn test_mm512_gf2p8mul_epi8() { let (left, right, expected) = 
generate_byte_mul_test_data(); @@ -860,7 +860,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw")] + #[simd_test(enable = "gfni,avx512bw")] unsafe fn test_mm512_maskz_gf2p8mul_epi8() { let (left, right, _expected) = generate_byte_mul_test_data(); @@ -879,7 +879,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw")] + #[simd_test(enable = "gfni,avx512bw")] unsafe fn test_mm512_mask_gf2p8mul_epi8() { let (left, right, _expected) = generate_byte_mul_test_data(); @@ -897,7 +897,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm256_gf2p8mul_epi8() { let (left, right, expected) = generate_byte_mul_test_data(); @@ -910,7 +910,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm256_maskz_gf2p8mul_epi8() { let (left, right, _expected) = generate_byte_mul_test_data(); @@ -929,7 +929,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm256_mask_gf2p8mul_epi8() { let (left, right, _expected) = generate_byte_mul_test_data(); @@ -947,7 +947,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm_gf2p8mul_epi8() { let (left, right, expected) = generate_byte_mul_test_data(); @@ -960,7 +960,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm_maskz_gf2p8mul_epi8() { let (left, right, _expected) = generate_byte_mul_test_data(); @@ -979,7 +979,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm_mask_gf2p8mul_epi8() { let (left, right, _expected) = generate_byte_mul_test_data(); @@ -997,7 +997,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw")] + #[simd_test(enable = "gfni,avx512bw")] unsafe fn test_mm512_gf2p8affine_epi64_epi8() { let identity: i64 = 0x01_02_04_08_10_20_40_80; const IDENTITY_BYTE: i32 = 0; @@ -1031,7 +1031,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw")] + #[simd_test(enable = "gfni,avx512bw")] unsafe fn test_mm512_maskz_gf2p8affine_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); @@ -1053,7 +1053,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw")] + #[simd_test(enable = "gfni,avx512bw")] unsafe fn test_mm512_mask_gf2p8affine_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); @@ -1074,7 +1074,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm256_gf2p8affine_epi64_epi8() { let identity: i64 = 0x01_02_04_08_10_20_40_80; const IDENTITY_BYTE: i32 = 0; @@ -1108,7 +1108,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm256_maskz_gf2p8affine_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); @@ -1130,7 +1130,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe 
fn test_mm256_mask_gf2p8affine_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); @@ -1151,7 +1151,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm_gf2p8affine_epi64_epi8() { let identity: i64 = 0x01_02_04_08_10_20_40_80; const IDENTITY_BYTE: i32 = 0; @@ -1185,7 +1185,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm_maskz_gf2p8affine_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); @@ -1206,7 +1206,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm_mask_gf2p8affine_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); @@ -1227,7 +1227,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw")] + #[simd_test(enable = "gfni,avx512bw")] unsafe fn test_mm512_gf2p8affineinv_epi64_epi8() { let identity: i64 = 0x01_02_04_08_10_20_40_80; const IDENTITY_BYTE: i32 = 0; @@ -1271,7 +1271,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw")] + #[simd_test(enable = "gfni,avx512bw")] unsafe fn test_mm512_maskz_gf2p8affineinv_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); @@ -1293,7 +1293,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw")] + #[simd_test(enable = "gfni,avx512bw")] unsafe fn test_mm512_mask_gf2p8affineinv_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); @@ -1315,7 +1315,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm256_gf2p8affineinv_epi64_epi8() { let identity: i64 = 0x01_02_04_08_10_20_40_80; const IDENTITY_BYTE: i32 = 0; @@ -1359,7 +1359,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm256_maskz_gf2p8affineinv_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); @@ -1381,7 +1381,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm256_mask_gf2p8affineinv_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); @@ -1403,7 +1403,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm_gf2p8affineinv_epi64_epi8() { let identity: i64 = 0x01_02_04_08_10_20_40_80; const IDENTITY_BYTE: i32 = 0; @@ -1447,7 +1447,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = "gfni,avx512bw,avx512vl")] unsafe fn test_mm_maskz_gf2p8affineinv_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); @@ -1469,7 +1469,7 @@ mod tests { } } - #[simd_test(enable = "avx512gfni,avx512bw,avx512vl")] + #[simd_test(enable = 
"gfni,avx512bw,avx512vl")] unsafe fn test_mm_mask_gf2p8affineinv_epi64_epi8() { const CONSTANT_BYTE: i32 = 0x63; let (matrices, vectors, _expected) = generate_affine_mul_test_data(CONSTANT_BYTE as u8); diff --git a/library/stdarch/crates/core_arch/src/x86/mod.rs b/library/stdarch/crates/core_arch/src/x86/mod.rs index 6b50e95b2..37045e40e 100644 --- a/library/stdarch/crates/core_arch/src/x86/mod.rs +++ b/library/stdarch/crates/core_arch/src/x86/mod.rs @@ -835,17 +835,17 @@ pub use self::avx512vnni::*; mod avx512bitalg; pub use self::avx512bitalg::*; -mod avx512gfni; -pub use self::avx512gfni::*; +mod gfni; +pub use self::gfni::*; mod avx512vpopcntdq; pub use self::avx512vpopcntdq::*; -mod avx512vaes; -pub use self::avx512vaes::*; +mod vaes; +pub use self::vaes::*; -mod avx512vpclmulqdq; -pub use self::avx512vpclmulqdq::*; +mod vpclmulqdq; +pub use self::vpclmulqdq::*; mod bt; pub use self::bt::*; diff --git a/library/stdarch/crates/core_arch/src/x86/sse.rs b/library/stdarch/crates/core_arch/src/x86/sse.rs index 03c3a14a5..f21288970 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse.rs @@ -1080,10 +1080,7 @@ pub unsafe fn _mm_movelh_ps(a: __m128, b: __m128) -> __m128 { /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_movemask_ps) #[inline] #[target_feature(enable = "sse")] -// FIXME: LLVM9 trunk has the following bug: -// https://github.com/rust-lang/stdarch/issues/794 -// so we only temporarily test this on i686 and x86_64 but not on i586: -#[cfg_attr(all(test, target_feature = "sse2"), assert_instr(movmskps))] +#[cfg_attr(test, assert_instr(movmskps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_movemask_ps(a: __m128) -> i32 { movmskps(a) diff --git a/library/stdarch/crates/core_arch/src/x86/sse2.rs b/library/stdarch/crates/core_arch/src/x86/sse2.rs index 3e79b3539..cde4bc316 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse2.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse2.rs @@ -203,7 +203,9 @@ pub unsafe fn _mm_madd_epi16(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pmaxsw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epi16(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxsw(a.as_i16x8(), b.as_i16x8())) + let a = a.as_i16x8(); + let b = b.as_i16x8(); + transmute(simd_select::<i16x8, _>(simd_gt(a, b), a, b)) } /// Compares packed unsigned 8-bit integers in `a` and `b`, and returns the @@ -215,7 +217,9 @@ pub unsafe fn _mm_max_epi16(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pmaxub))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epu8(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxub(a.as_u8x16(), b.as_u8x16())) + let a = a.as_u8x16(); + let b = b.as_u8x16(); + transmute(simd_select::<i8x16, _>(simd_gt(a, b), a, b)) } /// Compares packed 16-bit integers in `a` and `b`, and returns the packed @@ -227,7 +231,9 @@ pub unsafe fn _mm_max_epu8(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pminsw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epi16(a: __m128i, b: __m128i) -> __m128i { - transmute(pminsw(a.as_i16x8(), b.as_i16x8())) + let a = a.as_i16x8(); + let b = b.as_i16x8(); + transmute(simd_select::<i16x8, _>(simd_lt(a, b), a, b)) } /// Compares packed unsigned 8-bit integers in `a` and `b`, and returns the @@ -239,7 +245,9 @@ pub unsafe fn _mm_min_epi16(a: __m128i, b: __m128i) -> 
__m128i { #[cfg_attr(test, assert_instr(pminub))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epu8(a: __m128i, b: __m128i) -> __m128i { - transmute(pminub(a.as_u8x16(), b.as_u8x16())) + let a = a.as_u8x16(); + let b = b.as_u8x16(); + transmute(simd_select::<i8x16, _>(simd_lt(a, b), a, b)) } /// Multiplies the packed 16-bit integers in `a` and `b`. @@ -1378,7 +1386,9 @@ pub unsafe fn _mm_insert_epi16<const IMM8: i32>(a: __m128i, i: i32) -> __m128i { #[cfg_attr(test, assert_instr(pmovmskb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_movemask_epi8(a: __m128i) -> i32 { - simd_bitmask::<_, u16>(a.as_i8x16()) as u32 as i32 + let z = i8x16::splat(0); + let m: i8x16 = simd_lt(a.as_i8x16(), z); + simd_bitmask::<_, u16>(m) as u32 as i32 } /// Shuffles 32-bit integers in `a` using the control in `IMM8`. @@ -1409,7 +1419,7 @@ pub unsafe fn _mm_shuffle_epi32<const IMM8: i32>(a: __m128i) -> __m128i { /// `IMM8`. /// /// Put the results in the high 64 bits of the returned vector, with the low 64 -/// bits being copied from from `a`. +/// bits being copied from `a`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shufflehi_epi16) #[inline] @@ -1441,7 +1451,7 @@ pub unsafe fn _mm_shufflehi_epi16<const IMM8: i32>(a: __m128i) -> __m128i { /// `IMM8`. /// /// Put the results in the low 64 bits of the returned vector, with the high 64 -/// bits being copied from from `a`. +/// bits being copied from `a`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shufflelo_epi16) #[inline] @@ -2796,14 +2806,6 @@ extern "C" { fn pavgw(a: u16x8, b: u16x8) -> u16x8; #[link_name = "llvm.x86.sse2.pmadd.wd"] fn pmaddwd(a: i16x8, b: i16x8) -> i32x4; - #[link_name = "llvm.x86.sse2.pmaxs.w"] - fn pmaxsw(a: i16x8, b: i16x8) -> i16x8; - #[link_name = "llvm.x86.sse2.pmaxu.b"] - fn pmaxub(a: u8x16, b: u8x16) -> u8x16; - #[link_name = "llvm.x86.sse2.pmins.w"] - fn pminsw(a: i16x8, b: i16x8) -> i16x8; - #[link_name = "llvm.x86.sse2.pminu.b"] - fn pminub(a: u8x16, b: u8x16) -> u8x16; #[link_name = "llvm.x86.sse2.pmulh.w"] fn pmulhw(a: i16x8, b: i16x8) -> i16x8; #[link_name = "llvm.x86.sse2.pmulhu.w"] diff --git a/library/stdarch/crates/core_arch/src/x86/sse41.rs b/library/stdarch/crates/core_arch/src/x86/sse41.rs index 7c59f2702..3162ad7d9 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse41.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse41.rs @@ -281,7 +281,9 @@ pub unsafe fn _mm_insert_epi32<const IMM8: i32>(a: __m128i, i: i32) -> __m128i { #[cfg_attr(test, assert_instr(pmaxsb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epi8(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxsb(a.as_i8x16(), b.as_i8x16())) + let a = a.as_i8x16(); + let b = b.as_i8x16(); + transmute(simd_select::<i8x16, _>(simd_gt(a, b), a, b)) } /// Compares packed unsigned 16-bit integers in `a` and `b`, and returns packed @@ -293,7 +295,9 @@ pub unsafe fn _mm_max_epi8(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pmaxuw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epu16(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxuw(a.as_u16x8(), b.as_u16x8())) + let a = a.as_u16x8(); + let b = b.as_u16x8(); + transmute(simd_select::<i16x8, _>(simd_gt(a, b), a, b)) } /// Compares packed 32-bit integers in `a` and `b`, and returns packed maximum @@ -305,7 +309,9 @@ pub unsafe fn _mm_max_epu16(a: __m128i, b: __m128i) -> 
__m128i { #[cfg_attr(test, assert_instr(pmaxsd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epi32(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxsd(a.as_i32x4(), b.as_i32x4())) + let a = a.as_i32x4(); + let b = b.as_i32x4(); + transmute(simd_select::<i32x4, _>(simd_gt(a, b), a, b)) } /// Compares packed unsigned 32-bit integers in `a` and `b`, and returns packed @@ -317,7 +323,9 @@ pub unsafe fn _mm_max_epi32(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pmaxud))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_max_epu32(a: __m128i, b: __m128i) -> __m128i { - transmute(pmaxud(a.as_u32x4(), b.as_u32x4())) + let a = a.as_u32x4(); + let b = b.as_u32x4(); + transmute(simd_select::<i32x4, _>(simd_gt(a, b), a, b)) } /// Compares packed 8-bit integers in `a` and `b` and returns packed minimum @@ -329,7 +337,9 @@ pub unsafe fn _mm_max_epu32(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pminsb))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epi8(a: __m128i, b: __m128i) -> __m128i { - transmute(pminsb(a.as_i8x16(), b.as_i8x16())) + let a = a.as_i8x16(); + let b = b.as_i8x16(); + transmute(simd_select::<i8x16, _>(simd_lt(a, b), a, b)) } /// Compares packed unsigned 16-bit integers in `a` and `b`, and returns packed @@ -341,7 +351,9 @@ pub unsafe fn _mm_min_epi8(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pminuw))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epu16(a: __m128i, b: __m128i) -> __m128i { - transmute(pminuw(a.as_u16x8(), b.as_u16x8())) + let a = a.as_u16x8(); + let b = b.as_u16x8(); + transmute(simd_select::<i16x8, _>(simd_lt(a, b), a, b)) } /// Compares packed 32-bit integers in `a` and `b`, and returns packed minimum @@ -353,7 +365,9 @@ pub unsafe fn _mm_min_epu16(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pminsd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epi32(a: __m128i, b: __m128i) -> __m128i { - transmute(pminsd(a.as_i32x4(), b.as_i32x4())) + let a = a.as_i32x4(); + let b = b.as_i32x4(); + transmute(simd_select::<i32x4, _>(simd_lt(a, b), a, b)) } /// Compares packed unsigned 32-bit integers in `a` and `b`, and returns packed @@ -365,7 +379,9 @@ pub unsafe fn _mm_min_epi32(a: __m128i, b: __m128i) -> __m128i { #[cfg_attr(test, assert_instr(pminud))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_min_epu32(a: __m128i, b: __m128i) -> __m128i { - transmute(pminud(a.as_u32x4(), b.as_u32x4())) + let a = a.as_u32x4(); + let b = b.as_u32x4(); + transmute(simd_select::<i32x4, _>(simd_lt(a, b), a, b)) } /// Converts packed 32-bit integers from `a` and `b` to packed 16-bit integers @@ -1122,22 +1138,6 @@ extern "C" { fn pblendw(a: i16x8, b: i16x8, imm8: u8) -> i16x8; #[link_name = "llvm.x86.sse41.insertps"] fn insertps(a: __m128, b: __m128, imm8: u8) -> __m128; - #[link_name = "llvm.x86.sse41.pmaxsb"] - fn pmaxsb(a: i8x16, b: i8x16) -> i8x16; - #[link_name = "llvm.x86.sse41.pmaxuw"] - fn pmaxuw(a: u16x8, b: u16x8) -> u16x8; - #[link_name = "llvm.x86.sse41.pmaxsd"] - fn pmaxsd(a: i32x4, b: i32x4) -> i32x4; - #[link_name = "llvm.x86.sse41.pmaxud"] - fn pmaxud(a: u32x4, b: u32x4) -> u32x4; - #[link_name = "llvm.x86.sse41.pminsb"] - fn pminsb(a: i8x16, b: i8x16) -> i8x16; - #[link_name = "llvm.x86.sse41.pminuw"] - fn pminuw(a: u16x8, b: u16x8) -> u16x8; - #[link_name = "llvm.x86.sse41.pminsd"] - fn pminsd(a: i32x4, b: i32x4) -> i32x4; - #[link_name = 
"llvm.x86.sse41.pminud"] - fn pminud(a: u32x4, b: u32x4) -> u32x4; #[link_name = "llvm.x86.sse41.packusdw"] fn packusdw(a: i32x4, b: i32x4) -> u16x8; #[link_name = "llvm.x86.sse41.dppd"] diff --git a/library/stdarch/crates/core_arch/src/x86/sse42.rs b/library/stdarch/crates/core_arch/src/x86/sse42.rs index f474b0671..4eb12480b 100644 --- a/library/stdarch/crates/core_arch/src/x86/sse42.rs +++ b/library/stdarch/crates/core_arch/src/x86/sse42.rs @@ -614,7 +614,7 @@ mod tests { use crate::core_arch::x86::*; use std::ptr; - // Currently one cannot `load` a &[u8] that is is less than 16 + // Currently one cannot `load` a &[u8] that is less than 16 // in length. This makes loading strings less than 16 in length // a bit difficult. Rather than `load` and mutate the __m128i, // it is easier to memcpy the given string to a local slice with @@ -623,11 +623,7 @@ mod tests { unsafe fn str_to_m128i(s: &[u8]) -> __m128i { assert!(s.len() <= 16); let slice = &mut [0u8; 16]; - ptr::copy_nonoverlapping( - s.get_unchecked(0) as *const u8 as *const u8, - slice.get_unchecked_mut(0) as *mut u8 as *mut u8, - s.len(), - ); + ptr::copy_nonoverlapping(s.as_ptr(), slice.as_mut_ptr(), s.len()); _mm_loadu_si128(slice.as_ptr() as *const _) } diff --git a/library/stdarch/crates/core_arch/src/x86/avx512vaes.rs b/library/stdarch/crates/core_arch/src/x86/vaes.rs index 676de312b..e09f8a113 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512vaes.rs +++ b/library/stdarch/crates/core_arch/src/x86/vaes.rs @@ -38,7 +38,7 @@ extern "C" { /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesenc_epi128) #[inline] -#[target_feature(enable = "avx512vaes,avx512vl")] +#[target_feature(enable = "vaes")] #[cfg_attr(test, assert_instr(vaesenc))] pub unsafe fn _mm256_aesenc_epi128(a: __m256i, round_key: __m256i) -> __m256i { aesenc_256(a, round_key) @@ -49,7 +49,7 @@ pub unsafe fn _mm256_aesenc_epi128(a: __m256i, round_key: __m256i) -> __m256i { /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesenclast_epi128) #[inline] -#[target_feature(enable = "avx512vaes,avx512vl")] +#[target_feature(enable = "vaes")] #[cfg_attr(test, assert_instr(vaesenclast))] pub unsafe fn _mm256_aesenclast_epi128(a: __m256i, round_key: __m256i) -> __m256i { aesenclast_256(a, round_key) @@ -60,7 +60,7 @@ pub unsafe fn _mm256_aesenclast_epi128(a: __m256i, round_key: __m256i) -> __m256 /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesdec_epi128) #[inline] -#[target_feature(enable = "avx512vaes,avx512vl")] +#[target_feature(enable = "vaes")] #[cfg_attr(test, assert_instr(vaesdec))] pub unsafe fn _mm256_aesdec_epi128(a: __m256i, round_key: __m256i) -> __m256i { aesdec_256(a, round_key) @@ -71,7 +71,7 @@ pub unsafe fn _mm256_aesdec_epi128(a: __m256i, round_key: __m256i) -> __m256i { /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_aesdeclast_epi128) #[inline] -#[target_feature(enable = "avx512vaes,avx512vl")] +#[target_feature(enable = "vaes")] #[cfg_attr(test, assert_instr(vaesdeclast))] pub unsafe fn _mm256_aesdeclast_epi128(a: __m256i, round_key: __m256i) -> __m256i { aesdeclast_256(a, round_key) @@ -82,7 +82,7 @@ pub unsafe fn _mm256_aesdeclast_epi128(a: __m256i, round_key: __m256i) -> __m256 /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesenc_epi128) #[inline] 
-#[target_feature(enable = "avx512vaes,avx512f")] +#[target_feature(enable = "vaes,avx512f")] #[cfg_attr(test, assert_instr(vaesenc))] pub unsafe fn _mm512_aesenc_epi128(a: __m512i, round_key: __m512i) -> __m512i { aesenc_512(a, round_key) @@ -93,7 +93,7 @@ pub unsafe fn _mm512_aesenc_epi128(a: __m512i, round_key: __m512i) -> __m512i { /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesenclast_epi128) #[inline] -#[target_feature(enable = "avx512vaes,avx512f")] +#[target_feature(enable = "vaes,avx512f")] #[cfg_attr(test, assert_instr(vaesenclast))] pub unsafe fn _mm512_aesenclast_epi128(a: __m512i, round_key: __m512i) -> __m512i { aesenclast_512(a, round_key) @@ -104,7 +104,7 @@ pub unsafe fn _mm512_aesenclast_epi128(a: __m512i, round_key: __m512i) -> __m512 /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesdec_epi128) #[inline] -#[target_feature(enable = "avx512vaes,avx512f")] +#[target_feature(enable = "vaes,avx512f")] #[cfg_attr(test, assert_instr(vaesdec))] pub unsafe fn _mm512_aesdec_epi128(a: __m512i, round_key: __m512i) -> __m512i { aesdec_512(a, round_key) @@ -115,7 +115,7 @@ pub unsafe fn _mm512_aesdec_epi128(a: __m512i, round_key: __m512i) -> __m512i { /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_aesdeclast_epi128) #[inline] -#[target_feature(enable = "avx512vaes,avx512f")] +#[target_feature(enable = "vaes,avx512f")] #[cfg_attr(test, assert_instr(vaesdeclast))] pub unsafe fn _mm512_aesdeclast_epi128(a: __m512i, round_key: __m512i) -> __m512i { aesdeclast_512(a, round_key) @@ -138,7 +138,7 @@ mod tests { // ideally we'd be using quickcheck here instead #[target_feature(enable = "avx2")] - unsafe fn helper_for_256_avx512vaes( + unsafe fn helper_for_256_vaes( linear: unsafe fn(__m128i, __m128i) -> __m128i, vectorized: unsafe fn(__m256i, __m256i) -> __m256i, ) { @@ -187,7 +187,7 @@ mod tests { setup_state_key(_mm512_broadcast_i32x4) } - #[simd_test(enable = "avx512vaes,avx512vl")] + #[simd_test(enable = "vaes,avx512vl")] unsafe fn test_mm256_aesdec_epi128() { // Constants taken from https://msdn.microsoft.com/en-us/library/cc664949.aspx. let (a, k) = setup_state_key_256(); @@ -196,10 +196,10 @@ mod tests { let r = _mm256_aesdec_epi128(a, k); assert_eq_m256i(r, e); - helper_for_256_avx512vaes(_mm_aesdec_si128, _mm256_aesdec_epi128); + helper_for_256_vaes(_mm_aesdec_si128, _mm256_aesdec_epi128); } - #[simd_test(enable = "avx512vaes,avx512vl")] + #[simd_test(enable = "vaes,avx512vl")] unsafe fn test_mm256_aesdeclast_epi128() { // Constants taken from https://msdn.microsoft.com/en-us/library/cc714178.aspx. let (a, k) = setup_state_key_256(); @@ -208,10 +208,10 @@ mod tests { let r = _mm256_aesdeclast_epi128(a, k); assert_eq_m256i(r, e); - helper_for_256_avx512vaes(_mm_aesdeclast_si128, _mm256_aesdeclast_epi128); + helper_for_256_vaes(_mm_aesdeclast_si128, _mm256_aesdeclast_epi128); } - #[simd_test(enable = "avx512vaes,avx512vl")] + #[simd_test(enable = "vaes,avx512vl")] unsafe fn test_mm256_aesenc_epi128() { // Constants taken from https://msdn.microsoft.com/en-us/library/cc664810.aspx. 
// they are repeated appropriately @@ -221,10 +221,10 @@ mod tests { let r = _mm256_aesenc_epi128(a, k); assert_eq_m256i(r, e); - helper_for_256_avx512vaes(_mm_aesenc_si128, _mm256_aesenc_epi128); + helper_for_256_vaes(_mm_aesenc_si128, _mm256_aesenc_epi128); } - #[simd_test(enable = "avx512vaes,avx512vl")] + #[simd_test(enable = "vaes,avx512vl")] unsafe fn test_mm256_aesenclast_epi128() { // Constants taken from https://msdn.microsoft.com/en-us/library/cc714136.aspx. let (a, k) = setup_state_key_256(); @@ -233,11 +233,11 @@ mod tests { let r = _mm256_aesenclast_epi128(a, k); assert_eq_m256i(r, e); - helper_for_256_avx512vaes(_mm_aesenclast_si128, _mm256_aesenclast_epi128); + helper_for_256_vaes(_mm_aesenclast_si128, _mm256_aesenclast_epi128); } #[target_feature(enable = "avx512f")] - unsafe fn helper_for_512_avx512vaes( + unsafe fn helper_for_512_vaes( linear: unsafe fn(__m128i, __m128i) -> __m128i, vectorized: unsafe fn(__m512i, __m512i) -> __m512i, ) { @@ -282,7 +282,7 @@ mod tests { assert_eq_m128i(_mm512_extracti32x4_epi32::<3>(r), e_decomp[3]); } - #[simd_test(enable = "avx512vaes,avx512f")] + #[simd_test(enable = "vaes,avx512f")] unsafe fn test_mm512_aesdec_epi128() { // Constants taken from https://msdn.microsoft.com/en-us/library/cc664949.aspx. let (a, k) = setup_state_key_512(); @@ -291,10 +291,10 @@ mod tests { let r = _mm512_aesdec_epi128(a, k); assert_eq_m512i(r, e); - helper_for_512_avx512vaes(_mm_aesdec_si128, _mm512_aesdec_epi128); + helper_for_512_vaes(_mm_aesdec_si128, _mm512_aesdec_epi128); } - #[simd_test(enable = "avx512vaes,avx512f")] + #[simd_test(enable = "vaes,avx512f")] unsafe fn test_mm512_aesdeclast_epi128() { // Constants taken from https://msdn.microsoft.com/en-us/library/cc714178.aspx. let (a, k) = setup_state_key_512(); @@ -303,10 +303,10 @@ mod tests { let r = _mm512_aesdeclast_epi128(a, k); assert_eq_m512i(r, e); - helper_for_512_avx512vaes(_mm_aesdeclast_si128, _mm512_aesdeclast_epi128); + helper_for_512_vaes(_mm_aesdeclast_si128, _mm512_aesdeclast_epi128); } - #[simd_test(enable = "avx512vaes,avx512f")] + #[simd_test(enable = "vaes,avx512f")] unsafe fn test_mm512_aesenc_epi128() { // Constants taken from https://msdn.microsoft.com/en-us/library/cc664810.aspx. let (a, k) = setup_state_key_512(); @@ -315,10 +315,10 @@ mod tests { let r = _mm512_aesenc_epi128(a, k); assert_eq_m512i(r, e); - helper_for_512_avx512vaes(_mm_aesenc_si128, _mm512_aesenc_epi128); + helper_for_512_vaes(_mm_aesenc_si128, _mm512_aesenc_epi128); } - #[simd_test(enable = "avx512vaes,avx512f")] + #[simd_test(enable = "vaes,avx512f")] unsafe fn test_mm512_aesenclast_epi128() { // Constants taken from https://msdn.microsoft.com/en-us/library/cc714136.aspx. 
let (a, k) = setup_state_key_512(); @@ -327,6 +327,6 @@ mod tests { let r = _mm512_aesenclast_epi128(a, k); assert_eq_m512i(r, e); - helper_for_512_avx512vaes(_mm_aesenclast_si128, _mm512_aesenclast_epi128); + helper_for_512_vaes(_mm_aesenclast_si128, _mm512_aesenclast_epi128); } } diff --git a/library/stdarch/crates/core_arch/src/x86/avx512vpclmulqdq.rs b/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs index 9bfeb903a..ea76708b8 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512vpclmulqdq.rs +++ b/library/stdarch/crates/core_arch/src/x86/vpclmulqdq.rs @@ -32,7 +32,7 @@ extern "C" { /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_clmulepi64_epi128) #[inline] -#[target_feature(enable = "avx512vpclmulqdq,avx512f")] +#[target_feature(enable = "vpclmulqdq,avx512f")] // technically according to Intel's documentation we don't need avx512f here, however LLVM gets confused otherwise #[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))] #[rustc_legacy_const_generics(2)] @@ -50,7 +50,7 @@ pub unsafe fn _mm512_clmulepi64_epi128<const IMM8: i32>(a: __m512i, b: __m512i) /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_clmulepi64_epi128) #[inline] -#[target_feature(enable = "avx512vpclmulqdq,avx512vl")] +#[target_feature(enable = "vpclmulqdq")] #[cfg_attr(test, assert_instr(vpclmul, IMM8 = 0))] #[rustc_legacy_const_generics(2)] pub unsafe fn _mm256_clmulepi64_epi128<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i { @@ -121,7 +121,7 @@ mod tests { // this function tests one of the possible 4 instances // with different inputs across lanes - #[target_feature(enable = "avx512vpclmulqdq,avx512f")] + #[target_feature(enable = "vpclmulqdq,avx512f")] unsafe fn verify_512_helper( linear: unsafe fn(__m128i, __m128i) -> __m128i, vectorized: unsafe fn(__m512i, __m512i) -> __m512i, @@ -162,7 +162,7 @@ mod tests { // this function tests one of the possible 4 instances // with different inputs across lanes for the VL version - #[target_feature(enable = "avx512vpclmulqdq,avx512vl")] + #[target_feature(enable = "vpclmulqdq,avx512vl")] unsafe fn verify_256_helper( linear: unsafe fn(__m128i, __m128i) -> __m128i, vectorized: unsafe fn(__m256i, __m256i) -> __m256i, @@ -204,7 +204,7 @@ mod tests { unroll! 
{assert_eq_m128i(_mm256_extracti128_si256::<2>(r),e_decomp[2]);} } - #[simd_test(enable = "avx512vpclmulqdq,avx512f")] + #[simd_test(enable = "vpclmulqdq,avx512f")] unsafe fn test_mm512_clmulepi64_epi128() { verify_kat_pclmul!( _mm512_broadcast_i32x4, @@ -230,7 +230,7 @@ mod tests { ); } - #[simd_test(enable = "avx512vpclmulqdq,avx512vl")] + #[simd_test(enable = "vpclmulqdq,avx512vl")] unsafe fn test_mm256_clmulepi64_epi128() { verify_kat_pclmul!( _mm256_broadcastsi128_si256, diff --git a/library/stdarch/crates/core_arch/tests/cpu-detection.rs b/library/stdarch/crates/core_arch/tests/cpu-detection.rs index 61f5f0905..08caca738 100644 --- a/library/stdarch/crates/core_arch/tests/cpu-detection.rs +++ b/library/stdarch/crates/core_arch/tests/cpu-detection.rs @@ -31,12 +31,9 @@ fn x86_all() { is_x86_feature_detected!("avx512vpopcntdq") ); println!("avx512vbmi2 {:?}", is_x86_feature_detected!("avx512vbmi2")); - println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni")); - println!("avx512vaes {:?}", is_x86_feature_detected!("avx512vaes")); - println!( - "avx512vpclmulqdq {:?}", - is_x86_feature_detected!("avx512vpclmulqdq") - ); + println!("gfni {:?}", is_x86_feature_detected!("gfni")); + println!("vaes {:?}", is_x86_feature_detected!("vaes")); + println!("vpclmulqdq {:?}", is_x86_feature_detected!("vpclmulqdq")); println!("avx512vnni {:?}", is_x86_feature_detected!("avx512vnni")); println!( "avx512bitalg {:?}", @@ -61,3 +58,15 @@ fn x86_all() { println!("xsaves: {:?}", is_x86_feature_detected!("xsaves")); println!("xsavec: {:?}", is_x86_feature_detected!("xsavec")); } + +#[test] +#[cfg(any(target_arch = "x86", target_arch = "x86_64"))] +#[allow(deprecated)] +fn x86_deprecated() { + println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni")); + println!("avx512vaes {:?}", is_x86_feature_detected!("avx512vaes")); + println!( + "avx512vpclmulqdq {:?}", + is_x86_feature_detected!("avx512vpclmulqdq") + ); +} diff --git a/library/stdarch/crates/intrinsic-test/Cargo.toml b/library/stdarch/crates/intrinsic-test/Cargo.toml index 5fde23c9e..7efbab755 100644 --- a/library/stdarch/crates/intrinsic-test/Cargo.toml +++ b/library/stdarch/crates/intrinsic-test/Cargo.toml @@ -2,7 +2,7 @@ name = "intrinsic-test" version = "0.1.0" authors = ["Jamie Cunliffe <Jamie.Cunliffe@arm.com>"] -edition = "2018" +edition = "2021" [dependencies] lazy_static = "1.4.0" @@ -14,4 +14,4 @@ log = "0.4.11" pretty_env_logger = "0.4.0" rayon = "1.5.0" diff = "0.1.12" -itertools = "0.10.1"
\ No newline at end of file +itertools = "0.10.1" diff --git a/library/stdarch/crates/intrinsic-test/missing_arm.txt b/library/stdarch/crates/intrinsic-test/missing_arm.txt index bbc8de584..3d7ead062 100644 --- a/library/stdarch/crates/intrinsic-test/missing_arm.txt +++ b/library/stdarch/crates/intrinsic-test/missing_arm.txt @@ -163,17 +163,6 @@ vcaddq_rot270_f32 vcaddq_rot90_f32 vcadd_rot270_f32 vcadd_rot90_f32 -vcombine_f32 -vcombine_p16 -vcombine_p8 -vcombine_s16 -vcombine_s32 -vcombine_s64 -vcombine_s8 -vcombine_u16 -vcombine_u32 -vcombine_u64 -vcombine_u8 vcvtaq_s32_f32 vcvtaq_u32_f32 vcvta_s32_f32 diff --git a/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs b/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs index d7b066485..7336c9e8b 100644 --- a/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs +++ b/library/stdarch/crates/intrinsic-test/src/acle_csv_parser.rs @@ -59,8 +59,8 @@ impl Into<Intrinsic> for ACLEIntrinsicLine { let signature = self.intrinsic; let (ret_ty, remaining) = signature.split_once(' ').unwrap(); - let results = type_from_c(ret_ty) - .unwrap_or_else(|_| panic!("Failed to parse return type: {}", ret_ty)); + let results = + type_from_c(ret_ty).unwrap_or_else(|_| panic!("Failed to parse return type: {ret_ty}")); let (name, args) = remaining.split_once('(').unwrap(); let args = args.trim_end_matches(')'); @@ -177,7 +177,7 @@ fn from_c(pos: usize, s: &str) -> Argument { Argument { pos, name, - ty: type_from_c(s).unwrap_or_else(|_| panic!("Failed to parse type: {}", s)), + ty: type_from_c(s).unwrap_or_else(|_| panic!("Failed to parse type: {s}")), constraints: vec![], } } diff --git a/library/stdarch/crates/intrinsic-test/src/intrinsic.rs b/library/stdarch/crates/intrinsic-test/src/intrinsic.rs index e0645a36b..fb4eb4cb7 100644 --- a/library/stdarch/crates/intrinsic-test/src/intrinsic.rs +++ b/library/stdarch/crates/intrinsic-test/src/intrinsic.rs @@ -109,7 +109,7 @@ impl Intrinsic { pub fn generate_loop_rust(&self, additional: &str, passes: u32) -> String { let constraints = self.arguments.as_constraint_parameters_rust(); let constraints = if !constraints.is_empty() { - format!("::<{}>", constraints) + format!("::<{constraints}>") } else { constraints }; diff --git a/library/stdarch/crates/intrinsic-test/src/main.rs b/library/stdarch/crates/intrinsic-test/src/main.rs index 43f2df08b..dac934574 100644 --- a/library/stdarch/crates/intrinsic-test/src/main.rs +++ b/library/stdarch/crates/intrinsic-test/src/main.rs @@ -58,7 +58,7 @@ fn gen_code_c( pass = gen_code_c( intrinsic, constraints, - format!("{}-{}", name, i), + format!("{name}-{i}"), p64_armv7_workaround ) ) @@ -117,7 +117,7 @@ int main(int argc, char **argv) {{ }}"#, header_files = header_files .iter() - .map(|header| format!("#include <{}>", header)) + .map(|header| format!("#include <{header}>")) .collect::<Vec<_>>() .join("\n"), arglists = intrinsic.arguments.gen_arglists_c(PASSES), @@ -148,7 +148,7 @@ fn gen_code_rust(intrinsic: &Intrinsic, constraints: &[&Argument], name: String) name = current.name, ty = current.ty.rust_type(), val = i, - pass = gen_code_rust(intrinsic, constraints, format!("{}-{}", name, i)) + pass = gen_code_rust(intrinsic, constraints, format!("{name}-{i}")) ) }) .collect() @@ -237,7 +237,7 @@ fn build_rust(intrinsics: &Vec<Intrinsic>, toolchain: &str, a32: bool) -> bool { intrinsics.iter().for_each(|i| { let rust_dir = format!(r#"rust_programs/{}"#, i.name); let _ = std::fs::create_dir_all(&rust_dir); - let rust_filename = format!(r#"{}/main.rs"#, 
rust_dir); + let rust_filename = format!(r#"{rust_dir}/main.rs"#); let mut file = File::create(&rust_filename).unwrap(); let c_code = generate_rust_program(&i, a32); @@ -355,7 +355,7 @@ fn main() { let filename = matches.value_of("INPUT").unwrap(); let toolchain = matches .value_of("TOOLCHAIN") - .map_or("".into(), |t| format!("+{}", t)); + .map_or("".into(), |t| format!("+{t}")); let cpp_compiler = matches.value_of("CPPCOMPILER").unwrap(); let c_runner = matches.value_of("RUNNER").unwrap_or(""); @@ -443,7 +443,7 @@ fn compare_outputs(intrinsics: &Vec<Intrinsic>, toolchain: &str, runner: &str, a let (c, rust) = match (c, rust) { (Ok(c), Ok(rust)) => (c, rust), - a => panic!("{:#?}", a), + a => panic!("{a:#?}"), }; if !c.status.success() { @@ -480,20 +480,20 @@ fn compare_outputs(intrinsics: &Vec<Intrinsic>, toolchain: &str, runner: &str, a intrinsics.iter().for_each(|reason| match reason { FailureReason::Difference(intrinsic, c, rust) => { - println!("Difference for intrinsic: {}", intrinsic); + println!("Difference for intrinsic: {intrinsic}"); let diff = diff::lines(c, rust); diff.iter().for_each(|diff| match diff { - diff::Result::Left(c) => println!("C: {}", c), - diff::Result::Right(rust) => println!("Rust: {}", rust), + diff::Result::Left(c) => println!("C: {c}"), + diff::Result::Right(rust) => println!("Rust: {rust}"), diff::Result::Both(_, _) => (), }); println!("****************************************************************"); } FailureReason::RunC(intrinsic) => { - println!("Failed to run C program for intrinsic {}", intrinsic) + println!("Failed to run C program for intrinsic {intrinsic}") } FailureReason::RunRust(intrinsic) => { - println!("Failed to run rust program for intrinsic {}", intrinsic) + println!("Failed to run rust program for intrinsic {intrinsic}") } }); println!("{} differences found", intrinsics.len()); diff --git a/library/stdarch/crates/intrinsic-test/src/types.rs b/library/stdarch/crates/intrinsic-test/src/types.rs index dd23586e7..7442ad5e6 100644 --- a/library/stdarch/crates/intrinsic-test/src/types.rs +++ b/library/stdarch/crates/intrinsic-test/src/types.rs @@ -25,7 +25,7 @@ impl FromStr for TypeKind { "poly" => Ok(Self::Poly), "uint" | "unsigned" => Ok(Self::UInt), "void" => Ok(Self::Void), - _ => Err(format!("Impossible to parse argument kind {}", s)), + _ => Err(format!("Impossible to parse argument kind {s}")), } } } @@ -199,14 +199,14 @@ impl IntrinsicType { simd_len: Some(simd_len), vec_len: None, .. - } => format!("{}{}x{}_t", kind.c_prefix(), bit_len, simd_len), + } => format!("{}{bit_len}x{simd_len}_t", kind.c_prefix()), IntrinsicType::Type { kind, bit_len: Some(bit_len), simd_len: Some(simd_len), vec_len: Some(vec_len), .. - } => format!("{}{}x{}x{}_t", kind.c_prefix(), bit_len, simd_len, vec_len), + } => format!("{}{bit_len}x{simd_len}x{vec_len}_t", kind.c_prefix()), _ => todo!("{:#?}", self), } } @@ -220,7 +220,7 @@ impl IntrinsicType { simd_len: Some(simd_len), vec_len: Some(_), .. - } => format!("{}{}x{}_t", kind.c_prefix(), bit_len, simd_len), + } => format!("{}{bit_len}x{simd_len}_t", kind.c_prefix()), _ => unreachable!("Shouldn't be called on this type"), } } @@ -234,21 +234,21 @@ impl IntrinsicType { simd_len: None, vec_len: None, .. - } => format!("{}{}", kind.rust_prefix(), bit_len), + } => format!("{}{bit_len}", kind.rust_prefix()), IntrinsicType::Type { kind, bit_len: Some(bit_len), simd_len: Some(simd_len), vec_len: None, .. 
- } => format!("{}{}x{}_t", kind.c_prefix(), bit_len, simd_len), + } => format!("{}{bit_len}x{simd_len}_t", kind.c_prefix()), IntrinsicType::Type { kind, bit_len: Some(bit_len), simd_len: Some(simd_len), vec_len: Some(vec_len), .. - } => format!("{}{}x{}x{}_t", kind.c_prefix(), bit_len, simd_len, vec_len), + } => format!("{}{bit_len}x{simd_len}x{vec_len}_t", kind.c_prefix()), _ => todo!("{:#?}", self), } } diff --git a/library/stdarch/crates/intrinsic-test/src/values.rs b/library/stdarch/crates/intrinsic-test/src/values.rs index 64b4d9fc9..68dd30d44 100644 --- a/library/stdarch/crates/intrinsic-test/src/values.rs +++ b/library/stdarch/crates/intrinsic-test/src/values.rs @@ -13,7 +13,7 @@ pub fn value_for_array(bits: u32, index: u32) -> String { } else if bits == 64 { format!("{:#X}", VALUES_64[index % VALUES_64.len()]) } else { - panic!("Unknown size: {}", bits); + panic!("Unknown size: {bits}"); } } diff --git a/library/stdarch/crates/simd-test-macro/Cargo.toml b/library/stdarch/crates/simd-test-macro/Cargo.toml index c3ecf981e..cd110c1d3 100644 --- a/library/stdarch/crates/simd-test-macro/Cargo.toml +++ b/library/stdarch/crates/simd-test-macro/Cargo.toml @@ -2,7 +2,7 @@ name = "simd-test-macro" version = "0.1.0" authors = ["Alex Crichton <alex@alexcrichton.com>"] -edition = "2018" +edition = "2021" [lib] proc-macro = true diff --git a/library/stdarch/crates/simd-test-macro/src/lib.rs b/library/stdarch/crates/simd-test-macro/src/lib.rs index 9d81a4c5e..2a31dd745 100644 --- a/library/stdarch/crates/simd-test-macro/src/lib.rs +++ b/library/stdarch/crates/simd-test-macro/src/lib.rs @@ -59,7 +59,7 @@ pub fn simd_test( let macro_test = match target .split('-') .next() - .unwrap_or_else(|| panic!("target triple contained no \"-\": {}", target)) + .unwrap_or_else(|| panic!("target triple contained no \"-\": {target}")) { "i686" | "x86_64" | "i586" => "is_x86_feature_detected", "arm" | "armv7" => "is_arm_feature_detected", @@ -82,7 +82,7 @@ pub fn simd_test( force_test = true; "is_mips64_feature_detected" } - t => panic!("unknown target: {}", t), + t => panic!("unknown target: {t}"), }; let macro_test = Ident::new(macro_test, Span::call_site()); diff --git a/library/stdarch/crates/std_detect/Cargo.toml b/library/stdarch/crates/std_detect/Cargo.toml index 3a482564e..589a3900a 100644 --- a/library/stdarch/crates/std_detect/Cargo.toml +++ b/library/stdarch/crates/std_detect/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" keywords = ["std", "run-time", "feature", "detection"] categories = ["hardware-support"] license = "MIT OR Apache-2.0" -edition = "2018" +edition = "2021" [badges] is-it-maintained-issue-resolution = { repository = "rust-lang/stdarch" } diff --git a/library/stdarch/crates/std_detect/src/detect/arch/x86.rs b/library/stdarch/crates/std_detect/src/detect/arch/x86.rs index 893e1a887..d0bf92d3e 100644 --- a/library/stdarch/crates/std_detect/src/detect/arch/x86.rs +++ b/library/stdarch/crates/std_detect/src/detect/arch/x86.rs @@ -68,9 +68,9 @@ features! { /// * `"avx512vbmi"` /// * `"avx512vpopcntdq"` /// * `"avx512vbmi2"` - /// * `"avx512gfni"` - /// * `"avx512vaes"` - /// * `"avx512vpclmulqdq"` + /// * `"gfni"` + /// * `"vaes"` + /// * `"vpclmulqdq"` /// * `"avx512vnni"` /// * `"avx512bitalg"` /// * `"avx512bf16"` @@ -95,6 +95,9 @@ features! 
{ /// [docs]: https://software.intel.com/sites/landingpage/IntrinsicsGuide #[stable(feature = "simd_x86", since = "1.27.0")] @BIND_FEATURE_NAME: "abm"; "lzcnt"; // abm is a synonym for lzcnt + @BIND_FEATURE_NAME: "avx512gfni"; "gfni"; #[deprecated(since = "1.67.0", note = "the `avx512gfni` feature has been renamed to `gfni`")]; + @BIND_FEATURE_NAME: "avx512vaes"; "vaes"; #[deprecated(since = "1.67.0", note = "the `avx512vaes` feature has been renamed to `vaes`")]; + @BIND_FEATURE_NAME: "avx512vpclmulqdq"; "vpclmulqdq"; #[deprecated(since = "1.67.0", note = "the `avx512vpclmulqdq` feature has been renamed to `vpclmulqdq`")]; @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] aes: "aes"; /// AES (Advanced Encryption Standard New Instructions AES-NI) @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] pclmulqdq: "pclmulqdq"; @@ -150,11 +153,11 @@ features! { /// Quadword) @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vbmi2: "avx512vbmi2"; /// AVX-512 VBMI2 (Additional byte, word, dword and qword capabilities) - @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512gfni: "avx512gfni"; + @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] gfni: "gfni"; /// AVX-512 GFNI (Galois Field New Instruction) - @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vaes: "avx512vaes"; + @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] vaes: "vaes"; /// AVX-512 VAES (Vector AES instruction) - @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vpclmulqdq: "avx512vpclmulqdq"; + @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] vpclmulqdq: "vpclmulqdq"; /// AVX-512 VPCLMULQDQ (Vector PCLMULQDQ instructions) @FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] avx512vnni: "avx512vnni"; /// AVX-512 VNNI (Vector Neural Network Instructions) diff --git a/library/stdarch/crates/std_detect/src/detect/cache.rs b/library/stdarch/crates/std_detect/src/detect/cache.rs index d01a5ea24..a94c655c3 100644 --- a/library/stdarch/crates/std_detect/src/detect/cache.rs +++ b/library/stdarch/crates/std_detect/src/detect/cache.rs @@ -179,7 +179,7 @@ fn detect_and_initialize() -> Initializer { /// the bit is set, the feature is enabled, and otherwise it is disabled. /// /// If the feature `std_detect_env_override` is enabled looks for the env -/// variable `RUST_STD_DETECT_UNSTABLE` and uses its its content to disable +/// variable `RUST_STD_DETECT_UNSTABLE` and uses its content to disable /// Features that would had been otherwise detected. #[inline] pub(crate) fn test(bit: u32) -> bool { diff --git a/library/stdarch/crates/std_detect/src/detect/macros.rs b/library/stdarch/crates/std_detect/src/detect/macros.rs index a467f9db6..45feec79f 100644 --- a/library/stdarch/crates/std_detect/src/detect/macros.rs +++ b/library/stdarch/crates/std_detect/src/detect/macros.rs @@ -17,7 +17,7 @@ macro_rules! features { @CFG: $cfg:meta; @MACRO_NAME: $macro_name:ident; @MACRO_ATTRS: $(#[$macro_attrs:meta])* - $(@BIND_FEATURE_NAME: $bind_feature:tt; $feature_impl:tt; )* + $(@BIND_FEATURE_NAME: $bind_feature:tt; $feature_impl:tt; $(#[$deprecate_attr:meta];)?)* $(@NO_RUNTIME_DETECTION: $nort_feature:tt; )* $(@FEATURE: #[$stability_attr:meta] $feature:ident: $feature_lit:tt; $(implied by target_features: [$($target_feature_lit:tt),*];)? @@ -35,7 +35,15 @@ macro_rules! features { }; )* $( - ($bind_feature) => { $crate::$macro_name!($feature_impl) }; + ($bind_feature) => { + { + $( + #[$deprecate_attr] macro_rules! 
deprecated_feature { {} => {}; } + deprecated_feature! {}; + )? + $crate::$macro_name!($feature_impl) + } + }; )* $( ($nort_feature) => { diff --git a/library/stdarch/crates/std_detect/src/detect/mod.rs b/library/stdarch/crates/std_detect/src/detect/mod.rs index 2bca84ca1..9a135c90a 100644 --- a/library/stdarch/crates/std_detect/src/detect/mod.rs +++ b/library/stdarch/crates/std_detect/src/detect/mod.rs @@ -47,7 +47,7 @@ cfg_if! { // On x86/x86_64 no OS specific functionality is required. #[path = "os/x86.rs"] mod os; - } else if #[cfg(all(target_os = "linux", feature = "libc"))] { + } else if #[cfg(all(any(target_os = "linux", target_os = "android"), feature = "libc"))] { #[path = "os/linux/mod.rs"] mod os; } else if #[cfg(all(target_os = "freebsd", feature = "libc"))] { diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs index 6c79ba86d..a75185d43 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs +++ b/library/stdarch/crates/std_detect/src/detect/os/linux/aarch64.rs @@ -329,7 +329,7 @@ mod tests { env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-empty-hwcap2-aarch64.auxv" ); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); println!("HWCAP : 0x{:0x}", v.hwcap); println!("HWCAP2: 0x{:0x}", v.hwcap2); @@ -341,7 +341,7 @@ mod tests { env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv" ); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); println!("HWCAP : 0x{:0x}", v.hwcap); println!("HWCAP2: 0x{:0x}", v.hwcap2); @@ -353,7 +353,7 @@ mod tests { env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-hwcap2-aarch64.auxv" ); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); println!("HWCAP : 0x{:0x}", v.hwcap); println!("HWCAP2: 0x{:0x}", v.hwcap2); diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs index c903903bd..d9e7b28ea 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs +++ b/library/stdarch/crates/std_detect/src/detect/os/linux/auxvec.rs @@ -313,7 +313,7 @@ mod tests { #[test] fn linux_rpi3() { let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-rpi3.auxv"); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); assert_eq!(v.hwcap, 4174038); assert_eq!(v.hwcap2, 16); @@ -322,7 +322,7 @@ mod tests { #[test] fn linux_macos_vb() { let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/macos-virtualbox-linux-x86-4850HQ.auxv"); - println!("file: {}", file); + println!("file: {file}"); // The file contains HWCAP but not HWCAP2. In that case, we treat HWCAP2 as zero. 
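One detail of the `features!` macro change above deserves a note: the renamed x86 feature strings (`avx512gfni` to `gfni`, `avx512vaes` to `vaes`, `avx512vpclmulqdq` to `vpclmulqdq`) stay accepted as aliases, and the new optional `$(#[$deprecate_attr:meta];)?` fragment lets a `@BIND_FEATURE_NAME` binding carry a `#[deprecated]` attribute. The expansion pins that attribute on a throwaway inner macro and invokes it immediately, so the warning fires at the caller of the detection macro. A minimal sketch of the same trick, with hypothetical names standing in for the real `std_detect` internals:

// Sketch only: `check!` plays the role of `is_x86_feature_detected!`.
macro_rules! check {
    ("gfni") => {
        true // real runtime detection would go here
    };
    ("avx512gfni") => {{
        // Defining a #[deprecated] item inside the expansion and using it
        // at once makes rustc report the deprecation at the invocation site.
        #[deprecated(note = "`avx512gfni` has been renamed to `gfni`")]
        macro_rules! deprecated_feature { {} => {}; }
        deprecated_feature! {};
        check!("gfni")
    }};
}

fn main() {
    let old = check!("avx512gfni"); // warns: deprecated alias
    let new = check!("gfni"); // no warning
    assert_eq!(old, new);
}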
let v = auxv_from_file(file).unwrap(); assert_eq!(v.hwcap, 126614527); @@ -332,7 +332,7 @@ mod tests { #[test] fn linux_artificial_aarch64() { let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-artificial-aarch64.auxv"); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); assert_eq!(v.hwcap, 0x0123456789abcdef); assert_eq!(v.hwcap2, 0x02468ace13579bdf); @@ -340,7 +340,7 @@ mod tests { #[test] fn linux_no_hwcap2_aarch64() { let file = concat!(env!("CARGO_MANIFEST_DIR"), "/src/detect/test_data/linux-no-hwcap2-aarch64.auxv"); - println!("file: {}", file); + println!("file: {file}"); let v = auxv_from_file(file).unwrap(); // An absent HWCAP2 is treated as zero, and does not prevent acceptance of HWCAP. assert_ne!(v.hwcap, 0); diff --git a/library/stdarch/crates/std_detect/src/detect/os/linux/riscv.rs b/library/stdarch/crates/std_detect/src/detect/os/linux/riscv.rs index 1ec06959a..91a85d58e 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/linux/riscv.rs +++ b/library/stdarch/crates/std_detect/src/detect/os/linux/riscv.rs @@ -1,73 +1,73 @@ -//! Run-time feature detection for RISC-V on Linux.
-
-use super::auxvec;
-use crate::detect::{bit, cache, Feature};
-
-/// Read list of supported features from the auxiliary vector.
-pub(crate) fn detect_features() -> cache::Initializer {
- let mut value = cache::Initializer::default();
- let enable_feature = |value: &mut cache::Initializer, feature, enable| {
- if enable {
- value.set(feature as u32);
- }
- };
- let enable_features = |value: &mut cache::Initializer, feature_slice: &[Feature], enable| {
- if enable {
- for feature in feature_slice {
- value.set(*feature as u32);
- }
- }
- };
-
- // The values are part of the platform-specific [asm/hwcap.h][hwcap]
- //
- // [hwcap]: https://github.com/torvalds/linux/blob/master/arch/riscv/include/asm/hwcap.h
- let auxv = auxvec::auxv().expect("read auxvec"); // should not fail on RISC-V platform
- enable_feature(
- &mut value,
- Feature::a,
- bit::test(auxv.hwcap, (b'a' - b'a').into()),
- );
- enable_feature(
- &mut value,
- Feature::c,
- bit::test(auxv.hwcap, (b'c' - b'a').into()),
- );
- enable_features(
- &mut value,
- &[Feature::d, Feature::f, Feature::zicsr],
- bit::test(auxv.hwcap, (b'd' - b'a').into()),
- );
- enable_features(
- &mut value,
- &[Feature::f, Feature::zicsr],
- bit::test(auxv.hwcap, (b'f' - b'a').into()),
- );
- let has_i = bit::test(auxv.hwcap, (b'i' - b'a').into());
- // If future RV128I is supported, implement with `enable_feature` here
- #[cfg(target_pointer_width = "64")]
- enable_feature(&mut value, Feature::rv64i, has_i);
- #[cfg(target_pointer_width = "32")]
- enable_feature(&mut value, Feature::rv32i, has_i);
- #[cfg(target_pointer_width = "32")]
- enable_feature(
- &mut value,
- Feature::rv32e,
- bit::test(auxv.hwcap, (b'e' - b'a').into()),
- );
- enable_feature(
- &mut value,
- Feature::h,
- bit::test(auxv.hwcap, (b'h' - b'a').into()),
- );
- enable_feature(
- &mut value,
- Feature::m,
- bit::test(auxv.hwcap, (b'm' - b'a').into()),
- );
- // FIXME: Auxvec does not show supervisor feature support, but this mode may be useful
- // to detect when Rust is used to write Linux kernel modules.
- // These should be more than Auxvec way to detect supervisor features.
-
- value
-}
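The listing above is removed and re-added with identical content (the rewrite is apparently whitespace-only), so the restored code below uses the same detection scheme: Linux reports each single-letter RISC-V ISA extension as one bit of `AT_HWCAP`, at position `letter - 'a'`, which is what the repeated `bit::test(auxv.hwcap, (b'x' - b'a').into())` calls probe. A self-contained sketch of that mapping, with a made-up `hwcap` value for illustration:

// Sketch: single-letter RISC-V extensions occupy AT_HWCAP bit (letter - 'a').
fn has_ext(hwcap: usize, ext: u8) -> bool {
    assert!(ext.is_ascii_lowercase());
    hwcap & (1 << (ext - b'a')) != 0
}

fn main() {
    // Hypothetical hwcap with the a, c, d, f, i and m bits (0, 2, 3, 5, 8, 12) set.
    let hwcap: usize = 0x112D;
    for ext in [b'a', b'c', b'd', b'f', b'i', b'm'] {
        assert!(has_ext(hwcap, ext));
    }
    assert!(!has_ext(hwcap, b'h')); // the hypervisor bit is absent in this sample
}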
+//! Run-time feature detection for RISC-V on Linux.
+
+use super::auxvec;
+use crate::detect::{bit, cache, Feature};
+
+/// Read list of supported features from the auxiliary vector.
+pub(crate) fn detect_features() -> cache::Initializer {
+ let mut value = cache::Initializer::default();
+ let enable_feature = |value: &mut cache::Initializer, feature, enable| {
+ if enable {
+ value.set(feature as u32);
+ }
+ };
+ let enable_features = |value: &mut cache::Initializer, feature_slice: &[Feature], enable| {
+ if enable {
+ for feature in feature_slice {
+ value.set(*feature as u32);
+ }
+ }
+ };
+
+ // The values are part of the platform-specific [asm/hwcap.h][hwcap]
+ //
+ // [hwcap]: https://github.com/torvalds/linux/blob/master/arch/riscv/include/asm/hwcap.h
+ let auxv = auxvec::auxv().expect("read auxvec"); // should not fail on RISC-V platform
+ enable_feature(
+ &mut value,
+ Feature::a,
+ bit::test(auxv.hwcap, (b'a' - b'a').into()),
+ );
+ enable_feature(
+ &mut value,
+ Feature::c,
+ bit::test(auxv.hwcap, (b'c' - b'a').into()),
+ );
+ enable_features(
+ &mut value,
+ &[Feature::d, Feature::f, Feature::zicsr],
+ bit::test(auxv.hwcap, (b'd' - b'a').into()),
+ );
+ enable_features(
+ &mut value,
+ &[Feature::f, Feature::zicsr],
+ bit::test(auxv.hwcap, (b'f' - b'a').into()),
+ );
+ let has_i = bit::test(auxv.hwcap, (b'i' - b'a').into());
+ // If future RV128I is supported, implement with `enable_feature` here
+ #[cfg(target_pointer_width = "64")]
+ enable_feature(&mut value, Feature::rv64i, has_i);
+ #[cfg(target_pointer_width = "32")]
+ enable_feature(&mut value, Feature::rv32i, has_i);
+ #[cfg(target_pointer_width = "32")]
+ enable_feature(
+ &mut value,
+ Feature::rv32e,
+ bit::test(auxv.hwcap, (b'e' - b'a').into()),
+ );
+ enable_feature(
+ &mut value,
+ Feature::h,
+ bit::test(auxv.hwcap, (b'h' - b'a').into()),
+ );
+ enable_feature(
+ &mut value,
+ Feature::m,
+ bit::test(auxv.hwcap, (b'm' - b'a').into()),
+ );
+ // FIXME: Auxvec does not show supervisor feature support, but this mode may be useful
+ // to detect when Rust is used to write Linux kernel modules.
+ // These should be more than Auxvec way to detect supervisor features.
+ + value +} diff --git a/library/stdarch/crates/std_detect/src/detect/os/x86.rs b/library/stdarch/crates/std_detect/src/detect/os/x86.rs index ea5f595ec..08f48cd17 100644 --- a/library/stdarch/crates/std_detect/src/detect/os/x86.rs +++ b/library/stdarch/crates/std_detect/src/detect/os/x86.rs @@ -211,10 +211,10 @@ pub(crate) fn detect_features() -> cache::Initializer { enable(extended_features_ecx, 1, Feature::avx512vbmi); enable(extended_features_ecx, 5, Feature::avx512bf16); enable(extended_features_ecx, 6, Feature::avx512vbmi2); - enable(extended_features_ecx, 8, Feature::avx512gfni); + enable(extended_features_ecx, 8, Feature::gfni); enable(extended_features_ecx, 8, Feature::avx512vp2intersect); - enable(extended_features_ecx, 9, Feature::avx512vaes); - enable(extended_features_ecx, 10, Feature::avx512vpclmulqdq); + enable(extended_features_ecx, 9, Feature::vaes); + enable(extended_features_ecx, 10, Feature::vpclmulqdq); enable(extended_features_ecx, 11, Feature::avx512vnni); enable(extended_features_ecx, 12, Feature::avx512bitalg); enable(extended_features_ecx, 14, Feature::avx512vpopcntdq); diff --git a/library/stdarch/crates/std_detect/tests/cpu-detection.rs b/library/stdarch/crates/std_detect/tests/cpu-detection.rs index ca8bf28f4..02ad77a63 100644 --- a/library/stdarch/crates/std_detect/tests/cpu-detection.rs +++ b/library/stdarch/crates/std_detect/tests/cpu-detection.rs @@ -15,7 +15,7 @@ extern crate std_detect; #[test] fn all() { for (f, e) in std_detect::detect::features() { - println!("{}: {}", f, e); + println!("{f}: {e}"); } } @@ -132,12 +132,9 @@ fn x86_all() { is_x86_feature_detected!("avx512vpopcntdq") ); println!("avx512vbmi2 {:?}", is_x86_feature_detected!("avx512vbmi2")); - println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni")); - println!("avx512vaes {:?}", is_x86_feature_detected!("avx512vaes")); - println!( - "avx512vpclmulqdq {:?}", - is_x86_feature_detected!("avx512vpclmulqdq") - ); + println!("gfni {:?}", is_x86_feature_detected!("gfni")); + println!("vaes {:?}", is_x86_feature_detected!("vaes")); + println!("vpclmulqdq {:?}", is_x86_feature_detected!("vpclmulqdq")); println!("avx512vnni {:?}", is_x86_feature_detected!("avx512vnni")); println!( "avx512bitalg {:?}", diff --git a/library/stdarch/crates/std_detect/tests/x86-specific.rs b/library/stdarch/crates/std_detect/tests/x86-specific.rs index 59e9a62fd..e481620c7 100644 --- a/library/stdarch/crates/std_detect/tests/x86-specific.rs +++ b/library/stdarch/crates/std_detect/tests/x86-specific.rs @@ -36,12 +36,9 @@ fn dump() { is_x86_feature_detected!("avx512vpopcntdq") ); println!("avx512vbmi2 {:?}", is_x86_feature_detected!("avx512vbmi2")); - println!("avx512gfni {:?}", is_x86_feature_detected!("avx512gfni")); - println!("avx512vaes {:?}", is_x86_feature_detected!("avx512vaes")); - println!( - "avx512vpclmulqdq {:?}", - is_x86_feature_detected!("avx512vpclmulqdq") - ); + println!("gfni {:?}", is_x86_feature_detected!("gfni")); + println!("vaes {:?}", is_x86_feature_detected!("vaes")); + println!("vpclmulqdq {:?}", is_x86_feature_detected!("vpclmulqdq")); println!("avx512vnni {:?}", is_x86_feature_detected!("avx512vnni")); println!( "avx512bitalg {:?}", diff --git a/library/stdarch/crates/stdarch-gen/Cargo.toml b/library/stdarch/crates/stdarch-gen/Cargo.toml index b339672f4..a5d19b199 100644 --- a/library/stdarch/crates/stdarch-gen/Cargo.toml +++ b/library/stdarch/crates/stdarch-gen/Cargo.toml @@ -2,7 +2,7 @@ name = "stdarch-gen" version = "0.1.0" authors = ["Heinz Gies <heinz@licenser.net>"] -edition 
= "2018" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/library/stdarch/crates/stdarch-gen/src/main.rs b/library/stdarch/crates/stdarch-gen/src/main.rs index d2f865753..750e88091 100644 --- a/library/stdarch/crates/stdarch-gen/src/main.rs +++ b/library/stdarch/crates/stdarch-gen/src/main.rs @@ -59,7 +59,7 @@ fn type_len(t: &str) -> usize { "4_" => 4, "8_" => 8, "16" => 16, - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } else if s.len() == 3 { s[1].parse::<usize>().unwrap() * type_sub_len(t) @@ -77,7 +77,7 @@ fn type_sub_len(t: &str) -> usize { "2_t" => 2, "3_t" => 3, "4_t" => 4, - _ => panic!("unknown type len: {}", t), + _ => panic!("unknown type len: {t}"), } } } @@ -92,7 +92,7 @@ fn type_bits(t: &str) -> usize { | "float32x4_t" | "f32" => 32, "int64x1_t" | "int64x2_t" | "uint64x1_t" | "uint64x2_t" | "poly64x1_t" | "poly64x2_t" | "i64" | "u64" | "float64x1_t" | "float64x2_t" | "f64" => 64, - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -105,7 +105,7 @@ fn type_exp_len(t: &str, base_len: usize) -> usize { 4 => 2, 8 => 3, 16 => 4, - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -118,7 +118,7 @@ fn type_bits_exp_len(t: &str) -> usize { "int32x2_t" | "int32x4_t" | "uint32x2_t" | "uint32x4_t" | "i32" | "u32" => 5, "int64x1_t" | "int64x2_t" | "uint64x1_t" | "uint64x2_t" | "poly64x1_t" | "poly64x2_t" | "i64" | "u64" => 6, - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -243,7 +243,7 @@ fn type_to_suffix(t: &str) -> &str { "p8" => "b_p8", "p16" => "h_p16", "p128" => "q_p128", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -297,7 +297,7 @@ fn type_to_n_suffix(t: &str) -> &str { "u16" => "h_n_u16", "u32" => "s_n_u32", "u64" => "d_n_u64", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -325,7 +325,7 @@ fn type_to_noq_n_suffix(t: &str) -> &str { "u16" => "h_n_u16", "u32" => "s_n_u32", "u64" => "d_n_u64", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -354,7 +354,7 @@ fn type_to_rot_suffix(c_name: &str, suf: &str) -> String { if suf.starts_with("q") { format!("{}q_{}{}", ns[0], ns[1], &suf[1..]) } else { - format!("{}{}", c_name, suf) + format!("{c_name}{suf}") } } @@ -426,7 +426,7 @@ fn type_to_noq_suffix(t: &str) -> &str { "poly16x4_t" | "poly16x8_t" => "_p16", "poly64x1_t" | "poly64x2_t" | "p64" => "_p64", "p128" => "_p128", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -521,7 +521,7 @@ fn type_to_global_type(t: &str) -> &str { "p16" => "p16", "p64" => "p64", "p128" => "p128", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -530,7 +530,7 @@ fn type_to_sub_type(t: &str) -> String { match s.len() { 2 => String::from(t), 3 => format!("{}x{}_t", s[0], s[1]), - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -547,9 +547,9 @@ fn type_to_native_type(t: &str) -> String { "uin" => format!("u{}", &s[0][4..]), "flo" => format!("f{}", &s[0][5..]), "pol" => format!("u{}", &s[0][4..]), - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), }, - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -566,7 +566,7 @@ fn native_type_to_type(t: &str) -> &str { "f16" => "float16x4_t", "f32" => "float32x2_t", "f64" => "float64x1_t", - _ => panic!("unknown type: {}", t), + 
_ => panic!("unknown type: {t}"), } } @@ -583,7 +583,7 @@ fn native_type_to_long_type(t: &str) -> &str { "f16" => "float16x8_t", "f32" => "float32x4_t", "f64" => "float64x2_t", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -601,7 +601,7 @@ fn type_to_half(t: &str) -> &str { "poly16x8_t" => "poly16x4_t", "float32x4_t" => "float32x2_t", "float64x2_t" => "float64x1_t", - _ => panic!("unknown half type for {}", t), + _ => panic!("unknown half type for {t}"), } } @@ -624,7 +624,7 @@ fn transpose1(x: usize) -> &'static str { 4 => "[0, 4, 2, 6]", 8 => "[0, 8, 2, 10, 4, 12, 6, 14]", 16 => "[0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30]", - _ => panic!("unknown transpose order of len {}", x), + _ => panic!("unknown transpose order of len {x}"), } } @@ -634,7 +634,7 @@ fn transpose2(x: usize) -> &'static str { 4 => "[1, 5, 3, 7]", 8 => "[1, 9, 3, 11, 5, 13, 7, 15]", 16 => "[1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31]", - _ => panic!("unknown transpose order of len {}", x), + _ => panic!("unknown transpose order of len {x}"), } } @@ -644,7 +644,7 @@ fn zip1(x: usize) -> &'static str { 4 => "[0, 4, 1, 5]", 8 => "[0, 8, 1, 9, 2, 10, 3, 11]", 16 => "[0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23]", - _ => panic!("unknown zip order of len {}", x), + _ => panic!("unknown zip order of len {x}"), } } @@ -654,7 +654,7 @@ fn zip2(x: usize) -> &'static str { 4 => "[2, 6, 3, 7]", 8 => "[4, 12, 5, 13, 6, 14, 7, 15]", 16 => "[8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31]", - _ => panic!("unknown zip order of len {}", x), + _ => panic!("unknown zip order of len {x}"), } } @@ -664,7 +664,7 @@ fn unzip1(x: usize) -> &'static str { 4 => "[0, 2, 4, 6]", 8 => "[0, 2, 4, 6, 8, 10, 12, 14]", 16 => "[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]", - _ => panic!("unknown unzip order of len {}", x), + _ => panic!("unknown unzip order of len {x}"), } } @@ -674,13 +674,13 @@ fn unzip2(x: usize) -> &'static str { 4 => "[1, 3, 5, 7]", 8 => "[1, 3, 5, 7, 9, 11, 13, 15]", 16 => "[1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]", - _ => panic!("unknown unzip order of len {}", x), + _ => panic!("unknown unzip order of len {x}"), } } fn values(t: &str, vs: &[String]) -> String { if vs.len() == 1 && !t.contains('x') { - format!(": {} = {}", t, vs[0]) + format!(": {t} = {}", vs[0]) } else if vs.len() == 1 && type_to_global_type(t) == "f64" { format!(": {} = {}", type_to_global_type(t), vs[0]) } else { @@ -723,7 +723,7 @@ fn max_val(t: &str) -> &'static str { "i64" => "0x7F_FF_FF_FF_FF_FF_FF_FF", "f32" => "3.40282347e+38", "f64" => "1.7976931348623157e+308", - _ => panic!("No TRUE for type {}", t), + _ => panic!("No TRUE for type {t}"), } } @@ -739,7 +739,7 @@ fn min_val(t: &str) -> &'static str { "i64" => "-9223372036854775808", "f32" => "-3.40282347e+38", "f64" => "-1.7976931348623157e+308", - _ => panic!("No TRUE for type {}", t), + _ => panic!("No TRUE for type {t}"), } } @@ -749,7 +749,7 @@ fn true_val(t: &str) -> &'static str { "u16" => "0xFF_FF", "u32" => "0xFF_FF_FF_FF", "u64" => "0xFF_FF_FF_FF_FF_FF_FF_FF", - _ => panic!("No TRUE for type {}", t), + _ => panic!("No TRUE for type {t}"), } } @@ -763,7 +763,7 @@ fn ff_val(t: &str) -> &'static str { "i16" => "0xFF_FF", "i32" => "0xFF_FF_FF_FF", "i64" => "0xFF_FF_FF_FF_FF_FF_FF_FF", - _ => panic!("No TRUE for type {}", t), + _ => panic!("No TRUE for type {t}"), } } @@ -784,7 +784,7 @@ fn bits(t: &str) -> &'static str { "p8x" => "8", "p16" => "16", "p64" => "64", - _ => 
panic!("Unknown bits for type {}", t), + _ => panic!("Unknown bits for type {t}"), } } @@ -801,7 +801,7 @@ fn bits_minus_one(t: &str) -> &'static str { "p8x" => "7", "p16" => "15", "p64" => "63", - _ => panic!("Unknown bits for type {}", t), + _ => panic!("Unknown bits for type {t}"), } } @@ -818,7 +818,7 @@ fn half_bits(t: &str) -> &'static str { "p8x" => "4", "p16" => "8", "p64" => "32", - _ => panic!("Unknown bits for type {}", t), + _ => panic!("Unknown bits for type {t}"), } } @@ -852,7 +852,7 @@ fn type_len_str(t: &str) -> &'static str { "poly16x8_t" => "8", "poly64x1_t" => "1", "poly64x2_t" => "2", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -886,7 +886,7 @@ fn type_len_minus_one_str(t: &str) -> &'static str { "poly16x8_t" => "7", "poly64x1_t" => "0", "poly64x2_t" => "1", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -920,7 +920,7 @@ fn type_half_len_str(t: &str) -> &'static str { "poly16x8_t" => "4", "poly64x1_t" => "0", "poly64x2_t" => "1", - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), } } @@ -954,12 +954,12 @@ fn type_to_ext(t: &str, v: bool, r: bool, pi8: bool) -> String { native ), _ if pi8 => format!(".p0i8"), - _ => format!(".p0{}", native), + _ => format!(".p0{native}"), }; let sub_type = match &native[0..1] { "i" | "f" => native, "u" => native.replace("u", "i"), - _ => panic!("unknown type: {}", t), + _ => panic!("unknown type: {t}"), }; let ext = format!( "v{}{}{}", @@ -1041,8 +1041,8 @@ fn gen_aarch64( fn_type: Fntype, ) -> (String, String) { let name = match suffix { - Normal => format!("{}{}", current_name, type_to_suffix(in_t[1])), - NoQ => format!("{}{}", current_name, type_to_noq_suffix(in_t[1])), + Normal => format!("{current_name}{}", type_to_suffix(in_t[1])), + NoQ => format!("{current_name}{}", type_to_noq_suffix(in_t[1])), Double => format!( "{}{}", current_name, @@ -1053,15 +1053,15 @@ fn gen_aarch64( current_name, type_to_noq_double_suffixes(out_t, in_t[1]) ), - NSuffix => format!("{}{}", current_name, type_to_n_suffix(in_t[1])), + NSuffix => format!("{current_name}{}", type_to_n_suffix(in_t[1])), DoubleN => format!( "{}{}", current_name, type_to_double_n_suffixes(out_t, in_t[1]) ), - NoQNSuffix => format!("{}{}", current_name, type_to_noq_n_suffix(in_t[1])), - OutSuffix => format!("{}{}", current_name, type_to_suffix(out_t)), - OutNSuffix => format!("{}{}", current_name, type_to_n_suffix(out_t)), + NoQNSuffix => format!("{current_name}{}", type_to_noq_n_suffix(in_t[1])), + OutSuffix => format!("{current_name}{}", type_to_suffix(out_t)), + OutNSuffix => format!("{current_name}{}", type_to_n_suffix(out_t)), OutNox => format!( "{}{}", current_name, @@ -1092,7 +1092,7 @@ fn gen_aarch64( current_name, type_to_lane_suffixes(out_t, in_t[1], false) ), - In2 => format!("{}{}", current_name, type_to_suffix(in_t[2])), + In2 => format!("{current_name}{}", type_to_suffix(in_t[2])), In2Lane => format!( "{}{}", current_name, @@ -1122,11 +1122,11 @@ fn gen_aarch64( }; let current_fn = if let Some(current_fn) = current_fn.clone() { if link_aarch64.is_some() { - panic!("[{}] Can't specify link and fn at the same time.", name) + panic!("[{name}] Can't specify link and fn at the same time.") } current_fn } else if link_aarch64.is_some() { - format!("{}_", name) + format!("{name}_") } else { if multi_fn.is_empty() { panic!( @@ -1174,8 +1174,8 @@ fn gen_aarch64( let sub = type_to_sub_type(in_t[1]); ( match type_sub_len(in_t[1]) { - 1 => format!("a: {}, n: i64, ptr: {}", sub, 
ptr_type), - 2 => format!("a: {}, b: {}, n: i64, ptr: {}", sub, sub, ptr_type), + 1 => format!("a: {sub}, n: i64, ptr: {ptr_type}"), + 2 => format!("a: {sub}, b: {sub}, n: i64, ptr: {ptr_type}"), 3 => format!( "a: {}, b: {}, c: {}, n: i64, ptr: {}", sub, sub, sub, ptr_type @@ -1187,7 +1187,7 @@ fn gen_aarch64( _ => panic!("unsupported type: {}", in_t[1]), }, if out_t != "void" { - format!(" -> {}", out_t) + format!(" -> {out_t}") } else { String::new() }, @@ -1200,7 +1200,7 @@ fn gen_aarch64( 3 => format!("a: {}, b: {}, c: {}, n: i32", in_t[0], in_t[1], in_t[2]), _ => unimplemented!("unknown para_num"), }, - format!(" -> {}", out_t), + format!(" -> {out_t}"), ) } } else if matches!(fn_type, Fntype::Store) { @@ -1211,23 +1211,20 @@ fn gen_aarch64( type_to_native_type(in_t[1]) }; let subs = match type_sub_len(in_t[1]) { - 1 => format!("a: {}", sub), - 2 => format!("a: {}, b: {}", sub, sub), - 3 => format!("a: {}, b: {}, c: {}", sub, sub, sub), - 4 => format!("a: {}, b: {}, c: {}, d: {}", sub, sub, sub, sub), + 1 => format!("a: {sub}"), + 2 => format!("a: {sub}, b: {sub}"), + 3 => format!("a: {sub}, b: {sub}, c: {sub}"), + 4 => format!("a: {sub}, b: {sub}, c: {sub}, d: {sub}"), _ => panic!("unsupported type: {}", in_t[1]), }; - (format!("{}, ptr: *mut {}", subs, ptr_type), String::new()) + (format!("{subs}, ptr: *mut {ptr_type}"), String::new()) } else if is_vldx(&name) { let ptr_type = if name.contains("dup") { type_to_native_type(out_t) } else { type_to_sub_type(out_t) }; - ( - format!("ptr: *const {}", ptr_type), - format!(" -> {}", out_t), - ) + (format!("ptr: *const {ptr_type}"), format!(" -> {out_t}")) } else { ( match para_num { @@ -1256,7 +1253,7 @@ fn gen_aarch64( assert_eq!(constns.len(), 2); format!(r#"<const {}: i32, const {}: i32>"#, constns[0], constns[1]) } else { - format!(r#"<const {}: i32>"#, constn) + format!(r#"<const {constn}: i32>"#) } } else { String::new() @@ -1314,7 +1311,7 @@ fn gen_aarch64( para_num + 1 ) } else { - format!("\n#[rustc_legacy_const_generics({})]", para_num) + format!("\n#[rustc_legacy_const_generics({para_num})]") } } else { String::new() @@ -1323,7 +1320,7 @@ fn gen_aarch64( let fn_output = if out_t == "void" { String::new() } else { - format!("-> {} ", out_t) + format!("-> {out_t} ") }; let fn_inputs = match para_num { 1 => format!("(a: {})", in_t[0]), @@ -1373,14 +1370,14 @@ fn gen_aarch64( } else if link_aarch64.is_some() && matches!(fn_type, Fntype::Store) { let cast = if is_vstx(&name) { " as _" } else { "" }; match type_sub_len(in_t[1]) { - 1 => format!(r#"{}{}(b, a{})"#, ext_c, current_fn, cast), - 2 => format!(r#"{}{}(b.0, b.1, a{})"#, ext_c, current_fn, cast), - 3 => format!(r#"{}{}(b.0, b.1, b.2, a{})"#, ext_c, current_fn, cast), - 4 => format!(r#"{}{}(b.0, b.1, b.2, b.3, a{})"#, ext_c, current_fn, cast), + 1 => format!(r#"{ext_c}{current_fn}(b, a{cast})"#), + 2 => format!(r#"{ext_c}{current_fn}(b.0, b.1, a{cast})"#), + 3 => format!(r#"{ext_c}{current_fn}(b.0, b.1, b.2, a{cast})"#), + 4 => format!(r#"{ext_c}{current_fn}(b.0, b.1, b.2, b.3, a{cast})"#), _ => panic!("unsupported type: {}", in_t[1]), } } else if link_aarch64.is_some() && is_vldx(&name) { - format!(r#"{}{}(a as _)"#, ext_c, current_fn,) + format!(r#"{ext_c}{current_fn}(a as _)"#,) } else { let trans: [&str; 2] = if link_t[3] != out_t { ["transmute(", ")"] @@ -1388,7 +1385,7 @@ fn gen_aarch64( ["", ""] }; match (multi_calls.len(), para_num, fixed.len()) { - (0, 1, 0) => format!(r#"{}{}{}(a){}"#, ext_c, trans[0], current_fn, trans[1]), + (0, 1, 0) => 
format!(r#"{ext_c}{}{current_fn}(a){}"#, trans[0], trans[1]), (0, 1, _) => { let fixed: Vec<String> = fixed.iter().take(type_len(in_t[0])).cloned().collect(); @@ -1402,11 +1399,11 @@ fn gen_aarch64( trans[1], ) } - (0, 2, _) => format!(r#"{}{}{}(a, b){}"#, ext_c, trans[0], current_fn, trans[1],), - (0, 3, _) => format!(r#"{}{}(a, b, c)"#, ext_c, current_fn,), - (_, 1, _) => format!(r#"{}{}"#, ext_c, multi_calls,), - (_, 2, _) => format!(r#"{}{}"#, ext_c, multi_calls,), - (_, 3, _) => format!(r#"{}{}"#, ext_c, multi_calls,), + (0, 2, _) => format!(r#"{ext_c}{}{current_fn}(a, b){}"#, trans[0], trans[1],), + (0, 3, _) => format!(r#"{ext_c}{current_fn}(a, b, c)"#,), + (_, 1, _) => format!(r#"{ext_c}{multi_calls}"#,), + (_, 2, _) => format!(r#"{ext_c}{multi_calls}"#,), + (_, 3, _) => format!(r#"{ext_c}{multi_calls}"#,), (_, _, _) => String::new(), } } @@ -1768,8 +1765,8 @@ fn gen_arm( separate: bool, ) -> (String, String) { let name = match suffix { - Normal => format!("{}{}", current_name, type_to_suffix(in_t[1])), - NoQ => format!("{}{}", current_name, type_to_noq_suffix(in_t[1])), + Normal => format!("{current_name}{}", type_to_suffix(in_t[1])), + NoQ => format!("{current_name}{}", type_to_noq_suffix(in_t[1])), Double => format!( "{}{}", current_name, @@ -1780,15 +1777,15 @@ fn gen_arm( current_name, type_to_noq_double_suffixes(out_t, in_t[1]) ), - NSuffix => format!("{}{}", current_name, type_to_n_suffix(in_t[1])), + NSuffix => format!("{current_name}{}", type_to_n_suffix(in_t[1])), DoubleN => format!( "{}{}", current_name, type_to_double_n_suffixes(out_t, in_t[1]) ), - NoQNSuffix => format!("{}{}", current_name, type_to_noq_n_suffix(in_t[1])), - OutSuffix => format!("{}{}", current_name, type_to_suffix(out_t)), - OutNSuffix => format!("{}{}", current_name, type_to_n_suffix(out_t)), + NoQNSuffix => format!("{current_name}{}", type_to_noq_n_suffix(in_t[1])), + OutSuffix => format!("{current_name}{}", type_to_suffix(out_t)), + OutNSuffix => format!("{current_name}{}", type_to_n_suffix(out_t)), OutNox => format!( "{}{}", current_name, @@ -1819,7 +1816,7 @@ fn gen_arm( current_name, type_to_lane_suffixes(out_t, in_t[1], false) ), - In2 => format!("{}{}", current_name, type_to_suffix(in_t[2])), + In2 => format!("{current_name}{}", type_to_suffix(in_t[2])), In2Lane => format!( "{}{}", current_name, @@ -1873,7 +1870,7 @@ fn gen_arm( } current_fn } else if link_aarch64.is_some() || link_arm.is_some() { - format!("{}_", name) + format!("{name}_") } else { if multi_fn.is_empty() { panic!( @@ -1980,9 +1977,9 @@ fn gen_arm( }; let sub_type = type_to_sub_type(in_t[1]); let inputs = match type_sub_len(in_t[1]) { - 1 => format!("a: {}", sub_type), - 2 => format!("a: {}, b: {}", sub_type, sub_type,), - 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), + 1 => format!("a: {sub_type}"), + 2 => format!("a: {sub_type}, b: {sub_type}",), + 3 => format!("a: {sub_type}, b: {sub_type}, c: {sub_type}",), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, @@ -1992,12 +1989,9 @@ fn gen_arm( let out = if out_t == "void" { String::new() } else { - format!(" -> {}", out_t) + format!(" -> {out_t}") }; - ( - format!("ptr: {}, {}, n: i32, size: i32", ptr_type, inputs), - out, - ) + (format!("ptr: {ptr_type}, {inputs}, n: i32, size: i32"), out) } else { let (_, const_type) = if const_arm.contains(":") { let consts: Vec<_> = @@ -2011,15 +2005,15 @@ fn gen_arm( }; ( match para_num { - 1 => format!("a: {}, n: {}", in_t[0], const_type), - 2 => format!("a: {}, b: {}, n: {}", 
in_t[0], in_t[1], const_type), + 1 => format!("a: {}, n: {const_type}", in_t[0]), + 2 => format!("a: {}, b: {}, n: {const_type}", in_t[0], in_t[1]), 3 => format!( - "a: {}, b: {}, c: {}, n: {}", - in_t[0], in_t[1], in_t[2], const_type + "a: {}, b: {}, c: {}, n: {const_type}", + in_t[0], in_t[1], in_t[2] ), _ => unimplemented!("unknown para_num"), }, - format!(" -> {}", out_t), + format!(" -> {out_t}"), ) } } else if out_t != link_arm_t[3] { @@ -2038,9 +2032,9 @@ fn gen_arm( } else if matches!(fn_type, Fntype::Store) { let sub_type = type_to_sub_type(in_t[1]); let inputs = match type_sub_len(in_t[1]) { - 1 => format!("a: {}", sub_type), - 2 => format!("a: {}, b: {}", sub_type, sub_type,), - 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), + 1 => format!("a: {sub_type}"), + 2 => format!("a: {sub_type}, b: {sub_type}",), + 3 => format!("a: {sub_type}, b: {sub_type}, c: {sub_type}",), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, @@ -2053,14 +2047,11 @@ fn gen_arm( (type_to_native_type(in_t[1]), "") }; ( - format!("ptr: *mut {}, {}{}", ptr_type, inputs, size), + format!("ptr: *mut {ptr_type}, {inputs}{size}"), String::new(), ) } else if is_vldx(&name) { - ( - format!("ptr: *const i8, size: i32"), - format!(" -> {}", out_t), - ) + (format!("ptr: *const i8, size: i32"), format!(" -> {out_t}")) } else { (String::new(), String::new()) } @@ -2084,20 +2075,20 @@ fn gen_arm( }; let sub_type = type_to_sub_type(in_t[1]); let mut inputs = match type_sub_len(in_t[1]) { - 1 => format!("a: {}", sub_type,), - 2 => format!("a: {}, b: {}", sub_type, sub_type,), - 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), + 1 => format!("a: {sub_type}",), + 2 => format!("a: {sub_type}, b: {sub_type}",), + 3 => format!("a: {sub_type}, b: {sub_type}, c: {sub_type}",), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, ), _ => panic!("unknown type: {}", in_t[1]), }; - inputs.push_str(&format!(", n: i64, ptr: {}", ptr_type)); + inputs.push_str(&format!(", n: i64, ptr: {ptr_type}")); let out = if out_t == "void" { String::new() } else { - format!(" -> {}", out_t) + format!(" -> {out_t}") }; (inputs, out) } else if const_aarch64.contains("dup-in_len-N as ttn") { @@ -2111,7 +2102,7 @@ fn gen_arm( ), _ => unimplemented!("unknown para_num"), }, - format!(" -> {}", out_t), + format!(" -> {out_t}"), ) } else { ( @@ -2121,7 +2112,7 @@ fn gen_arm( 3 => format!("a: {}, b: {}, c: {}, n: i32", in_t[0], in_t[1], in_t[2]), _ => unimplemented!("unknown para_num"), }, - format!(" -> {}", out_t), + format!(" -> {out_t}"), ) } } else if out_t != link_aarch64_t[3] { @@ -2140,9 +2131,9 @@ fn gen_arm( } else if matches!(fn_type, Fntype::Store) { let sub_type = type_to_sub_type(in_t[1]); let mut inputs = match type_sub_len(in_t[1]) { - 1 => format!("a: {}", sub_type,), - 2 => format!("a: {}, b: {}", sub_type, sub_type,), - 3 => format!("a: {}, b: {}, c: {}", sub_type, sub_type, sub_type,), + 1 => format!("a: {sub_type}",), + 2 => format!("a: {sub_type}, b: {sub_type}",), + 3 => format!("a: {sub_type}, b: {sub_type}, c: {sub_type}",), 4 => format!( "a: {}, b: {}, c: {}, d: {}", sub_type, sub_type, sub_type, sub_type, @@ -2154,7 +2145,7 @@ fn gen_arm( } else { type_to_native_type(in_t[1]) }; - inputs.push_str(&format!(", ptr: *mut {}", ptr_type)); + inputs.push_str(&format!(", ptr: *mut {ptr_type}")); (inputs, String::new()) } else if is_vldx(&name) { let ptr_type = if name.contains("dup") { @@ -2162,10 +2153,7 @@ fn gen_arm( } else { 
type_to_sub_type(out_t) }; - ( - format!("ptr: *const {}", ptr_type), - format!(" -> {}", out_t), - ) + (format!("ptr: *const {ptr_type}"), format!(" -> {out_t}")) } else { (String::new(), String::new()) } @@ -2181,7 +2169,7 @@ fn gen_arm( )); }; let const_declare = if let Some(constn) = constn { - format!(r#"<const {}: i32>"#, constn) + format!(r#"<const {constn}: i32>"#) } else { String::new() }; @@ -2216,7 +2204,7 @@ fn gen_arm( String::new() }; let const_legacy = if constn.is_some() { - format!("\n#[rustc_legacy_const_generics({})]", para_num) + format!("\n#[rustc_legacy_const_generics({para_num})]") } else { String::new() }; @@ -2224,7 +2212,7 @@ fn gen_arm( let fn_output = if out_t == "void" { String::new() } else { - format!("-> {} ", out_t) + format!("-> {out_t} ") }; let fn_inputs = match para_num { 1 => format!("(a: {})", in_t[0]), @@ -2274,15 +2262,15 @@ fn gen_arm( cnt }; match para_num { - 1 => format!("{}(a, {})", current_fn, cnt), - 2 => format!("{}(a, b, {})", current_fn, cnt), + 1 => format!("{current_fn}(a, {cnt})"), + 2 => format!("{current_fn}(a, b, {cnt})"), _ => String::new(), } } } else if out_t != link_arm_t[3] { match para_num { - 1 => format!("transmute({}(a))", current_fn,), - 2 => format!("transmute({}(transmute(a), transmute(b)))", current_fn,), + 1 => format!("transmute({current_fn}(a))",), + 2 => format!("transmute({current_fn}(transmute(a), transmute(b)))",), _ => String::new(), } } else if matches!(fn_type, Fntype::Store) { @@ -2295,10 +2283,10 @@ fn gen_arm( ("", String::new()) }; match type_sub_len(in_t[1]) { - 1 => format!("{}(a{}, b{})", current_fn, cast, size), - 2 => format!("{}(a{}, b.0, b.1{})", current_fn, cast, size), - 3 => format!("{}(a{}, b.0, b.1, b.2{})", current_fn, cast, size), - 4 => format!("{}(a{}, b.0, b.1, b.2, b.3{})", current_fn, cast, size), + 1 => format!("{current_fn}(a{cast}, b{size})"), + 2 => format!("{current_fn}(a{cast}, b.0, b.1{size})"), + 3 => format!("{current_fn}(a{cast}, b.0, b.1, b.2{size})"), + 4 => format!("{current_fn}(a{cast}, b.0, b.1, b.2, b.3{size})"), _ => String::new(), } } else if link_arm.is_some() && is_vldx(&name) { @@ -2345,31 +2333,31 @@ fn gen_arm( cnt.push_str(&const_aarch64); } cnt.push_str(")"); - format!("{}(a, {})", current_fn, cnt) + format!("{current_fn}(a, {cnt})") } else { match para_num { - 1 => format!("{}(a, {})", current_fn, const_aarch64), - 2 => format!("{}(a, b, {})", current_fn, const_aarch64), + 1 => format!("{current_fn}(a, {const_aarch64})"), + 2 => format!("{current_fn}(a, b, {const_aarch64})"), _ => String::new(), } } } else if out_t != link_aarch64_t[3] { match para_num { - 1 => format!("transmute({}(a))", current_fn,), - 2 => format!("transmute({}(a, b))", current_fn,), + 1 => format!("transmute({current_fn}(a))",), + 2 => format!("transmute({current_fn}(a, b))",), _ => String::new(), } } else if matches!(fn_type, Fntype::Store) { let cast = if is_vstx(&name) { " as _" } else { "" }; match type_sub_len(in_t[1]) { - 1 => format!("{}(b, a{})", current_fn, cast), - 2 => format!("{}(b.0, b.1, a{})", current_fn, cast), - 3 => format!("{}(b.0, b.1, b.2, a{})", current_fn, cast), - 4 => format!("{}(b.0, b.1, b.2, b.3, a{})", current_fn, cast), + 1 => format!("{current_fn}(b, a{cast})"), + 2 => format!("{current_fn}(b.0, b.1, a{cast})"), + 3 => format!("{current_fn}(b.0, b.1, b.2, a{cast})"), + 4 => format!("{current_fn}(b.0, b.1, b.2, b.3, a{cast})"), _ => String::new(), } } else if link_aarch64.is_some() && is_vldx(&name) { - format!("{}(a as _)", current_fn) + format!("{current_fn}(a 
as _)") } else { String::new() }; @@ -2421,7 +2409,7 @@ fn gen_arm( } else { let call = { let stmts = match (multi_calls.len(), para_num, fixed.len()) { - (0, 1, 0) => format!(r#"{}{}(a)"#, ext_c, current_fn,), + (0, 1, 0) => format!(r#"{ext_c}{current_fn}(a)"#,), (0, 1, _) => { let fixed: Vec<String> = fixed.iter().take(type_len(in_t[0])).cloned().collect(); @@ -2433,11 +2421,11 @@ fn gen_arm( current_fn, ) } - (0, 2, _) => format!(r#"{}{}(a, b)"#, ext_c, current_fn,), - (0, 3, _) => format!(r#"{}{}(a, b, c)"#, ext_c, current_fn,), - (_, 1, _) => format!(r#"{}{}"#, ext_c, multi_calls,), - (_, 2, _) => format!(r#"{}{}"#, ext_c, multi_calls,), - (_, 3, _) => format!(r#"{}{}"#, ext_c, multi_calls,), + (0, 2, _) => format!(r#"{ext_c}{current_fn}(a, b)"#,), + (0, 3, _) => format!(r#"{ext_c}{current_fn}(a, b, c)"#,), + (_, 1, _) => format!(r#"{ext_c}{multi_calls}"#,), + (_, 2, _) => format!(r#"{ext_c}{multi_calls}"#,), + (_, 3, _) => format!(r#"{ext_c}{multi_calls}"#,), (_, _, _) => String::new(), }; if stmts != String::new() { @@ -2452,8 +2440,8 @@ fn gen_arm( } }; let stable_aarch64 = match target { - Default | ArmV7 | Vfp4 | FPArmV8 | AES => String::from("\n#[cfg_attr(target_arch = \"aarch64\", stable(feature = \"neon_intrinsics\", since = \"1.59.0\"))]"), - RDM => String::from("\n#[cfg_attr(target_arch = \"aarch64\", stable(feature = \"rdm_intrinsics\", since = \"1.62.0\"))]"), + Default | ArmV7 | Vfp4 | FPArmV8 | AES => String::from("\n#[cfg_attr(not(target_arch = \"arm\"), stable(feature = \"neon_intrinsics\", since = \"1.59.0\"))]"), + RDM => String::from("\n#[cfg_attr(not(target_arch = \"arm\"), stable(feature = \"rdm_intrinsics\", since = \"1.62.0\"))]"), _ => String::new(), }; let function_doc = create_doc_string(current_comment, &name); @@ -2536,9 +2524,9 @@ fn expand_intrinsic(intr: &str, t: &str) -> String { "poly64x1_t" => "i64x1", "poly64x2_t" => "i64x2", */ - _ => panic!("unknown type for extension: {}", t), + _ => panic!("unknown type for extension: {t}"), }; - format!(r#""{}{}""#, intr, ext) + format!(r#""{intr}{ext}""#) } else if intr.ends_with(".s") { let ext = match t { "int8x8_t" => "s8", @@ -2571,9 +2559,9 @@ fn expand_intrinsic(intr: &str, t: &str) -> String { "poly64x1_t" => "i64x1", "poly64x2_t" => "i64x2", */ - _ => panic!("unknown type for extension: {}", t), + _ => panic!("unknown type for extension: {t}"), }; - format!(r#""{}{}""#, &intr[..intr.len() - 1], ext) + format!(r#""{}{ext}""#, &intr[..intr.len() - 1]) } else if intr.ends_with(".l") { let ext = match t { "int8x8_t" => "8", @@ -2604,9 +2592,9 @@ fn expand_intrinsic(intr: &str, t: &str) -> String { "float64x2_t" => "64", "poly64x1_t" => "64", "poly64x2_t" => "64", - _ => panic!("unknown type for extension: {}", t), + _ => panic!("unknown type for extension: {t}"), }; - format!(r#""{}{}""#, &intr[..intr.len() - 1], ext) + format!(r#""{}{ext}""#, &intr[..intr.len() - 1]) } else { intr.to_string() } @@ -2655,7 +2643,7 @@ fn get_call( "halflen" => type_len(in_t[1]) / 2, _ => 0, }; - let mut s = format!("{} [", const_declare); + let mut s = format!("{const_declare} ["); for i in 0..len { if i != 0 { s.push_str(", "); @@ -2693,9 +2681,9 @@ fn get_call( if i != 0 || j != 0 { s.push_str(", "); } - s.push_str(&format!("{} * {} as u32", base_len, &fn_format[2])); + s.push_str(&format!("{base_len} * {} as u32", &fn_format[2])); if j != 0 { - s.push_str(&format!(" + {}", j)); + s.push_str(&format!(" + {j}")); } } } @@ -2709,7 +2697,7 @@ fn get_call( "in_ttn" => type_to_native_type(in_t[1]), _ => String::new(), }; - 
return format!("{} as {}", &fn_format[1], t); + return format!("{} as {t}", &fn_format[1]); } if fn_name.starts_with("ins") { let fn_format: Vec<_> = fn_name.split('-').map(|v| v.to_string()).collect(); @@ -2726,7 +2714,7 @@ fn get_call( "in0_len" => type_len(in_t[0]), _ => 0, }; - let mut s = format!("{} [", const_declare); + let mut s = format!("{const_declare} ["); for i in 0..len { if i != 0 { s.push_str(", "); @@ -2760,7 +2748,7 @@ fn get_call( fn_format[2], fn_format[2] ); } else { - return format!(r#"static_assert_imm{}!({});"#, len, fn_format[2]); + return format!(r#"static_assert_imm{len}!({});"#, fn_format[2]); } } if fn_name.starts_with("static_assert") { @@ -2781,13 +2769,13 @@ fn get_call( }; if lim1 == lim2 { return format!( - r#"static_assert!({} : i32 where {} == {});"#, - fn_format[1], fn_format[1], lim1 + r#"static_assert!({} : i32 where {} == {lim1});"#, + fn_format[1], fn_format[1] ); } else { return format!( - r#"static_assert!({} : i32 where {} >= {} && {} <= {});"#, - fn_format[1], fn_format[1], lim1, fn_format[1], lim2 + r#"static_assert!({} : i32 where {} >= {lim1} && {} <= {lim2});"#, + fn_format[1], fn_format[1], fn_format[1] ); } } @@ -2945,7 +2933,7 @@ fn get_call( if fn_name == "fixed" { let (re_name, re_type) = re.unwrap(); let fixed: Vec<String> = fixed.iter().take(type_len(in_t[1])).cloned().collect(); - return format!(r#"let {}{};"#, re_name, values(&re_type, &fixed)); + return format!(r#"let {re_name}{};"#, values(&re_type, &fixed)); } if fn_name == "fixed-half-right" { let fixed: Vec<String> = fixed.iter().take(type_len(in_t[1])).cloned().collect(); @@ -3083,9 +3071,9 @@ fn get_call( re_name, re_type, fn_name, param_str ) } else if fn_name.starts_with("*") { - format!(r#"{} = {};"#, fn_name, param_str) + format!(r#"{fn_name} = {param_str};"#) } else { - format!(r#"{}({})"#, fn_name, param_str) + format!(r#"{fn_name}({param_str})"#) }; return fn_str; } @@ -3337,7 +3325,7 @@ mod test { in_t = [spec[0], spec[1], spec[2]]; out_t = spec[3]; } else { - panic!("Bad spec: {}", line) + panic!("Bad spec: {line}") } if b.len() == 0 { if matches!(fn_type, Fntype::Store) { @@ -3430,8 +3418,8 @@ mod test { .arg(&arm_out_path) .arg(&aarch64_out_path) .status() { - eprintln!("Could not format `{}`: {}", arm_out_path.to_str().unwrap(), e); - eprintln!("Could not format `{}`: {}", aarch64_out_path.to_str().unwrap(), e); + eprintln!("Could not format `{}`: {e}", arm_out_path.to_str().unwrap()); + eprintln!("Could not format `{}`: {e}", aarch64_out_path.to_str().unwrap()); }; */ Ok(()) diff --git a/library/stdarch/crates/stdarch-test/Cargo.toml b/library/stdarch/crates/stdarch-test/Cargo.toml index 012b4e959..23bddeda6 100644 --- a/library/stdarch/crates/stdarch-test/Cargo.toml +++ b/library/stdarch/crates/stdarch-test/Cargo.toml @@ -2,7 +2,7 @@ name = "stdarch-test" version = "0.1.0" authors = ["Alex Crichton <alex@alexcrichton.com>"] -edition = "2018" +edition = "2021" [dependencies] assert-instr-macro = { path = "../assert-instr-macro" } diff --git a/library/stdarch/crates/stdarch-test/src/disassembly.rs b/library/stdarch/crates/stdarch-test/src/disassembly.rs index 3ace6b20e..8e4d57d4e 100644 --- a/library/stdarch/crates/stdarch-test/src/disassembly.rs +++ b/library/stdarch/crates/stdarch-test/src/disassembly.rs @@ -81,7 +81,7 @@ pub(crate) fn disassemble_myself() -> HashSet<Function> { .args(add_args) .arg(&me) .output() - .unwrap_or_else(|_| panic!("failed to execute objdump. OBJDUMP={}", objdump)); + .unwrap_or_else(|_| panic!("failed to execute objdump. 
OBJDUMP={objdump}")); println!( "{}\n{}", output.status, @@ -103,7 +103,7 @@ fn parse(output: &str) -> HashSet<Function> { lines.clone().count() ); for line in output.lines().take(100) { - println!("{}", line); + println!("{line}"); } let mut functions = HashSet::new(); @@ -112,9 +112,9 @@ fn parse(output: &str) -> HashSet<Function> { if !header.ends_with(':') || !header.contains("stdarch_test_shim") { continue; } - eprintln!("header: {}", header); + eprintln!("header: {header}"); let symbol = normalize(header); - eprintln!("normalized symbol: {}", symbol); + eprintln!("normalized symbol: {symbol}"); let mut instructions = Vec::new(); while let Some(instruction) = lines.next() { if instruction.ends_with(':') { diff --git a/library/stdarch/crates/stdarch-test/src/lib.rs b/library/stdarch/crates/stdarch-test/src/lib.rs index eba17771c..e0cf46cb4 100644 --- a/library/stdarch/crates/stdarch-test/src/lib.rs +++ b/library/stdarch/crates/stdarch-test/src/lib.rs @@ -64,10 +64,10 @@ pub fn assert(shim_addr: usize, fnname: &str, expected: &str) { // Make sure that the shim is not removed black_box(shim_addr); - //eprintln!("shim name: {}", fnname); + //eprintln!("shim name: {fnname}"); let function = &DISASSEMBLY .get(&Function::new(fnname)) - .unwrap_or_else(|| panic!("function \"{}\" not found in the disassembly", fnname)); + .unwrap_or_else(|| panic!("function \"{fnname}\" not found in the disassembly")); //eprintln!(" function: {:?}", function); let mut instrs = &function.instrs[..]; @@ -165,9 +165,9 @@ pub fn assert(shim_addr: usize, fnname: &str, expected: &str) { // Help debug by printing out the found disassembly, and then panic as we // didn't find the instruction. - println!("disassembly for {}: ", fnname,); + println!("disassembly for {fnname}: ",); for (i, instr) in instrs.iter().enumerate() { - println!("\t{:2}: {}", i, instr); + println!("\t{i:2}: {instr}"); } if !found { @@ -194,7 +194,7 @@ pub fn assert_skip_test_ok(name: &str) { if env::var("STDARCH_TEST_EVERYTHING").is_err() { return; } - panic!("skipped test `{}` when it shouldn't be skipped", name); + panic!("skipped test `{name}` when it shouldn't be skipped"); } // See comment in `assert-instr-macro` crate for why this exists diff --git a/library/stdarch/crates/stdarch-verify/Cargo.toml b/library/stdarch/crates/stdarch-verify/Cargo.toml index 6362e3d57..56548fd08 100644 --- a/library/stdarch/crates/stdarch-verify/Cargo.toml +++ b/library/stdarch/crates/stdarch-verify/Cargo.toml @@ -2,7 +2,7 @@ name = "stdarch-verify" version = "0.1.0" authors = ["Alex Crichton <alex@alexcrichton.com>"] -edition = "2018" +edition = "2021" [dependencies] proc-macro2 = "1.0" diff --git a/library/stdarch/crates/stdarch-verify/src/lib.rs b/library/stdarch/crates/stdarch-verify/src/lib.rs index 22108d26a..9b66137ba 100644 --- a/library/stdarch/crates/stdarch-verify/src/lib.rs +++ b/library/stdarch/crates/stdarch-verify/src/lib.rs @@ -85,20 +85,20 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { .iter() .map(|&(ref f, path)| { let name = &f.sig.ident; - // println!("{}", name); + // println!("{name}"); let mut arguments = Vec::new(); let mut const_arguments = Vec::new(); for input in f.sig.inputs.iter() { let ty = match *input { syn::FnArg::Typed(ref c) => &c.ty, - _ => panic!("invalid argument on {}", name), + _ => panic!("invalid argument on {name}"), }; arguments.push(to_type(ty)); } for generic in f.sig.generics.params.iter() { let ty = match *generic { syn::GenericParam::Const(ref c) => &c.ty, - _ => panic!("invalid generic 
argument on {}", name), + _ => panic!("invalid generic argument on {name}"), }; const_arguments.push(to_type(ty)); } @@ -144,12 +144,12 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { // strip leading underscore from fn name when building a test // _mm_foo -> mm_foo such that the test name is test_mm_foo. - let test_name_string = format!("{}", name); + let test_name_string = format!("{name}"); let mut test_name_id = test_name_string.as_str(); while test_name_id.starts_with('_') { test_name_id = &test_name_id[1..]; } - let has_test = tests.contains(&format!("test_{}", test_name_id)); + let has_test = tests.contains(&format!("test_{test_name_id}")); quote! { Function { @@ -167,7 +167,7 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream { .collect::<Vec<_>>(); let ret = quote! { #input: &[Function] = &[#(#functions),*]; }; - // println!("{}", ret); + // println!("{ret}"); ret.into() } @@ -336,7 +336,7 @@ fn to_type(t: &syn::Type) -> proc_macro2::TokenStream { "v4f32" => quote! { &v4f32 }, "v2f64" => quote! { &v2f64 }, - s => panic!("unsupported type: \"{}\"", s), + s => panic!("unsupported type: \"{s}\""), }, syn::Type::Ptr(syn::TypePtr { ref elem, diff --git a/library/stdarch/crates/stdarch-verify/tests/arm.rs b/library/stdarch/crates/stdarch-verify/tests/arm.rs index 6ce5ce05f..dd6720ef0 100644 --- a/library/stdarch/crates/stdarch-verify/tests/arm.rs +++ b/library/stdarch/crates/stdarch-verify/tests/arm.rs @@ -628,7 +628,7 @@ fn verify_all_signatures() { if let Err(e) = matches(rust, arm) { println!("failed to verify `{}`", rust.name); - println!(" * {}", e); + println!(" * {e}"); all_valid = false; } } @@ -801,7 +801,7 @@ fn parse_intrinsic(node: &Rc<Node>) -> Intrinsic { let instruction = match instruction { Some(s) => s.trim().to_lowercase(), - None => panic!("can't find instruction for `{}`", name), + None => panic!("can't find instruction for `{name}`"), }; Intrinsic { @@ -973,7 +973,7 @@ fn parse_ty_base(s: &str) -> &'static Type { "uint8x8x3_t" => &U8X8X3, "uint8x8x4_t" => &U8X8X4, - _ => panic!("failed to parse html type {:?}", s), + _ => panic!("failed to parse html type {s:?}"), } } diff --git a/library/stdarch/crates/stdarch-verify/tests/mips.rs b/library/stdarch/crates/stdarch-verify/tests/mips.rs index 1eb86dc29..365057b1d 100644 --- a/library/stdarch/crates/stdarch-verify/tests/mips.rs +++ b/library/stdarch/crates/stdarch-verify/tests/mips.rs @@ -125,7 +125,7 @@ impl<'a> From<&'a str> for MsaTy { "u64" => MsaTy::u64, "void" => MsaTy::Void, "void *" => MsaTy::MutVoidPtr, - v => panic!("unknown ty: \"{}\"", v), + v => panic!("unknown ty: \"{v}\""), } } } @@ -198,8 +198,8 @@ fn verify_all_signatures() { } use std::convert::TryFrom; - let intrinsic: MsaIntrinsic = TryFrom::try_from(line) - .unwrap_or_else(|_| panic!("failed to parse line: \"{}\"", line)); + let intrinsic: MsaIntrinsic = + TryFrom::try_from(line).unwrap_or_else(|_| panic!("failed to parse line: \"{line}\"")); assert!(!intrinsics.contains_key(&intrinsic.id)); intrinsics.insert(intrinsic.id.clone(), intrinsic); } @@ -253,7 +253,7 @@ fn verify_all_signatures() { if let Err(e) = matches(rust, mips) { println!("failed to verify `{}`", rust.name); - println!(" * {}", e); + println!(" * {e}"); all_valid = false; } } diff --git a/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs b/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs index 89494bfd2..cd9bd18ea 100644 --- a/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs +++ 
b/library/stdarch/crates/stdarch-verify/tests/x86-intel.rs @@ -367,7 +367,7 @@ fn verify_all_signatures() { } println!("failed to verify `{}`", rust.name); for error in errors { - println!(" * {}", error); + println!(" * {error}"); } all_valid = false; } @@ -403,18 +403,18 @@ fn verify_all_signatures() { if PRINT_MISSING_LISTS || PRINT_MISSING_LISTS_MARKDOWN { for (k, v) in missing { if PRINT_MISSING_LISTS_MARKDOWN { - println!("\n<details><summary>{:?}</summary><p>\n", k); + println!("\n<details><summary>{k:?}</summary><p>\n"); for intel in v { let url = format!( "https://software.intel.com/sites/landingpage\ /IntrinsicsGuide/#text={}&expand=5236", intel.name ); - println!(" * [ ] [`{}`]({})", intel.name, url); + println!(" * [ ] [`{}`]({url})", intel.name); } println!("</p></details>\n"); } else { - println!("\n{:?}\n", k); + println!("\n{k:?}\n"); for intel in v { println!("\t{}", intel.name); } @@ -471,6 +471,18 @@ fn matches(rust: &Function, intel: &Intrinsic) -> Result<(), String> { continue; } + // Some CPUs support VAES/GFNI/VPCLMULQDQ without AVX512, even though + // the Intel documentation states that those instructions require + // AVX512VL. + if *cpuid == "AVX512VL" + && intel + .cpuid + .iter() + .any(|x| matches!(&**x, "VAES" | "GFNI" | "VPCLMULQDQ")) + { + continue; + } + let cpuid = cpuid .chars() .flat_map(|c| c.to_lowercase()) diff --git a/library/stdarch/examples/Cargo.toml b/library/stdarch/examples/Cargo.toml index e2590ed9f..38f497fa6 100644 --- a/library/stdarch/examples/Cargo.toml +++ b/library/stdarch/examples/Cargo.toml @@ -7,7 +7,7 @@ authors = [ "Gonzalo Brito Gadeschi <gonzalobg88@gmail.com>", ] description = "Examples of the stdarch crate." -edition = "2018" +edition = "2021" default-run = "hex" [dependencies] diff --git a/library/stdarch/examples/connect5.rs b/library/stdarch/examples/connect5.rs index 1b3325785..09e7e48a7 100644 --- a/library/stdarch/examples/connect5.rs +++ b/library/stdarch/examples/connect5.rs @@ -1256,7 +1256,7 @@ fn main() { pos_disp(&test1); if pos_is_end(&test1) { - println!("Game over!!!!!! at Move {}", i); + println!("Game over!!!!!! 
at Move {i}"); count = i + 1; break; } diff --git a/library/stdarch/examples/hex.rs b/library/stdarch/examples/hex.rs index d982a71b9..a961793a0 100644 --- a/library/stdarch/examples/hex.rs +++ b/library/stdarch/examples/hex.rs @@ -40,7 +40,7 @@ fn main() { io::stdin().read_to_end(&mut input).unwrap(); let mut dst = vec![0; 2 * input.len()]; let s = hex_encode(&input, &mut dst).unwrap(); - println!("{}", s); + println!("{s}"); } fn hex_encode<'a>(src: &[u8], dst: &'a mut [u8]) -> Result<&'a str, usize> { diff --git a/library/stdarch/triagebot.toml b/library/stdarch/triagebot.toml index fa0824ac5..f946af7f6 100644 --- a/library/stdarch/triagebot.toml +++ b/library/stdarch/triagebot.toml @@ -1 +1,4 @@ [assign] + +[assign.owners] +"*" = ["@Amanieu"] diff --git a/library/test/Cargo.toml b/library/test/Cargo.toml index 61b6f33bc..18cb023d2 100644 --- a/library/test/Cargo.toml +++ b/library/test/Cargo.toml @@ -7,11 +7,9 @@ edition = "2021" crate-type = ["dylib", "rlib"] [dependencies] -cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] } getopts = { version = "0.2.21", features = ['rustc-dep-of-std'] } std = { path = "../std" } core = { path = "../core" } -libc = { version = "0.2", default-features = false } panic_unwind = { path = "../panic_unwind" } panic_abort = { path = "../panic_abort" } diff --git a/library/test/src/cli.rs b/library/test/src/cli.rs index 796796e07..9d22ebbee 100644 --- a/library/test/src/cli.rs +++ b/library/test/src/cli.rs @@ -309,7 +309,8 @@ fn parse_opts_impl(matches: getopts::Matches) -> OptRes { // FIXME: Copied from librustc_ast until linkage errors are resolved. Issue #47566 fn is_nightly() -> bool { // Whether this is a feature-staged build, i.e., on the beta or stable channel - let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); + let disable_unstable_features = + option_env!("CFG_DISABLE_UNSTABLE_FEATURES").map(|s| s != "0").unwrap_or(false); // Whether we should enable unstable features for bootstrapping let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok(); diff --git a/library/test/src/console.rs b/library/test/src/console.rs index 24cbe035f..1ee68c854 100644 --- a/library/test/src/console.rs +++ b/library/test/src/console.rs @@ -53,6 +53,7 @@ pub struct ConsoleTestState { pub metrics: MetricMap, pub failures: Vec<(TestDesc, Vec<u8>)>, pub not_failures: Vec<(TestDesc, Vec<u8>)>, + pub ignores: Vec<(TestDesc, Vec<u8>)>, pub time_failures: Vec<(TestDesc, Vec<u8>)>, pub options: Options, } @@ -76,6 +77,7 @@ impl ConsoleTestState { metrics: MetricMap::new(), failures: Vec::new(), not_failures: Vec::new(), + ignores: Vec::new(), time_failures: Vec::new(), options: opts.options, }) @@ -194,7 +196,10 @@ fn handle_test_result(st: &mut ConsoleTestState, completed_test: CompletedTest) st.passed += 1; st.not_failures.push((test, stdout)); } - TestResult::TrIgnored => st.ignored += 1, + TestResult::TrIgnored => { + st.ignored += 1; + st.ignores.push((test, stdout)); + } TestResult::TrBench(bs) => { st.metrics.insert_metric( test.name.as_slice(), diff --git a/library/test/src/formatters/terse.rs b/library/test/src/formatters/terse.rs index 0837ab169..a431acfbc 100644 --- a/library/test/src/formatters/terse.rs +++ b/library/test/src/formatters/terse.rs @@ -254,6 +254,15 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> { self.write_plain("\n\n")?; + // Custom handling of cases where there is only 1 test to execute and that test was ignored. 
+ // We want to show more detailed information (why the test was ignored) for investigation purposes.
+ if self.total_test_count == 1 && state.ignores.len() == 1 {
+ let test_desc = &state.ignores[0].0;
+ if let Some(im) = test_desc.ignore_message {
+ self.write_plain(format!("test: {}, ignore_message: {}\n\n", test_desc.name, im))?;
+ }
+ }
+
 Ok(success)
 }
}
diff --git a/library/test/src/lib.rs b/library/test/src/lib.rs index 69fb529d7..88d8e5fe9 100644 --- a/library/test/src/lib.rs +++ b/library/test/src/lib.rs
@@ -204,7 +204,7 @@ fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
}

/// Invoked when unit tests terminate. Returns `Result::Err` if the test is
-/// considered a failure. By default, invokes `report() and checks for a `0`
+/// considered a failure. By default, invokes `report()` and checks for a `0`
/// result.
pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
 let code = result.report().to_i32();
diff --git a/library/test/src/tests.rs b/library/test/src/tests.rs index 3a0260f86..44776fb0a 100644 --- a/library/test/src/tests.rs +++ b/library/test/src/tests.rs
@@ -790,6 +790,7 @@ fn should_sort_failures_before_printing_them() {
 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
 options: Options::new(),
 not_failures: Vec::new(),
+ ignores: Vec::new(),
 time_failures: Vec::new(),
 };
diff --git a/library/unwind/src/lib.rs b/library/unwind/src/lib.rs index 3753071d5..edc10aa39 100644 --- a/library/unwind/src/lib.rs +++ b/library/unwind/src/lib.rs
@@ -127,3 +127,7 @@ extern "C" {}

#[cfg(target_os = "haiku")]
#[link(name = "gcc_s")]
extern "C" {}
+
+#[cfg(target_os = "nto")]
+#[link(name = "gcc_s")]
+extern "C" {}
diff --git a/library/unwind/src/libunwind.rs b/library/unwind/src/libunwind.rs index 036a35869..eeeed3afc 100644 --- a/library/unwind/src/libunwind.rs +++ b/library/unwind/src/libunwind.rs
@@ -89,7 +89,7 @@ pub type _Unwind_Exception_Cleanup_Fn =

// FIXME: The `#[link]` attributes on `extern "C"` block marks those symbols declared in
// the block are reexported in dylib build of std. This is needed when build rustc with
-// feature `llvm-libunwind', as no other cdylib will provided those _Unwind_* symbols.
+// feature `llvm-libunwind`, as no other cdylib will provide those _Unwind_* symbols.
// However the `link` attribute is duplicated multiple times and does not just export symbol,
// a better way to manually export symbol would be another attribute like `#[export]`.
// See the logic in function rustc_codegen_ssa::src::back::exported_symbols, module |
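A closing note on the mechanical change that dominates this section: alongside the edition bumps to 2021, positional `format!`/`panic!`/`println!` arguments are rewritten as inline format args, which capture an identifier directly in the format string. That feature needs only Rust 1.58, not the new edition, and the two spellings produce identical output. A standalone demonstration (not taken from the patch):

fn main() {
    let bits = 128;
    // Positional argument, the pre-rewrite spelling:
    let old = format!("Unknown size: {}", bits);
    // Inline capture, the spelling used throughout this diff:
    let new = format!("Unknown size: {bits}");
    assert_eq!(old, new);
    // Only bare identifiers can be captured; expressions and method
    // calls still need a positional or named argument:
    let v = [1, 2, 3];
    println!("len: {}", v.len());
}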