author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
commit     c23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree       2772049aaf84b5c9d0ed12ec8d86812f7a7904b6 /vendor/portable-atomic/src/imp/interrupt
parent     Releasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
download   rustc-c23a457e72abe608715ac76f076f47dc42af07a5.tar.xz
           rustc-c23a457e72abe608715ac76f076f47dc42af07a5.zip
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/portable-atomic/src/imp/interrupt')
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/README.md    26
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/armv4t.rs   152
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/armv6m.rs    46
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/avr.rs       52
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/mod.rs      630
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/msp430.rs    61
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/riscv.rs     79
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/xtensa.rs    46
8 files changed, 1092 insertions, 0 deletions
diff --git a/vendor/portable-atomic/src/imp/interrupt/README.md b/vendor/portable-atomic/src/imp/interrupt/README.md
new file mode 100644
index 000000000..32c202a8d
--- /dev/null
+++ b/vendor/portable-atomic/src/imp/interrupt/README.md
@@ -0,0 +1,26 @@
+# Implementation of disabling interrupts
+
+This module is used to provide atomic CAS for targets where atomic CAS is not available in the standard library.
+
+- MSP430 and AVR are always single-core, so this module is always used.
+- ARMv6-M (thumbv6m), pre-v6 ARM (e.g., thumbv4t, thumbv5te), RISC-V without the A extension, and Xtensa may be multi-core, so this module is only used when the `unsafe-assume-single-core` feature is enabled.
+
+The implementation uses privileged instructions to disable interrupts, so it usually doesn't work in unprivileged mode.
+Enabling this feature in an environment where privileged instructions are not available, or where the instructions used are not sufficient to disable interrupts in the system, is also usually considered **unsound**, although the details are system-dependent.
+
+Consider using the [`critical-section` feature](../../../README.md#optional-features-critical-section) for systems that cannot use the `unsafe-assume-single-core` feature.
+
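Whichever fallback ends up selected, the crate's public API mirrors `core::sync::atomic`, so user code does not change. A minimal usage sketch (illustrative only; the `COUNTER`/`increment` names are made up here, and the appropriate Cargo feature for the target is assumed to be enabled):

```rust
use portable_atomic::{AtomicUsize, Ordering};

static COUNTER: AtomicUsize = AtomicUsize::new(0);

/// Increments `COUNTER` with a CAS loop; on targets covered by this module the
/// CAS is backed by disabling interrupts or by the `critical-section` crate.
fn increment() -> usize {
    let mut cur = COUNTER.load(Ordering::Relaxed);
    loop {
        match COUNTER.compare_exchange(cur, cur.wrapping_add(1), Ordering::AcqRel, Ordering::Acquire) {
            Ok(prev) => return prev,
            Err(actual) => cur = actual,
        }
    }
}
```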
+For some targets, the implementation can be changed by explicitly enabling features.
+
+- On ARMv6-M, this disables interrupts by modifying the PRIMASK register.
+- On pre-v6 ARM, this disables interrupts by modifying the I (IRQ mask) bit of the CPSR.
+- On pre-v6 ARM with the `disable-fiq` feature, this disables interrupts by modifying the I (IRQ mask) bit and F (FIQ mask) bit of the CPSR.
+- On RISC-V (without A-extension), this disables interrupts by modifying the MIE (Machine Interrupt Enable) bit of the `mstatus` register.
+- On RISC-V (without A-extension) with the `s-mode` feature, this disables interrupts by modifying the SIE (Supervisor Interrupt Enable) bit of the `sstatus` register.
+- On MSP430, this disables interrupts by modifying the GIE (Global Interrupt Enable) bit of the status register (SR).
+- On AVR, this disables interrupts by modifying the I (Global Interrupt Enable) bit of the status register (SREG).
+- On Xtensa, this disables interrupts by modifying the PS special register.
+
+Some operations don't require disabling interrupts: loads and stores on all targets except AVR, and additionally `add`, `sub`, `and`, `or`, `xor`, and `not` on MSP430. However, when the `critical-section` feature is enabled, critical sections are taken for all atomic operations.
+
+Feel free to submit an issue if your target is not supported yet.
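For orientation, the fallback described above boils down to roughly the following pattern. This is an editorial sketch, not the actual implementation in this directory: the dummy `disable`/`restore` below stand in for the per-architecture PRIMASK/CPSR/mstatus/SREG accessors, and only a `u32` compare-exchange is shown.

```rust
use core::cell::UnsafeCell;

// Dummy stand-ins for the per-architecture `disable`/`restore` functions
// (PRIMASK on ARMv6-M, CPSR on pre-v6 ARM, mstatus/sstatus on RISC-V, ...),
// so that this sketch is self-contained.
fn disable() -> u32 { 0 }
unsafe fn restore(_state: u32) {}

/// Runs `f` with interrupts disabled, restoring the previous state afterwards.
fn with<R>(f: impl FnOnce() -> R) -> R {
    let state = disable();
    let r = f();
    // SAFETY: `state` was just returned by `disable`.
    unsafe { restore(state) }
    r
}

/// Compare-and-swap built on top of the critical section, as in `mod.rs`.
fn compare_exchange(v: &UnsafeCell<u32>, current: u32, new: u32) -> Result<u32, u32> {
    with(|| unsafe {
        let old = v.get().read();
        if old == current {
            v.get().write(new);
            Ok(old)
        } else {
            Err(old)
        }
    })
}
```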
diff --git a/vendor/portable-atomic/src/imp/interrupt/armv4t.rs b/vendor/portable-atomic/src/imp/interrupt/armv4t.rs
new file mode 100644
index 000000000..85c7ec1b5
--- /dev/null
+++ b/vendor/portable-atomic/src/imp/interrupt/armv4t.rs
@@ -0,0 +1,152 @@
+// Refs: https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/The-System-Level-Programmers--Model/ARM-processor-modes-and-ARM-core-registers/Program-Status-Registers--PSRs-?lang=en
+//
+// Generated asm:
+// - armv5te https://godbolt.org/z/5arYrfzYc
+
+#[cfg(not(portable_atomic_no_asm))]
+use core::arch::asm;
+
+#[cfg(not(portable_atomic_disable_fiq))]
+macro_rules! if_disable_fiq {
+ ($tt:tt) => {
+ ""
+ };
+}
+#[cfg(portable_atomic_disable_fiq)]
+macro_rules! if_disable_fiq {
+ ($tt:tt) => {
+ $tt
+ };
+}
+
+pub(super) type State = u32;
+
+/// Disables interrupts and returns the previous interrupt state.
+#[inline]
+#[instruction_set(arm::a32)]
+pub(super) fn disable() -> State {
+ let cpsr: State;
+ // SAFETY: reading CPSR and disabling interrupts are safe.
+ // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
+ unsafe {
+ // Do not use `nomem` and `readonly` because we need to prevent subsequent memory accesses from being reordered before interrupts are disabled.
+ asm!(
+ "mrs {prev}, cpsr",
+ "orr {new}, {prev}, 0x80", // I (IRQ mask) bit (1 << 7)
+ // We disable only IRQs by default. See also https://github.com/taiki-e/portable-atomic/pull/28#issuecomment-1214146912.
+ if_disable_fiq!("orr {new}, {new}, 0x40"), // F (FIQ mask) bit (1 << 6)
+ "msr cpsr_c, {new}",
+ prev = out(reg) cpsr,
+ new = out(reg) _,
+ options(nostack, preserves_flags),
+ );
+ }
+ cpsr
+}
+
+/// Restores the previous interrupt state.
+///
+/// # Safety
+///
+/// The state must be the one retrieved by the previous `disable`.
+#[inline]
+#[instruction_set(arm::a32)]
+pub(super) unsafe fn restore(cpsr: State) {
+ // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`.
+ unsafe {
+ // This clobbers the entire CPSR. See msp430.rs for safety on this.
+ //
+ // Do not use `nomem` and `readonly` because we need to prevent preceding memory accesses from being reordered after interrupts are enabled.
+ asm!("msr cpsr_c, {0}", in(reg) cpsr, options(nostack));
+ }
+}
+
+// On pre-v6 ARM, we cannot use core::sync::atomic here because its non-relaxed
+// load/store call the `__sync_*` builtins (pre-v6 ARM doesn't have the Data
+// Memory Barrier instruction).
+//
+// Generated asm:
+// - armv5te https://godbolt.org/z/a7zcs9hKa
+pub(crate) mod atomic {
+ #[cfg(not(portable_atomic_no_asm))]
+ use core::arch::asm;
+ use core::{cell::UnsafeCell, sync::atomic::Ordering};
+
+ macro_rules! atomic {
+ ($([$($generics:tt)*])? $atomic_type:ident, $value_type:ty, $asm_suffix:tt) => {
+ #[repr(transparent)]
+ pub(crate) struct $atomic_type $(<$($generics)*>)? {
+ v: UnsafeCell<$value_type>,
+ }
+
+ // Send is implicitly implemented for atomic integers, but not for atomic pointers.
+ // SAFETY: any data races are prevented by atomic operations.
+ unsafe impl $(<$($generics)*>)? Send for $atomic_type $(<$($generics)*>)? {}
+ // SAFETY: any data races are prevented by atomic operations.
+ unsafe impl $(<$($generics)*>)? Sync for $atomic_type $(<$($generics)*>)? {}
+
+ impl $(<$($generics)*>)? $atomic_type $(<$($generics)*>)? {
+ #[inline]
+ pub(crate) fn load(&self, order: Ordering) -> $value_type {
+ let src = self.v.get();
+ // SAFETY: any data races are prevented by atomic intrinsics and the raw
+ // pointer passed in is valid because we got it from a reference.
+ unsafe {
+ let out;
+ match order {
+ Ordering::Relaxed => {
+ asm!(
+ concat!("ldr", $asm_suffix, " {out}, [{src}]"),
+ src = in(reg) src,
+ out = lateout(reg) out,
+ options(nostack, preserves_flags, readonly),
+ );
+ }
+ Ordering::Acquire | Ordering::SeqCst => {
+ // inline asm without nomem/readonly implies compiler fence.
+ // And compiler fence is fine because the user explicitly declares that
+ // the system is single-core by using an unsafe cfg.
+ asm!(
+ concat!("ldr", $asm_suffix, " {out}, [{src}]"),
+ src = in(reg) src,
+ out = lateout(reg) out,
+ options(nostack, preserves_flags),
+ );
+ }
+ _ => unreachable!("{:?}", order),
+ }
+ out
+ }
+ }
+
+ #[inline]
+ pub(crate) fn store(&self, val: $value_type, _order: Ordering) {
+ let dst = self.v.get();
+ // SAFETY: any data races are prevented by atomic intrinsics and the raw
+ // pointer passed in is valid because we got it from a reference.
+ unsafe {
+ // inline asm without nomem/readonly implies compiler fence.
+ // And compiler fence is fine because the user explicitly declares that
+ // the system is single-core by using an unsafe cfg.
+ asm!(
+ concat!("str", $asm_suffix, " {val}, [{dst}]"),
+ dst = in(reg) dst,
+ val = in(reg) val,
+ options(nostack, preserves_flags),
+ );
+ }
+ }
+ }
+ };
+ }
+
+ atomic!(AtomicI8, i8, "b");
+ atomic!(AtomicU8, u8, "b");
+ atomic!(AtomicI16, i16, "h");
+ atomic!(AtomicU16, u16, "h");
+ atomic!(AtomicI32, i32, "");
+ atomic!(AtomicU32, u32, "");
+ atomic!(AtomicIsize, isize, "");
+ atomic!(AtomicUsize, usize, "");
+ atomic!([T] AtomicPtr, *mut T, "");
+}
diff --git a/vendor/portable-atomic/src/imp/interrupt/armv6m.rs b/vendor/portable-atomic/src/imp/interrupt/armv6m.rs
new file mode 100644
index 000000000..00413128c
--- /dev/null
+++ b/vendor/portable-atomic/src/imp/interrupt/armv6m.rs
@@ -0,0 +1,46 @@
+// Adapted from https://github.com/rust-embedded/cortex-m.
+//
+// Generated asm:
+// - armv6-m https://godbolt.org/z/sTezYnaj9
+
+#[cfg(not(portable_atomic_no_asm))]
+use core::arch::asm;
+
+pub(super) use core::sync::atomic;
+
+pub(super) type State = u32;
+
+/// Disables interrupts and returns the previous interrupt state.
+#[inline]
+pub(super) fn disable() -> State {
+ let r: State;
+ // SAFETY: reading the priority mask register and disabling interrupts are safe.
+ // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
+ unsafe {
+ // Do not use `nomem` and `readonly` because we need to prevent subsequent memory accesses from being reordered before interrupts are disabled.
+ asm!(
+ "mrs {0}, PRIMASK",
+ "cpsid i",
+ out(reg) r,
+ options(nostack, preserves_flags),
+ );
+ }
+ r
+}
+
+/// Restores the previous interrupt state.
+///
+/// # Safety
+///
+/// The state must be the one retrieved by the previous `disable`.
+#[inline]
+pub(super) unsafe fn restore(r: State) {
+ if r & 0x1 == 0 {
+ // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`,
+ // and we've checked that interrupts were enabled before disabling interrupts.
+ unsafe {
+ // Do not use `nomem` and `readonly` because we need to prevent preceding memory accesses from being reordered after interrupts are enabled.
+ asm!("cpsie i", options(nostack, preserves_flags));
+ }
+ }
+}
diff --git a/vendor/portable-atomic/src/imp/interrupt/avr.rs b/vendor/portable-atomic/src/imp/interrupt/avr.rs
new file mode 100644
index 000000000..7cc48c62e
--- /dev/null
+++ b/vendor/portable-atomic/src/imp/interrupt/avr.rs
@@ -0,0 +1,52 @@
+// Adapted from https://github.com/Rahix/avr-device.
+
+#[cfg(not(portable_atomic_no_asm))]
+use core::arch::asm;
+
+pub(super) type State = u8;
+
+/// Disables interrupts and returns the previous interrupt state.
+#[inline]
+pub(super) fn disable() -> State {
+ let sreg: State;
+ // SAFETY: reading the status register (SREG) and disabling interrupts are safe.
+ // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
+ unsafe {
+ // Do not use `nomem` and `readonly` because we need to prevent subsequent memory accesses from being reordered before interrupts are disabled.
+ // Do not use `preserves_flags` because CLI modifies the I bit of the status register (SREG).
+ // Refs: https://ww1.microchip.com/downloads/en/DeviceDoc/AVR-InstructionSet-Manual-DS40002198.pdf#page=58
+ #[cfg(not(portable_atomic_no_asm))]
+ asm!(
+ "in {0}, 0x3F",
+ "cli",
+ out(reg) sreg,
+ options(nostack),
+ );
+ #[cfg(portable_atomic_no_asm)]
+ {
+ llvm_asm!("in $0, 0x3F" : "=r"(sreg) ::: "volatile");
+ llvm_asm!("cli" ::: "memory" : "volatile");
+ }
+ }
+ sreg
+}
+
+/// Restores the previous interrupt state.
+///
+/// # Safety
+///
+/// The state must be the one retrieved by the previous `disable`.
+#[inline]
+pub(super) unsafe fn restore(sreg: State) {
+ // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`.
+ unsafe {
+ // This clobbers the entire status register. See msp430.rs for safety on this.
+ //
+ // Do not use `nomem` and `readonly` because we need to prevent preceding memory accesses from being reordered after interrupts are enabled.
+ // Do not use `preserves_flags` because OUT modifies the status register (SREG).
+ #[cfg(not(portable_atomic_no_asm))]
+ asm!("out 0x3F, {0}", in(reg) sreg, options(nostack));
+ #[cfg(portable_atomic_no_asm)]
+ llvm_asm!("out 0x3F, $0" :: "r"(sreg) : "memory" : "volatile");
+ }
+}
diff --git a/vendor/portable-atomic/src/imp/interrupt/mod.rs b/vendor/portable-atomic/src/imp/interrupt/mod.rs
new file mode 100644
index 000000000..a0ead68a6
--- /dev/null
+++ b/vendor/portable-atomic/src/imp/interrupt/mod.rs
@@ -0,0 +1,630 @@
+// Critical section based fallback implementations
+//
+// This module supports two different critical section implementations:
+// - Built-in "disable all interrupts".
+// - Call into the `critical-section` crate (which allows the user to plug any implementation).
+//
+// The `critical-section`-based fallback is enabled when the user asks for it with the `critical-section`
+// Cargo feature.
+//
+// The "disable interrupts" fallback is not sound on multi-core systems.
+// Also, this uses privileged instructions to disable interrupts, so it usually
+// doesn't work in unprivileged mode. Using this fallback in an environment where privileged
+// instructions are not available is also usually considered **unsound**,
+// although the details are system-dependent.
+//
+// Therefore, this implementation will only be enabled in one of the following cases:
+//
+// - When the user explicitly declares that the system is single-core and that
+// privileged instructions are available using an unsafe cfg.
+// - When we can safely assume that the system is single-core and that
+// privileged instructions are available on the system.
+//
+// AVR, which is single-core[^avr1] and for which LLVM also generates code that
+// disables interrupts[^avr2] in atomic ops by default, is considered the latter.
+// The same applies to MSP430.
+//
+// See also README.md of this directory.
+//
+// [^avr1]: https://github.com/llvm/llvm-project/blob/llvmorg-16.0.0/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp#LL963
+// [^avr2]: https://github.com/llvm/llvm-project/blob/llvmorg-16.0.0/llvm/test/CodeGen/AVR/atomics/load16.ll#L5
+
+// On some platforms, atomic load/store can be implemented in a more efficient
+// way than disabling interrupts. On MSP430, some RMWs that do not return the
+// previous value can also be optimized.
+//
+// Note: On single-core systems, it is okay to use critical section-based
+// CAS together with atomic load/store. The load/store will not be
+// called while interrupts are disabled, and since the load/store is
+// atomic, it is not affected by interrupts even if interrupts are enabled.
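// Illustration of the note above (an editorial sketch using the crate's public
// API, not code from this module): on a single-core system an interrupt handler
// may use a plain atomic load even though thread-mode code updates the same
// value through the critical-section-based CAS, because the CAS runs entirely
// with interrupts disabled and the store that publishes its result is atomic.
//
//     use portable_atomic::{AtomicU32, Ordering};
//     static FLAG: AtomicU32 = AtomicU32::new(0);
//     fn thread_mode() {
//         // The whole read-compare-write runs with interrupts disabled.
//         let _ = FLAG.compare_exchange(0, 1, Ordering::AcqRel, Ordering::Acquire);
//     }
//     fn irq_handler() {
//         // A bare atomic load needs no critical section.
//         let _v = FLAG.load(Ordering::Acquire);
//     }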
+#[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
+use arch::atomic;
+
+#[cfg(not(feature = "critical-section"))]
+#[cfg_attr(
+ all(
+ target_arch = "arm",
+ any(target_feature = "mclass", portable_atomic_target_feature = "mclass"),
+ ),
+ path = "armv6m.rs"
+)]
+#[cfg_attr(
+ all(
+ target_arch = "arm",
+ not(any(target_feature = "mclass", portable_atomic_target_feature = "mclass")),
+ ),
+ path = "armv4t.rs"
+)]
+#[cfg_attr(target_arch = "avr", path = "avr.rs")]
+#[cfg_attr(target_arch = "msp430", path = "msp430.rs")]
+#[cfg_attr(any(target_arch = "riscv32", target_arch = "riscv64"), path = "riscv.rs")]
+#[cfg_attr(target_arch = "xtensa", path = "xtensa.rs")]
+mod arch;
+
+use core::{cell::UnsafeCell, sync::atomic::Ordering};
+
+// Critical section implementations might use locks internally.
+#[cfg(feature = "critical-section")]
+const IS_ALWAYS_LOCK_FREE: bool = false;
+
+// We consider atomic operations based on disabling interrupts on single-core
+// systems to be lock-free. (We also consider the pre-v6 ARM Linux atomic operations
+// provided in a similar way by the Linux kernel to be lock-free.)
+#[cfg(not(feature = "critical-section"))]
+const IS_ALWAYS_LOCK_FREE: bool = true;
+
+#[cfg(feature = "critical-section")]
+#[inline]
+fn with<F, R>(f: F) -> R
+where
+ F: FnOnce() -> R,
+{
+ critical_section::with(|_| f())
+}
+
+#[cfg(not(feature = "critical-section"))]
+#[inline]
+fn with<F, R>(f: F) -> R
+where
+ F: FnOnce() -> R,
+{
+ // Get current interrupt state and disable interrupts
+ let state = arch::disable();
+
+ let r = f();
+
+ // Restore interrupt state
+ // SAFETY: the state was retrieved by the previous `disable`.
+ unsafe { arch::restore(state) }
+
+ r
+}
+
+#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
+#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
+#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
+#[cfg_attr(target_pointer_width = "128", repr(C, align(16)))]
+pub(crate) struct AtomicPtr<T> {
+ p: UnsafeCell<*mut T>,
+}
+
+// SAFETY: any data races are prevented by disabling interrupts or
+// atomic intrinsics (see module-level comments).
+unsafe impl<T> Send for AtomicPtr<T> {}
+// SAFETY: any data races are prevented by disabling interrupts or
+// atomic intrinsics (see module-level comments).
+unsafe impl<T> Sync for AtomicPtr<T> {}
+
+impl<T> AtomicPtr<T> {
+ #[inline]
+ pub(crate) const fn new(p: *mut T) -> Self {
+ Self { p: UnsafeCell::new(p) }
+ }
+
+ #[inline]
+ pub(crate) fn is_lock_free() -> bool {
+ Self::is_always_lock_free()
+ }
+ #[inline]
+ pub(crate) const fn is_always_lock_free() -> bool {
+ IS_ALWAYS_LOCK_FREE
+ }
+
+ #[inline]
+ pub(crate) fn get_mut(&mut self) -> &mut *mut T {
+ // SAFETY: the mutable reference guarantees unique ownership.
+ // (UnsafeCell::get_mut requires Rust 1.50)
+ unsafe { &mut *self.p.get() }
+ }
+
+ #[inline]
+ pub(crate) fn into_inner(self) -> *mut T {
+ self.p.into_inner()
+ }
+
+ #[inline]
+ #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+ pub(crate) fn load(&self, order: Ordering) -> *mut T {
+ crate::utils::assert_load_ordering(order);
+ #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
+ {
+ self.as_native().load(order)
+ }
+ #[cfg(any(target_arch = "avr", feature = "critical-section"))]
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe { self.p.get().read() })
+ }
+
+ #[inline]
+ #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+ pub(crate) fn store(&self, ptr: *mut T, order: Ordering) {
+ crate::utils::assert_store_ordering(order);
+ #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
+ {
+ self.as_native().store(ptr, order);
+ }
+ #[cfg(any(target_arch = "avr", feature = "critical-section"))]
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe { self.p.get().write(ptr) });
+ }
+
+ #[inline]
+ pub(crate) fn swap(&self, ptr: *mut T, _order: Ordering) -> *mut T {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe { self.p.get().replace(ptr) })
+ }
+
+ #[inline]
+ #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+ pub(crate) fn compare_exchange(
+ &self,
+ current: *mut T,
+ new: *mut T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<*mut T, *mut T> {
+ crate::utils::assert_compare_exchange_ordering(success, failure);
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.p.get().read();
+ if result == current {
+ self.p.get().write(new);
+ Ok(result)
+ } else {
+ Err(result)
+ }
+ })
+ }
+
+ #[inline]
+ #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+ pub(crate) fn compare_exchange_weak(
+ &self,
+ current: *mut T,
+ new: *mut T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<*mut T, *mut T> {
+ self.compare_exchange(current, new, success, failure)
+ }
+
+ #[inline]
+ pub(crate) const fn as_ptr(&self) -> *mut *mut T {
+ self.p.get()
+ }
+
+ #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
+ #[inline]
+ fn as_native(&self) -> &atomic::AtomicPtr<T> {
+ // SAFETY: AtomicPtr and atomic::AtomicPtr have the same layout and
+ // guarantee atomicity in a compatible way. (see module-level comments)
+ unsafe { &*(self as *const Self as *const atomic::AtomicPtr<T>) }
+ }
+}
+
+macro_rules! atomic_int {
+ (base, $atomic_type:ident, $int_type:ident, $align:literal) => {
+ #[repr(C, align($align))]
+ pub(crate) struct $atomic_type {
+ v: UnsafeCell<$int_type>,
+ }
+
+ // Send is implicitly implemented.
+ // SAFETY: any data races are prevented by disabling interrupts or
+ // atomic intrinsics (see module-level comments).
+ unsafe impl Sync for $atomic_type {}
+
+ impl $atomic_type {
+ #[inline]
+ pub(crate) const fn new(v: $int_type) -> Self {
+ Self { v: UnsafeCell::new(v) }
+ }
+
+ #[inline]
+ pub(crate) fn is_lock_free() -> bool {
+ Self::is_always_lock_free()
+ }
+ #[inline]
+ pub(crate) const fn is_always_lock_free() -> bool {
+ IS_ALWAYS_LOCK_FREE
+ }
+
+ #[inline]
+ pub(crate) fn get_mut(&mut self) -> &mut $int_type {
+ // SAFETY: the mutable reference guarantees unique ownership.
+ // (UnsafeCell::get_mut requires Rust 1.50)
+ unsafe { &mut *self.v.get() }
+ }
+
+ #[inline]
+ pub(crate) fn into_inner(self) -> $int_type {
+ self.v.into_inner()
+ }
+
+ #[inline]
+ pub(crate) const fn as_ptr(&self) -> *mut $int_type {
+ self.v.get()
+ }
+ }
+ };
+ (load_store_atomic, $atomic_type:ident, $int_type:ident, $align:literal) => {
+ atomic_int!(base, $atomic_type, $int_type, $align);
+ atomic_int!(cas, $atomic_type, $int_type);
+ impl $atomic_type {
+ #[inline]
+ #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+ pub(crate) fn load(&self, order: Ordering) -> $int_type {
+ crate::utils::assert_load_ordering(order);
+ #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
+ {
+ self.as_native().load(order)
+ }
+ #[cfg(any(target_arch = "avr", feature = "critical-section"))]
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe { self.v.get().read() })
+ }
+
+ #[inline]
+ #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+ pub(crate) fn store(&self, val: $int_type, order: Ordering) {
+ crate::utils::assert_store_ordering(order);
+ #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
+ {
+ self.as_native().store(val, order);
+ }
+ #[cfg(any(target_arch = "avr", feature = "critical-section"))]
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe { self.v.get().write(val) });
+ }
+
+ #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
+ #[inline]
+ fn as_native(&self) -> &atomic::$atomic_type {
+ // SAFETY: $atomic_type and atomic::$atomic_type have the same layout and
+ // guarantee atomicity in a compatible way. (see module-level comments)
+ unsafe { &*(self as *const Self as *const atomic::$atomic_type) }
+ }
+ }
+
+ #[cfg(not(all(target_arch = "msp430", not(feature = "critical-section"))))]
+ impl_default_no_fetch_ops!($atomic_type, $int_type);
+ impl_default_bit_opts!($atomic_type, $int_type);
+ #[cfg(not(all(target_arch = "msp430", not(feature = "critical-section"))))]
+ impl $atomic_type {
+ #[inline]
+ pub(crate) fn not(&self, order: Ordering) {
+ self.fetch_not(order);
+ }
+ }
+ #[cfg(all(target_arch = "msp430", not(feature = "critical-section")))]
+ impl $atomic_type {
+ #[inline]
+ pub(crate) fn add(&self, val: $int_type, order: Ordering) {
+ self.as_native().add(val, order);
+ }
+ #[inline]
+ pub(crate) fn sub(&self, val: $int_type, order: Ordering) {
+ self.as_native().sub(val, order);
+ }
+ #[inline]
+ pub(crate) fn and(&self, val: $int_type, order: Ordering) {
+ self.as_native().and(val, order);
+ }
+ #[inline]
+ pub(crate) fn or(&self, val: $int_type, order: Ordering) {
+ self.as_native().or(val, order);
+ }
+ #[inline]
+ pub(crate) fn xor(&self, val: $int_type, order: Ordering) {
+ self.as_native().xor(val, order);
+ }
+ #[inline]
+ pub(crate) fn not(&self, order: Ordering) {
+ self.as_native().not(order);
+ }
+ }
+ };
+ (load_store_critical_session, $atomic_type:ident, $int_type:ident, $align:literal) => {
+ atomic_int!(base, $atomic_type, $int_type, $align);
+ atomic_int!(cas, $atomic_type, $int_type);
+ impl_default_no_fetch_ops!($atomic_type, $int_type);
+ impl_default_bit_opts!($atomic_type, $int_type);
+ impl $atomic_type {
+ #[inline]
+ #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+ pub(crate) fn load(&self, order: Ordering) -> $int_type {
+ crate::utils::assert_load_ordering(order);
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe { self.v.get().read() })
+ }
+
+ #[inline]
+ #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+ pub(crate) fn store(&self, val: $int_type, order: Ordering) {
+ crate::utils::assert_store_ordering(order);
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe { self.v.get().write(val) });
+ }
+
+ #[inline]
+ pub(crate) fn not(&self, order: Ordering) {
+ self.fetch_not(order);
+ }
+ }
+ };
+ (cas, $atomic_type:ident, $int_type:ident) => {
+ impl $atomic_type {
+ #[inline]
+ pub(crate) fn swap(&self, val: $int_type, _order: Ordering) -> $int_type {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe { self.v.get().replace(val) })
+ }
+
+ #[inline]
+ #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+ pub(crate) fn compare_exchange(
+ &self,
+ current: $int_type,
+ new: $int_type,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<$int_type, $int_type> {
+ crate::utils::assert_compare_exchange_ordering(success, failure);
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.v.get().read();
+ if result == current {
+ self.v.get().write(new);
+ Ok(result)
+ } else {
+ Err(result)
+ }
+ })
+ }
+
+ #[inline]
+ #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+ pub(crate) fn compare_exchange_weak(
+ &self,
+ current: $int_type,
+ new: $int_type,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<$int_type, $int_type> {
+ self.compare_exchange(current, new, success, failure)
+ }
+
+ #[inline]
+ pub(crate) fn fetch_add(&self, val: $int_type, _order: Ordering) -> $int_type {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.v.get().read();
+ self.v.get().write(result.wrapping_add(val));
+ result
+ })
+ }
+
+ #[inline]
+ pub(crate) fn fetch_sub(&self, val: $int_type, _order: Ordering) -> $int_type {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.v.get().read();
+ self.v.get().write(result.wrapping_sub(val));
+ result
+ })
+ }
+
+ #[inline]
+ pub(crate) fn fetch_and(&self, val: $int_type, _order: Ordering) -> $int_type {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.v.get().read();
+ self.v.get().write(result & val);
+ result
+ })
+ }
+
+ #[inline]
+ pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.v.get().read();
+ self.v.get().write(!(result & val));
+ result
+ })
+ }
+
+ #[inline]
+ pub(crate) fn fetch_or(&self, val: $int_type, _order: Ordering) -> $int_type {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.v.get().read();
+ self.v.get().write(result | val);
+ result
+ })
+ }
+
+ #[inline]
+ pub(crate) fn fetch_xor(&self, val: $int_type, _order: Ordering) -> $int_type {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.v.get().read();
+ self.v.get().write(result ^ val);
+ result
+ })
+ }
+
+ #[inline]
+ pub(crate) fn fetch_max(&self, val: $int_type, _order: Ordering) -> $int_type {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.v.get().read();
+ self.v.get().write(core::cmp::max(result, val));
+ result
+ })
+ }
+
+ #[inline]
+ pub(crate) fn fetch_min(&self, val: $int_type, _order: Ordering) -> $int_type {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.v.get().read();
+ self.v.get().write(core::cmp::min(result, val));
+ result
+ })
+ }
+
+ #[inline]
+ pub(crate) fn fetch_not(&self, _order: Ordering) -> $int_type {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.v.get().read();
+ self.v.get().write(!result);
+ result
+ })
+ }
+
+ #[inline]
+ pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
+ // SAFETY: any data races are prevented by disabling interrupts (see
+ // module-level comments) and the raw pointer is valid because we got it
+ // from a reference.
+ with(|| unsafe {
+ let result = self.v.get().read();
+ self.v.get().write(result.wrapping_neg());
+ result
+ })
+ }
+ #[inline]
+ pub(crate) fn neg(&self, order: Ordering) {
+ self.fetch_neg(order);
+ }
+ }
+ };
+}
+
+#[cfg(target_pointer_width = "16")]
+atomic_int!(load_store_atomic, AtomicIsize, isize, 2);
+#[cfg(target_pointer_width = "16")]
+atomic_int!(load_store_atomic, AtomicUsize, usize, 2);
+#[cfg(target_pointer_width = "32")]
+atomic_int!(load_store_atomic, AtomicIsize, isize, 4);
+#[cfg(target_pointer_width = "32")]
+atomic_int!(load_store_atomic, AtomicUsize, usize, 4);
+#[cfg(target_pointer_width = "64")]
+atomic_int!(load_store_atomic, AtomicIsize, isize, 8);
+#[cfg(target_pointer_width = "64")]
+atomic_int!(load_store_atomic, AtomicUsize, usize, 8);
+#[cfg(target_pointer_width = "128")]
+atomic_int!(load_store_atomic, AtomicIsize, isize, 16);
+#[cfg(target_pointer_width = "128")]
+atomic_int!(load_store_atomic, AtomicUsize, usize, 16);
+
+atomic_int!(load_store_atomic, AtomicI8, i8, 1);
+atomic_int!(load_store_atomic, AtomicU8, u8, 1);
+atomic_int!(load_store_atomic, AtomicI16, i16, 2);
+atomic_int!(load_store_atomic, AtomicU16, u16, 2);
+
+#[cfg(not(target_pointer_width = "16"))]
+atomic_int!(load_store_atomic, AtomicI32, i32, 4);
+#[cfg(not(target_pointer_width = "16"))]
+atomic_int!(load_store_atomic, AtomicU32, u32, 4);
+#[cfg(target_pointer_width = "16")]
+#[cfg(any(test, feature = "fallback"))]
+atomic_int!(load_store_critical_session, AtomicI32, i32, 4);
+#[cfg(target_pointer_width = "16")]
+#[cfg(any(test, feature = "fallback"))]
+atomic_int!(load_store_critical_session, AtomicU32, u32, 4);
+
+#[cfg(not(any(target_pointer_width = "16", target_pointer_width = "32")))]
+atomic_int!(load_store_atomic, AtomicI64, i64, 8);
+#[cfg(not(any(target_pointer_width = "16", target_pointer_width = "32")))]
+atomic_int!(load_store_atomic, AtomicU64, u64, 8);
+#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))]
+#[cfg(any(test, feature = "fallback"))]
+atomic_int!(load_store_critical_session, AtomicI64, i64, 8);
+#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))]
+#[cfg(any(test, feature = "fallback"))]
+atomic_int!(load_store_critical_session, AtomicU64, u64, 8);
+
+#[cfg(any(test, feature = "fallback"))]
+atomic_int!(load_store_critical_session, AtomicI128, i128, 16);
+#[cfg(any(test, feature = "fallback"))]
+atomic_int!(load_store_critical_session, AtomicU128, u128, 16);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ test_atomic_ptr_single_thread!();
+ test_atomic_int_single_thread!(i8);
+ test_atomic_int_single_thread!(u8);
+ test_atomic_int_single_thread!(i16);
+ test_atomic_int_single_thread!(u16);
+ test_atomic_int_single_thread!(i32);
+ test_atomic_int_single_thread!(u32);
+ test_atomic_int_single_thread!(i64);
+ test_atomic_int_single_thread!(u64);
+ test_atomic_int_single_thread!(i128);
+ test_atomic_int_single_thread!(u128);
+ test_atomic_int_single_thread!(isize);
+ test_atomic_int_single_thread!(usize);
+}
diff --git a/vendor/portable-atomic/src/imp/interrupt/msp430.rs b/vendor/portable-atomic/src/imp/interrupt/msp430.rs
new file mode 100644
index 000000000..020ed1023
--- /dev/null
+++ b/vendor/portable-atomic/src/imp/interrupt/msp430.rs
@@ -0,0 +1,61 @@
+// Adapted from https://github.com/rust-embedded/msp430.
+//
+// See also src/imp/msp430.rs.
+
+#[cfg(not(portable_atomic_no_asm))]
+use core::arch::asm;
+
+pub(super) use super::super::msp430 as atomic;
+
+pub(super) type State = u16;
+
+/// Disables interrupts and returns the previous interrupt state.
+#[inline]
+pub(super) fn disable() -> State {
+ let r: State;
+ // SAFETY: reading the status register and disabling interrupts are safe.
+ // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
+ unsafe {
+ // Do not use `nomem` and `readonly` because we need to prevent subsequent memory accesses from being reordered before interrupts are disabled.
+ // Do not use `preserves_flags` because DINT modifies the GIE (global interrupt enable) bit of the status register.
+ // Refs: https://mspgcc.sourceforge.net/manual/x951.html
+ #[cfg(not(portable_atomic_no_asm))]
+ asm!(
+ "mov R2, {0}",
+ "dint {{ nop",
+ out(reg) r,
+ options(nostack),
+ );
+ #[cfg(portable_atomic_no_asm)]
+ {
+ llvm_asm!("mov R2, $0" : "=r"(r) ::: "volatile");
+ llvm_asm!("dint { nop" ::: "memory" : "volatile");
+ }
+ }
+ r
+}
+
+/// Restores the previous interrupt state.
+///
+/// # Safety
+///
+/// The state must be the one retrieved by the previous `disable`.
+#[inline]
+pub(super) unsafe fn restore(r: State) {
+ // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`.
+ unsafe {
+ // This clobbers the entire status register, but we never explicitly modify
+ // flags within a critical section, and the only flags that may be changed
+ // within a critical section are the arithmetic flags that are changed as
+ // a side effect of arithmetic operations, etc., which LLVM recognizes,
+ // so it is safe to clobber them here.
+ // See also the discussion at https://github.com/taiki-e/portable-atomic/pull/40.
+ //
+ // Do not use `nomem` and `readonly` because we need to prevent preceding memory accesses from being reordered after interrupts are enabled.
+ // Do not use `preserves_flags` because MOV modifies the status register.
+ #[cfg(not(portable_atomic_no_asm))]
+ asm!("nop {{ mov {0}, R2 {{ nop", in(reg) r, options(nostack));
+ #[cfg(portable_atomic_no_asm)]
+ llvm_asm!("nop { mov $0, R2 { nop" :: "r"(r) : "memory" : "volatile");
+ }
+}
diff --git a/vendor/portable-atomic/src/imp/interrupt/riscv.rs b/vendor/portable-atomic/src/imp/interrupt/riscv.rs
new file mode 100644
index 000000000..c08545e1d
--- /dev/null
+++ b/vendor/portable-atomic/src/imp/interrupt/riscv.rs
@@ -0,0 +1,79 @@
+// Refs:
+// - https://five-embeddev.com/riscv-isa-manual/latest/machine.html#machine-status-registers-mstatus-and-mstatush
+// - https://five-embeddev.com/riscv-isa-manual/latest/supervisor.html#sstatus
+//
+// Generated asm:
+// - riscv64gc https://godbolt.org/z/a78zxf5sW
+
+#[cfg(not(portable_atomic_no_asm))]
+use core::arch::asm;
+
+pub(super) use super::super::riscv as atomic;
+
+// Status register
+#[cfg(not(portable_atomic_s_mode))]
+macro_rules! status {
+ () => {
+ "mstatus"
+ };
+}
+#[cfg(portable_atomic_s_mode)]
+macro_rules! status {
+ () => {
+ "sstatus"
+ };
+}
+
+// MIE (Machine Interrupt Enable) bit (1 << 3)
+#[cfg(not(portable_atomic_s_mode))]
+const MASK: State = 0x8;
+#[cfg(not(portable_atomic_s_mode))]
+macro_rules! mask {
+ () => {
+ "0x8"
+ };
+}
+// SIE (Supervisor Interrupt Enable) bit (1 << 1)
+#[cfg(portable_atomic_s_mode)]
+const MASK: State = 0x2;
+#[cfg(portable_atomic_s_mode)]
+macro_rules! mask {
+ () => {
+ "0x2"
+ };
+}
+
+#[cfg(target_arch = "riscv32")]
+pub(super) type State = u32;
+#[cfg(target_arch = "riscv64")]
+pub(super) type State = u64;
+
+/// Disables interrupts and returns the previous interrupt state.
+#[inline]
+pub(super) fn disable() -> State {
+ let r: State;
+ // SAFETY: reading mstatus and disabling interrupts is safe.
+ // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
+ unsafe {
+ // Do not use `nomem` and `readonly` because we need to prevent subsequent memory accesses from being reordered before interrupts are disabled.
+ asm!(concat!("csrrci {0}, ", status!(), ", ", mask!()), out(reg) r, options(nostack, preserves_flags));
+ }
+ r
+}
+
+/// Restores the previous interrupt state.
+///
+/// # Safety
+///
+/// The state must be the one retrieved by the previous `disable`.
+#[inline]
+pub(super) unsafe fn restore(r: State) {
+ if r & MASK != 0 {
+ // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`,
+ // and we've checked that interrupts were enabled before disabling interrupts.
+ unsafe {
+ // Do not use `nomem` and `readonly` because we need to prevent preceding memory accesses from being reordered after interrupts are enabled.
+ asm!(concat!("csrsi ", status!(), ", ", mask!()), options(nostack, preserves_flags));
+ }
+ }
+}
diff --git a/vendor/portable-atomic/src/imp/interrupt/xtensa.rs b/vendor/portable-atomic/src/imp/interrupt/xtensa.rs
new file mode 100644
index 000000000..3593c25af
--- /dev/null
+++ b/vendor/portable-atomic/src/imp/interrupt/xtensa.rs
@@ -0,0 +1,46 @@
+// Refs:
+// - Xtensa Instruction Set Architecture (ISA) Reference Manual https://0x04.net/~mwk/doc/xtensa.pdf
+// - Linux kernel's Xtensa atomic implementation https://github.com/torvalds/linux/blob/v6.1/arch/xtensa/include/asm/atomic.h
+
+#[cfg(not(portable_atomic_no_asm))]
+use core::arch::asm;
+
+pub(super) use core::sync::atomic;
+
+pub(super) type State = u32;
+
+/// Disables interrupts and returns the previous interrupt state.
+#[inline]
+pub(super) fn disable() -> State {
+ let r: State;
+ // SAFETY: reading the PS special register and disabling all interrupts is safe.
+ // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
+ unsafe {
+ // Do not use `nomem` and `readonly` because we need to prevent subsequent memory accesses from being reordered before interrupts are disabled.
+ // Interrupt level 15 to disable all interrupts.
+ // SYNC after RSIL is not required.
+ asm!("rsil {0}, 15", out(reg) r, options(nostack));
+ }
+ r
+}
+
+/// Restores the previous interrupt state.
+///
+/// # Safety
+///
+/// The state must be the one retrieved by the previous `disable`.
+#[inline]
+pub(super) unsafe fn restore(r: State) {
+ // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`.
+ unsafe {
+ // Do not use `nomem` and `readonly` because we need to prevent preceding memory accesses from being reordered after interrupts are enabled.
+ // SYNC after WSR is required to guarantee that subsequent RSIL read the written value.
+ asm!(
+ "wsr.ps {0}",
+ "rsync",
+ in(reg) r,
+ options(nostack),
+ );
+ }
+}