author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-06-07 05:48:48 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-06-07 05:48:48 +0000
commit     ef24de24a82fe681581cc130f342363c47c0969a
tree       0d494f7e1a38b95c92426f58fe6eaa877303a86c /vendor/portable-atomic/src/imp/interrupt
parent     Releasing progress-linux version 1.74.1+dfsg1-1~progress7.99u1.
Merging upstream version 1.75.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/portable-atomic/src/imp/interrupt')
8 files changed, 364 insertions, 70 deletions
diff --git a/vendor/portable-atomic/src/imp/interrupt/README.md b/vendor/portable-atomic/src/imp/interrupt/README.md
index 32c202a8d..edc5fbf2e 100644
--- a/vendor/portable-atomic/src/imp/interrupt/README.md
+++ b/vendor/portable-atomic/src/imp/interrupt/README.md
@@ -17,10 +17,11 @@ For some targets, the implementation can be changed by explicitly enabling featu
 - On pre-v6 ARM with the `disable-fiq` feature, this disables interrupts by modifying the I (IRQ mask) bit and F (FIQ mask) bit of the CPSR.
 - On RISC-V (without A-extension), this disables interrupts by modifying the MIE (Machine Interrupt Enable) bit of the `mstatus` register.
 - On RISC-V (without A-extension) with the `s-mode` feature, this disables interrupts by modifying the SIE (Supervisor Interrupt Enable) bit of the `sstatus` register.
+- On RISC-V (without A-extension) with the `force-amo` feature, this uses AMO instructions for RMWs that have corresponding AMO instructions even if A-extension is disabled. For other RMWs, this disables interrupts as usual.
 - On MSP430, this disables interrupts by modifying the GIE (Global Interrupt Enable) bit of the status register (SR).
 - On AVR, this disables interrupts by modifying the I (Global Interrupt Enable) bit of the status register (SREG).
 - On Xtensa, this disables interrupts by modifying the PS special register.
 
-Some operations don't require disabling interrupts (loads and stores on targets except for AVR, but additionally on MSP430 `add`, `sub`, `and`, `or`, `xor`, `not`). However, when the `critical-section` feature is enabled, critical sections are taken for all atomic operations.
+Some operations don't require disabling interrupts (loads and stores on targets except for AVR, but additionally on MSP430 {8,16}-bit `add,sub,and,or,xor,not`, on RISC-V with the `force-amo` feature 32-bit(RV32)/{32,64}-bit(RV64) `swap,fetch_{add,sub,and,or,xor,not,max,min},add,sub,and,or,xor,not` and {8,16}-bit `fetch_{and,or,xor,not},and,or,xor,not`). However, when the `critical-section` feature is enabled, critical sections are taken for all atomic operations.
 
 Feel free to submit an issue if your target is not supported yet.
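The README above describes the same pattern on every target: save the interrupt state, mask interrupts, run the read-modify-write, then put the saved state back. A host-runnable sketch of that shape — plain functions and a `Cell` standing in for the per-target register accesses, so the names here are illustrative rather than the crate's API:

```rust
use core::cell::Cell;

// Stand-ins for the per-target disable()/restore() register accesses.
// On real hardware these would save and mask IRQs (CPSR/PRIMASK/mstatus/SREG/...).
fn disable() -> bool { /* save previous state, mask interrupts */ false }
fn restore(_prev: bool) { /* write the saved state back */ }

// Run `f` with interrupts masked; restoring the *previous* state keeps
// nested critical sections correct.
fn with<T>(f: impl FnOnce() -> T) -> T {
    let state = disable();
    let r = f();
    restore(state);
    r
}

// An emulated fetch_add in the style of the interrupt fallback:
// read the old value, write the new one, return the old one.
fn fetch_add(v: &Cell<u32>, val: u32) -> u32 {
    with(|| {
        let prev = v.get();
        v.set(prev.wrapping_add(val));
        prev
    })
}

fn main() {
    let v = Cell::new(40);
    assert_eq!(fetch_add(&v, 2), 40);
    assert_eq!(v.get(), 42);
}
```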
diff --git a/vendor/portable-atomic/src/imp/interrupt/armv4t.rs b/vendor/portable-atomic/src/imp/interrupt/armv4t.rs
index 85c7ec1b5..20f7089ce 100644
--- a/vendor/portable-atomic/src/imp/interrupt/armv4t.rs
+++ b/vendor/portable-atomic/src/imp/interrupt/armv4t.rs
@@ -1,21 +1,26 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
 // Refs: https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/The-System-Level-Programmers--Model/ARM-processor-modes-and-ARM-core-registers/Program-Status-Registers--PSRs-?lang=en
 //
 // Generated asm:
-// - armv5te https://godbolt.org/z/5arYrfzYc
+// - armv5te https://godbolt.org/z/Teh7WajMs
 
 #[cfg(not(portable_atomic_no_asm))]
 use core::arch::asm;
 
+// - 0x80 - I (IRQ mask) bit (1 << 7)
+// - 0x40 - F (FIQ mask) bit (1 << 6)
+// We disable only IRQs by default. See also https://github.com/taiki-e/portable-atomic/pull/28#issuecomment-1214146912.
 #[cfg(not(portable_atomic_disable_fiq))]
-macro_rules! if_disable_fiq {
-    ($tt:tt) => {
-        ""
+macro_rules! mask {
+    () => {
+        "0x80"
     };
 }
 #[cfg(portable_atomic_disable_fiq)]
-macro_rules! if_disable_fiq {
-    ($tt:tt) => {
-        $tt
+macro_rules! mask {
+    () => {
+        "0xC0" // 0x80 | 0x40
     };
 }
 
@@ -29,15 +34,13 @@ pub(super) fn disable() -> State {
     // SAFETY: reading CPSR and disabling interrupts are safe.
     // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
     unsafe {
-        // Do not use `nomem` and `readonly` because prevent subsequent memory accesses from being reordered before interrupts are disabled.
         asm!(
             "mrs {prev}, cpsr",
-            "orr {new}, {prev}, 0x80", // I (IRQ mask) bit (1 << 7)
-            // We disable only IRQs by default. See also https://github.com/taiki-e/portable-atomic/pull/28#issuecomment-1214146912.
-            if_disable_fiq!("orr {new}, {new}, 0x40"), // F (FIQ mask) bit (1 << 6)
+            concat!("orr {new}, {prev}, ", mask!()),
             "msr cpsr_c, {new}",
             prev = out(reg) cpsr,
             new = out(reg) _,
+            // Do not use `nomem` and `readonly` because prevent subsequent memory accesses from being reordered before interrupts are disabled.
             options(nostack, preserves_flags),
         );
     }
@@ -53,11 +56,14 @@ pub(super) fn disable() -> State {
 #[instruction_set(arm::a32)]
 pub(super) unsafe fn restore(cpsr: State) {
     // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`,
+    //
+    // This clobbers the control field mask byte of CPSR. See msp430.rs to safety on this.
+    // (preserves_flags is fine because we only clobber the I, F, T, and M bits of CPSR.)
+    //
+    // Refs: https://developer.arm.com/documentation/dui0473/m/arm-and-thumb-instructions/msr--general-purpose-register-to-psr-
     unsafe {
-        // This clobbers the entire CPSR. See msp430.rs to safety on this.
-        //
         // Do not use `nomem` and `readonly` because prevent preceding memory accesses from being reordered after interrupts are enabled.
-        asm!("msr cpsr_c, {0}", in(reg) cpsr, options(nostack));
+        asm!("msr cpsr_c, {0}", in(reg) cpsr, options(nostack, preserves_flags));
     }
 }
 
@@ -66,7 +72,7 @@ pub(super) unsafe fn restore(cpsr: State) {
 // have Data Memory Barrier).
 //
 // Generated asm:
-// - armv5te https://godbolt.org/z/a7zcs9hKa
+// - armv5te https://godbolt.org/z/bMxK7M8Ta
 pub(crate) mod atomic {
     #[cfg(not(portable_atomic_no_asm))]
     use core::arch::asm;
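The functional change in this hunk is that FIQ handling no longer splices in a second `orr`; a cfg-selected `mask!()` macro now supplies the immediate (`0x80` = I bit only, `0xC0` = I and F bits) folded into a single `orr`. A plain-integer sketch of that bit arithmetic (no `asm!`, so it runs anywhere; the cfg name is the one used by the diff):

```rust
// Which mask is applied is decided at compile time by cfg, mirroring the
// mask!() macro added in armv4t.rs.
#[cfg(not(portable_atomic_disable_fiq))]
macro_rules! mask {
    () => { 0x80_u32 }; // I (IRQ mask) bit (1 << 7)
}
#[cfg(portable_atomic_disable_fiq)]
macro_rules! mask {
    () => { 0xC0_u32 }; // I | F (IRQ + FIQ mask) bits
}

fn main() {
    // Pretend-previous CPSR value; the real code reads it with `mrs`.
    let cpsr: u32 = 0b0001_0000;
    // One `orr` with the selected immediate masks IRQ (and, optionally, FIQ).
    let masked = cpsr | mask!();
    println!("cpsr {cpsr:#010b} -> {masked:#010b}");
}
```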
diff --git a/vendor/portable-atomic/src/imp/interrupt/armv6m.rs b/vendor/portable-atomic/src/imp/interrupt/armv6m.rs
index 00413128c..85037a3ea 100644
--- a/vendor/portable-atomic/src/imp/interrupt/armv6m.rs
+++ b/vendor/portable-atomic/src/imp/interrupt/armv6m.rs
@@ -1,7 +1,9 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
 // Adapted from https://github.com/rust-embedded/cortex-m.
 //
 // Generated asm:
-// - armv6-m https://godbolt.org/z/sTezYnaj9
+// - armv6-m https://godbolt.org/z/YxME38xcM
 
 #[cfg(not(portable_atomic_no_asm))]
 use core::arch::asm;
diff --git a/vendor/portable-atomic/src/imp/interrupt/avr.rs b/vendor/portable-atomic/src/imp/interrupt/avr.rs
index 7cc48c62e..76d99c142 100644
--- a/vendor/portable-atomic/src/imp/interrupt/avr.rs
+++ b/vendor/portable-atomic/src/imp/interrupt/avr.rs
@@ -1,4 +1,9 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
 // Adapted from https://github.com/Rahix/avr-device.
+//
+// Refs:
+// - AVR Instruction Set Manual https://ww1.microchip.com/downloads/en/DeviceDoc/AVR-InstructionSet-Manual-DS40002198.pdf
 
 #[cfg(not(portable_atomic_no_asm))]
 use core::arch::asm;
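Both of these modules keep the same `disable() -> State` / `restore(State)` pairing as the armv4t code above: `disable` returns the previous PRIMASK/SREG value, so `restore` re-enables interrupts only if they were enabled before, which is what lets nested critical sections compose. A portable stand-in for that invariant (a thread-local flag instead of the real registers; names illustrative):

```rust
use std::cell::Cell;

thread_local! {
    // Pretend "interrupts enabled" flag standing in for PRIMASK/SREG.
    static ENABLED: Cell<bool> = Cell::new(true);
}

// Returns the previous state, like the per-target `disable() -> State`.
fn disable() -> bool {
    ENABLED.with(|e| e.replace(false))
}

// Takes the state returned by the matching `disable`, like `restore(State)`.
fn restore(prev: bool) {
    ENABLED.with(|e| e.set(prev));
}

fn main() {
    let outer = disable();          // interrupts now off
    let inner = disable();          // nested critical section
    restore(inner);                 // still off: inner saw them already disabled
    ENABLED.with(|e| assert!(!e.get()));
    restore(outer);                 // back on only when the outermost section ends
    ENABLED.with(|e| assert!(e.get()));
}
```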
diff --git a/vendor/portable-atomic/src/imp/interrupt/mod.rs b/vendor/portable-atomic/src/imp/interrupt/mod.rs
index a0ead68a6..e0ed0f6e6 100644
--- a/vendor/portable-atomic/src/imp/interrupt/mod.rs
+++ b/vendor/portable-atomic/src/imp/interrupt/mod.rs
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
 // Critical section based fallback implementations
 //
 // This module supports two different critical section implementations:
@@ -26,8 +28,8 @@
 //
 // See also README.md of this directory.
 //
-// [^avr1]: https://github.com/llvm/llvm-project/blob/llvmorg-16.0.0/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp#LL963
-// [^avr2]: https://github.com/llvm/llvm-project/blob/llvmorg-16.0.0/llvm/test/CodeGen/AVR/atomics/load16.ll#L5
+// [^avr1]: https://github.com/llvm/llvm-project/blob/llvmorg-17.0.0-rc2/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp#L1074
+// [^avr2]: https://github.com/llvm/llvm-project/blob/llvmorg-17.0.0-rc2/llvm/test/CodeGen/AVR/atomics/load16.ll#L5
 
 // On some platforms, atomic load/store can be implemented in a more efficient
 // way than disabling interrupts. On MSP430, some RMWs that do not return the
@@ -173,11 +175,21 @@ impl<T> AtomicPtr<T> {
     }
 
     #[inline]
-    pub(crate) fn swap(&self, ptr: *mut T, _order: Ordering) -> *mut T {
+    pub(crate) fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
+        let _ = order;
+        #[cfg(portable_atomic_force_amo)]
+        {
+            self.as_native().swap(ptr, order)
+        }
+        #[cfg(not(portable_atomic_force_amo))]
         // SAFETY: any data races are prevented by disabling interrupts (see
         // module-level comments) and the raw pointer is valid because we got it
         // from a reference.
-        with(|| unsafe { self.p.get().replace(ptr) })
+        with(|| unsafe {
+            let prev = self.p.get().read();
+            self.p.get().write(ptr);
+            prev
+        })
     }
 
     #[inline]
@@ -194,12 +206,12 @@ impl<T> AtomicPtr<T> {
         // module-level comments) and the raw pointer is valid because we got it
         // from a reference.
         with(|| unsafe {
-            let result = self.p.get().read();
-            if result == current {
+            let prev = self.p.get().read();
+            if prev == current {
                 self.p.get().write(new);
-                Ok(result)
+                Ok(prev)
             } else {
-                Err(result)
+                Err(prev)
             }
         })
     }
@@ -275,9 +287,12 @@ macro_rules! atomic_int {
             }
         }
     };
-    (load_store_atomic, $atomic_type:ident, $int_type:ident, $align:literal) => {
+    (load_store_atomic $([$kind:ident])?, $atomic_type:ident, $int_type:ident, $align:literal) => {
         atomic_int!(base, $atomic_type, $int_type, $align);
-        atomic_int!(cas, $atomic_type, $int_type);
+        #[cfg(not(portable_atomic_force_amo))]
+        atomic_int!(cas[emulate], $atomic_type, $int_type);
+        #[cfg(portable_atomic_force_amo)]
+        atomic_int!(cas $([$kind])?, $atomic_type, $int_type);
         impl $atomic_type {
             #[inline]
             #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
@@ -358,7 +373,7 @@ macro_rules! atomic_int {
     };
     (load_store_critical_session, $atomic_type:ident, $int_type:ident, $align:literal) => {
         atomic_int!(base, $atomic_type, $int_type, $align);
-        atomic_int!(cas, $atomic_type, $int_type);
+        atomic_int!(cas[emulate], $atomic_type, $int_type);
         impl_default_no_fetch_ops!($atomic_type, $int_type);
         impl_default_bit_opts!($atomic_type, $int_type);
         impl $atomic_type {
@@ -388,14 +403,18 @@ macro_rules! atomic_int {
            }
        }
    };
-    (cas, $atomic_type:ident, $int_type:ident) => {
+    (cas[emulate], $atomic_type:ident, $int_type:ident) => {
         impl $atomic_type {
             #[inline]
             pub(crate) fn swap(&self, val: $int_type, _order: Ordering) -> $int_type {
                 // SAFETY: any data races are prevented by disabling interrupts (see
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
-                with(|| unsafe { self.v.get().replace(val) })
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    self.v.get().write(val);
+                    prev
+                })
             }
 
             #[inline]
@@ -412,12 +431,12 @@ macro_rules! atomic_int {
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
                 with(|| unsafe {
-                    let result = self.v.get().read();
-                    if result == current {
+                    let prev = self.v.get().read();
+                    if prev == current {
                         self.v.get().write(new);
-                        Ok(result)
+                        Ok(prev)
                     } else {
-                        Err(result)
+                        Err(prev)
                     }
                 })
             }
@@ -440,9 +459,9 @@ macro_rules! atomic_int {
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
                 with(|| unsafe {
-                    let result = self.v.get().read();
-                    self.v.get().write(result.wrapping_add(val));
-                    result
+                    let prev = self.v.get().read();
+                    self.v.get().write(prev.wrapping_add(val));
+                    prev
                 })
             }
 
@@ -452,9 +471,9 @@ macro_rules! atomic_int {
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
                 with(|| unsafe {
-                    let result = self.v.get().read();
-                    self.v.get().write(result.wrapping_sub(val));
-                    result
+                    let prev = self.v.get().read();
+                    self.v.get().write(prev.wrapping_sub(val));
+                    prev
                 })
             }
 
@@ -464,9 +483,9 @@ macro_rules! atomic_int {
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
                 with(|| unsafe {
-                    let result = self.v.get().read();
-                    self.v.get().write(result & val);
-                    result
+                    let prev = self.v.get().read();
+                    self.v.get().write(prev & val);
+                    prev
                 })
             }
 
@@ -476,9 +495,9 @@ macro_rules! atomic_int {
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
                 with(|| unsafe {
-                    let result = self.v.get().read();
-                    self.v.get().write(!(result & val));
-                    result
+                    let prev = self.v.get().read();
+                    self.v.get().write(!(prev & val));
+                    prev
                 })
             }
 
@@ -488,9 +507,9 @@ macro_rules! atomic_int {
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
                 with(|| unsafe {
-                    let result = self.v.get().read();
-                    self.v.get().write(result | val);
-                    result
+                    let prev = self.v.get().read();
+                    self.v.get().write(prev | val);
+                    prev
                 })
             }
 
@@ -500,9 +519,9 @@ macro_rules! atomic_int {
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
                 with(|| unsafe {
-                    let result = self.v.get().read();
-                    self.v.get().write(result ^ val);
-                    result
+                    let prev = self.v.get().read();
+                    self.v.get().write(prev ^ val);
+                    prev
                 })
             }
 
@@ -512,9 +531,9 @@ macro_rules! atomic_int {
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
                 with(|| unsafe {
-                    let result = self.v.get().read();
-                    self.v.get().write(core::cmp::max(result, val));
-                    result
+                    let prev = self.v.get().read();
+                    self.v.get().write(core::cmp::max(prev, val));
+                    prev
                 })
             }
 
@@ -524,9 +543,9 @@ macro_rules! atomic_int {
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
                 with(|| unsafe {
-                    let result = self.v.get().read();
-                    self.v.get().write(core::cmp::min(result, val));
-                    result
+                    let prev = self.v.get().read();
+                    self.v.get().write(core::cmp::min(prev, val));
+                    prev
                 })
             }
 
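All of the emulated RMWs above follow one contract: read the current value inside the critical section, write the updated value, and return the value that was there before (the rename from `result` to `prev` makes that explicit). A host-side usage sketch of that contract through the crate's public types, assuming a dependency on `portable-atomic = "1"` (on targets with native atomics these calls never reach this fallback):

```rust
use portable_atomic::{AtomicU8, Ordering};

fn main() {
    let v = AtomicU8::new(0b1100);
    // Every RMW returns the previous value and leaves the updated one behind.
    assert_eq!(v.fetch_and(0b1010, Ordering::Relaxed), 0b1100);
    assert_eq!(v.load(Ordering::Relaxed), 0b1000);
    assert_eq!(v.fetch_add(1, Ordering::Relaxed), 0b1000);
    assert_eq!(v.load(Ordering::Relaxed), 0b1001);
    assert_eq!(v.fetch_max(0b0011, Ordering::Relaxed), 0b1001); // max(prev, val) is stored
    assert_eq!(v.load(Ordering::Relaxed), 0b1001);
}
```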
@@ -536,21 +555,275 @@ macro_rules! atomic_int {
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
                 with(|| unsafe {
-                    let result = self.v.get().read();
-                    self.v.get().write(!result);
-                    result
+                    let prev = self.v.get().read();
+                    self.v.get().write(!prev);
+                    prev
+                })
+            }
+
+            #[inline]
+            pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
+                // SAFETY: any data races are prevented by disabling interrupts (see
+                // module-level comments) and the raw pointer is valid because we got it
+                // from a reference.
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    self.v.get().write(prev.wrapping_neg());
+                    prev
+                })
+            }
+            #[inline]
+            pub(crate) fn neg(&self, order: Ordering) {
+                self.fetch_neg(order);
+            }
+        }
+    };
+    // cfg(portable_atomic_force_amo) 32-bit(RV32)/{32,64}-bit(RV64) RMW
+    (cas, $atomic_type:ident, $int_type:ident) => {
+        impl $atomic_type {
+            #[inline]
+            pub(crate) fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
+                self.as_native().swap(val, order)
+            }
+
+            #[inline]
+            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+            pub(crate) fn compare_exchange(
+                &self,
+                current: $int_type,
+                new: $int_type,
+                success: Ordering,
+                failure: Ordering,
+            ) -> Result<$int_type, $int_type> {
+                crate::utils::assert_compare_exchange_ordering(success, failure);
+                // SAFETY: any data races are prevented by disabling interrupts (see
+                // module-level comments) and the raw pointer is valid because we got it
+                // from a reference.
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    if prev == current {
+                        self.v.get().write(new);
+                        Ok(prev)
+                    } else {
+                        Err(prev)
+                    }
+                })
+            }
+
+            #[inline]
+            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+            pub(crate) fn compare_exchange_weak(
+                &self,
+                current: $int_type,
+                new: $int_type,
+                success: Ordering,
+                failure: Ordering,
+            ) -> Result<$int_type, $int_type> {
+                self.compare_exchange(current, new, success, failure)
+            }
+
+            #[inline]
+            pub(crate) fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
+                self.as_native().fetch_add(val, order)
+            }
+            #[inline]
+            pub(crate) fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
+                self.as_native().fetch_sub(val, order)
+            }
+            #[inline]
+            pub(crate) fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
+                self.as_native().fetch_and(val, order)
+            }
+
+            #[inline]
+            pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type {
+                // SAFETY: any data races are prevented by disabling interrupts (see
+                // module-level comments) and the raw pointer is valid because we got it
+                // from a reference.
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    self.v.get().write(!(prev & val));
+                    prev
+                })
+            }
+
+            #[inline]
+            pub(crate) fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
+                self.as_native().fetch_or(val, order)
+            }
+            #[inline]
+            pub(crate) fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
+                self.as_native().fetch_xor(val, order)
+            }
+            #[inline]
+            pub(crate) fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
+                self.as_native().fetch_max(val, order)
+            }
+            #[inline]
+            pub(crate) fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
+                self.as_native().fetch_min(val, order)
+            }
+            #[inline]
+            pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
+                self.as_native().fetch_not(order)
+            }
+
+            #[inline]
+            pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
+                // SAFETY: any data races are prevented by disabling interrupts (see
+                // module-level comments) and the raw pointer is valid because we got it
+                // from a reference.
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    self.v.get().write(prev.wrapping_neg());
+                    prev
+                })
+            }
+            #[inline]
+            pub(crate) fn neg(&self, order: Ordering) {
+                self.fetch_neg(order);
+            }
+        }
+    };
+    // cfg(portable_atomic_force_amo) {8,16}-bit RMW
+    (cas[sub_word], $atomic_type:ident, $int_type:ident) => {
+        impl $atomic_type {
+            #[inline]
+            pub(crate) fn swap(&self, val: $int_type, _order: Ordering) -> $int_type {
+                // SAFETY: any data races are prevented by disabling interrupts (see
+                // module-level comments) and the raw pointer is valid because we got it
+                // from a reference.
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    self.v.get().write(val);
+                    prev
+                })
+            }
+
+            #[inline]
+            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+            pub(crate) fn compare_exchange(
+                &self,
+                current: $int_type,
+                new: $int_type,
+                success: Ordering,
+                failure: Ordering,
+            ) -> Result<$int_type, $int_type> {
+                crate::utils::assert_compare_exchange_ordering(success, failure);
+                // SAFETY: any data races are prevented by disabling interrupts (see
+                // module-level comments) and the raw pointer is valid because we got it
+                // from a reference.
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    if prev == current {
+                        self.v.get().write(new);
+                        Ok(prev)
+                    } else {
+                        Err(prev)
+                    }
                 })
             }
 
             #[inline]
+            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
+            pub(crate) fn compare_exchange_weak(
+                &self,
+                current: $int_type,
+                new: $int_type,
+                success: Ordering,
+                failure: Ordering,
+            ) -> Result<$int_type, $int_type> {
+                self.compare_exchange(current, new, success, failure)
+            }
+
+            #[inline]
+            pub(crate) fn fetch_add(&self, val: $int_type, _order: Ordering) -> $int_type {
+                // SAFETY: any data races are prevented by disabling interrupts (see
+                // module-level comments) and the raw pointer is valid because we got it
+                // from a reference.
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    self.v.get().write(prev.wrapping_add(val));
+                    prev
+                })
+            }
+
+            #[inline]
+            pub(crate) fn fetch_sub(&self, val: $int_type, _order: Ordering) -> $int_type {
+                // SAFETY: any data races are prevented by disabling interrupts (see
+                // module-level comments) and the raw pointer is valid because we got it
+                // from a reference.
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    self.v.get().write(prev.wrapping_sub(val));
+                    prev
+                })
+            }
+
+            #[inline]
+            pub(crate) fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
+                self.as_native().fetch_and(val, order)
+            }
+
+            #[inline]
+            pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type {
+                // SAFETY: any data races are prevented by disabling interrupts (see
+                // module-level comments) and the raw pointer is valid because we got it
+                // from a reference.
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    self.v.get().write(!(prev & val));
+                    prev
+                })
+            }
+
+            #[inline]
+            pub(crate) fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
+                self.as_native().fetch_or(val, order)
+            }
+            #[inline]
+            pub(crate) fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
+                self.as_native().fetch_xor(val, order)
+            }
+
+            #[inline]
+            pub(crate) fn fetch_max(&self, val: $int_type, _order: Ordering) -> $int_type {
+                // SAFETY: any data races are prevented by disabling interrupts (see
+                // module-level comments) and the raw pointer is valid because we got it
+                // from a reference.
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    self.v.get().write(core::cmp::max(prev, val));
+                    prev
+                })
+            }
+
+            #[inline]
+            pub(crate) fn fetch_min(&self, val: $int_type, _order: Ordering) -> $int_type {
+                // SAFETY: any data races are prevented by disabling interrupts (see
+                // module-level comments) and the raw pointer is valid because we got it
+                // from a reference.
+                with(|| unsafe {
+                    let prev = self.v.get().read();
+                    self.v.get().write(core::cmp::min(prev, val));
+                    prev
+                })
+            }
+
+            #[inline]
+            pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
+                self.as_native().fetch_not(order)
+            }
+
+            #[inline]
             pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
                 // SAFETY: any data races are prevented by disabling interrupts (see
                 // module-level comments) and the raw pointer is valid because we got it
                 // from a reference.
                 with(|| unsafe {
-                    let result = self.v.get().read();
-                    self.v.get().write(result.wrapping_neg());
-                    result
+                    let prev = self.v.get().read();
+                    self.v.get().write(prev.wrapping_neg());
+                    prev
                 })
             }
             #[inline]
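In the new `cas[sub_word]` arm only the bitwise RMWs (`fetch_and`/`fetch_or`/`fetch_xor`/`fetch_not`) delegate to the native path; arithmetic, min/max, swap and CAS stay under the critical section. Bitwise ops are the ones that can be widened to a word-sized AMO by filling the untouched lanes with their identity element, which is presumably why they are the exception. A plain-integer sketch of that widening (whether the native RISC-V path does exactly this is an assumption here, not taken from the diff):

```rust
// Filling the other lanes with 1s (for AND) or 0s (for OR/XOR) lets a
// word-wide operation change only the targeted byte.
fn widen_and(val: u8, byte_offset: u32) -> u32 {
    let shift = byte_offset * 8;
    ((val as u32) << shift) | !(0xFFu32 << shift)
}

fn widen_or(val: u8, byte_offset: u32) -> u32 {
    (val as u32) << (byte_offset * 8)
}

fn main() {
    let word: u32 = 0xAABB_CCDD;
    // AND 0x0F into byte 1 (0xCC) only; the other bytes are untouched.
    assert_eq!(word & widen_and(0x0F, 1), 0xAABB_0CDD);
    // OR 0xF0 into byte 2 (0xBB) only.
    assert_eq!(word | widen_or(0xF0, 2), 0xAAFB_CCDD);
}
```

The same trick does not exist for add/sub (carries would leak into neighbouring lanes) or max/min (the comparison is not lane-wise), which is consistent with the README's list of which sub-word operations `force-amo` accelerates.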
@@ -578,10 +851,10 @@ atomic_int!(load_store_atomic, AtomicIsize, isize, 16);
 #[cfg(target_pointer_width = "128")]
 atomic_int!(load_store_atomic, AtomicUsize, usize, 16);
 
-atomic_int!(load_store_atomic, AtomicI8, i8, 1);
-atomic_int!(load_store_atomic, AtomicU8, u8, 1);
-atomic_int!(load_store_atomic, AtomicI16, i16, 2);
-atomic_int!(load_store_atomic, AtomicU16, u16, 2);
+atomic_int!(load_store_atomic[sub_word], AtomicI8, i8, 1);
+atomic_int!(load_store_atomic[sub_word], AtomicU8, u8, 1);
+atomic_int!(load_store_atomic[sub_word], AtomicI16, i16, 2);
+atomic_int!(load_store_atomic[sub_word], AtomicU16, u16, 2);
 
 #[cfg(not(target_pointer_width = "16"))]
 atomic_int!(load_store_atomic, AtomicI32, i32, 4);
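The invocation change above is what routes `i8`/`u8`/`i16`/`u16` onto the new sub-word arm: `atomic_int!` takes an optional bracketed designator (`$([$kind:ident])?`) and forwards it to the matching CAS arm. A minimal standalone reproduction of that dispatch (a toy macro, not the crate's real one):

```rust
// The optional `[$kind]` designator selects a different arm, mirroring how
// `load_store_atomic[sub_word]` ends up on the partial-AMO implementation.
macro_rules! describe {
    ($name:ident $([$kind:ident])?) => {
        describe!(@inner $name $([$kind])?)
    };
    (@inner $name:ident) => {
        concat!(stringify!($name), ": word-sized, full AMO set")
    };
    (@inner $name:ident[sub_word]) => {
        concat!(stringify!($name), ": sub-word, bitwise AMOs only")
    };
}

fn main() {
    println!("{}", describe!(AtomicU32));
    println!("{}", describe!(AtomicU8[sub_word]));
}
```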
diff --git a/vendor/portable-atomic/src/imp/interrupt/msp430.rs b/vendor/portable-atomic/src/imp/interrupt/msp430.rs
index 020ed1023..8c1ca80ee 100644
--- a/vendor/portable-atomic/src/imp/interrupt/msp430.rs
+++ b/vendor/portable-atomic/src/imp/interrupt/msp430.rs
@@ -1,6 +1,10 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
 // Adapted from https://github.com/rust-embedded/msp430.
 //
 // See also src/imp/msp430.rs.
+//
+// Refs: https://www.ti.com/lit/ug/slau208q/slau208q.pdf
 
 #[cfg(not(portable_atomic_no_asm))]
 use core::arch::asm;
@@ -18,7 +22,6 @@ pub(super) fn disable() -> State {
     unsafe {
         // Do not use `nomem` and `readonly` because prevent subsequent memory accesses from being reordered before interrupts are disabled.
         // Do not use `preserves_flags` because DINT modifies the GIE (global interrupt enable) bit of the status register.
-        // Refs: https://mspgcc.sourceforge.net/manual/x951.html
         #[cfg(not(portable_atomic_no_asm))]
         asm!(
             "mov R2, {0}",
diff --git a/vendor/portable-atomic/src/imp/interrupt/riscv.rs b/vendor/portable-atomic/src/imp/interrupt/riscv.rs
index c08545e1d..65b1af2ff 100644
--- a/vendor/portable-atomic/src/imp/interrupt/riscv.rs
+++ b/vendor/portable-atomic/src/imp/interrupt/riscv.rs
@@ -1,9 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
 // Refs:
 // - https://five-embeddev.com/riscv-isa-manual/latest/machine.html#machine-status-registers-mstatus-and-mstatush
 // - https://five-embeddev.com/riscv-isa-manual/latest/supervisor.html#sstatus
 //
 // Generated asm:
-// - riscv64gc https://godbolt.org/z/a78zxf5sW
+// - riscv64gc https://godbolt.org/z/osbzsT679
 
 #[cfg(not(portable_atomic_no_asm))]
 use core::arch::asm;
diff --git a/vendor/portable-atomic/src/imp/interrupt/xtensa.rs b/vendor/portable-atomic/src/imp/interrupt/xtensa.rs
index 3593c25af..6cbb4cffb 100644
--- a/vendor/portable-atomic/src/imp/interrupt/xtensa.rs
+++ b/vendor/portable-atomic/src/imp/interrupt/xtensa.rs
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
 // Refs:
 // - Xtensa Instruction Set Architecture (ISA) Reference Manual https://0x04.net/~mwk/doc/xtensa.pdf
 // - Linux kernel's Xtensa atomic implementation https://github.com/torvalds/linux/blob/v6.1/arch/xtensa/include/asm/atomic.h