vendor/portable-atomic/src/imp/interrupt/armv4t.rs
// SPDX-License-Identifier: Apache-2.0 OR MIT

// Refs: https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/The-System-Level-Programmers--Model/ARM-processor-modes-and-ARM-core-registers/Program-Status-Registers--PSRs-?lang=en
//
// Generated asm:
// - armv5te https://godbolt.org/z/Teh7WajMs

#[cfg(not(portable_atomic_no_asm))]
use core::arch::asm;

// - 0x80 - I (IRQ mask) bit (1 << 7)
// - 0x40 - F (FIQ mask) bit (1 << 6)
// We disable only IRQs by default. See also https://github.com/taiki-e/portable-atomic/pull/28#issuecomment-1214146912.
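// For reference, the CPSR control field (its lowest byte) is laid out as:
// bit 7: I (IRQ disable), bit 6: F (FIQ disable), bit 5: T (Thumb state),
// bits 4-0: M (processor mode).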
#[cfg(not(portable_atomic_disable_fiq))]
macro_rules! mask {
    () => {
        "0x80"
    };
}
#[cfg(portable_atomic_disable_fiq)]
macro_rules! mask {
    () => {
        "0xC0" // 0x80 | 0x40
    };
}

pub(super) type State = u32;

/// Disables interrupts and returns the previous interrupt state.
#[inline]
#[instruction_set(arm::a32)]
pub(super) fn disable() -> State {
    let cpsr: State;
    // SAFETY: reading CPSR and disabling interrupts are safe.
    // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
    unsafe {
        asm!(
            "mrs {prev}, cpsr",
            concat!("orr {new}, {prev}, ", mask!()),
            "msr cpsr_c, {new}",
            prev = out(reg) cpsr,
            new = out(reg) _,
            // Do not use `nomem` and `readonly`, to prevent subsequent memory accesses from being reordered before interrupts are disabled.
            options(nostack, preserves_flags),
        );
    }
    cpsr
}

/// Restores the previous interrupt state.
///
/// # Safety
///
/// The state must be the one retrieved by the previous `disable`.
#[inline]
#[instruction_set(arm::a32)]
pub(super) unsafe fn restore(cpsr: State) {
    // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`.
    //
    // This clobbers the control field mask byte of CPSR. See msp430.rs for notes on the safety of this.
    // (preserves_flags is fine because we only clobber the I, F, T, and M bits of CPSR.)
    //
    // Refs: https://developer.arm.com/documentation/dui0473/m/arm-and-thumb-instructions/msr--general-purpose-register-to-psr-
    unsafe {
        // Do not use `nomem` and `readonly`, to prevent preceding memory accesses from being reordered after interrupts are enabled.
        asm!("msr cpsr_c, {0}", in(reg) cpsr, options(nostack, preserves_flags));
    }
}
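
// Illustration only (a hedged sketch, not part of the upstream file): the
// parent interrupt module is expected to pair `disable`/`restore` into a
// critical section along these lines. The name `with_sketch` is hypothetical.
#[allow(dead_code)]
#[inline]
fn with_sketch<T>(f: impl FnOnce() -> T) -> T {
    let state = disable();
    let r = f();
    // SAFETY: `state` was returned by the `disable` call directly above.
    unsafe { restore(state) };
    r
}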

// On pre-v6 ARM, we cannot use core::sync::atomic here because its types
// call the `__sync_*` builtins for non-relaxed load/store (pre-v6 ARM has
// no Data Memory Barrier instruction).
//
// Generated asm:
// - armv5te https://godbolt.org/z/bMxK7M8Ta
pub(crate) mod atomic {
    #[cfg(not(portable_atomic_no_asm))]
    use core::arch::asm;
    use core::{cell::UnsafeCell, sync::atomic::Ordering};

    macro_rules! atomic {
        ($([$($generics:tt)*])? $atomic_type:ident, $value_type:ty, $asm_suffix:tt) => {
            #[repr(transparent)]
            pub(crate) struct $atomic_type $(<$($generics)*>)? {
                v: UnsafeCell<$value_type>,
            }

            // Send is implicitly implemented for atomic integers, but not for atomic pointers.
            // SAFETY: any data races are prevented by atomic operations.
            unsafe impl $(<$($generics)*>)? Send for $atomic_type $(<$($generics)*>)? {}
            // SAFETY: any data races are prevented by atomic operations.
            unsafe impl $(<$($generics)*>)? Sync for $atomic_type $(<$($generics)*>)? {}

            impl $(<$($generics)*>)? $atomic_type $(<$($generics)*>)? {
                #[inline]
                pub(crate) fn load(&self, order: Ordering) -> $value_type {
                    let src = self.v.get();
                    // SAFETY: any data races are prevented by atomic intrinsics and the raw
                    // pointer passed in is valid because we got it from a reference.
                    unsafe {
                        let out;
                        match order {
                            Ordering::Relaxed => {
                                asm!(
                                    concat!("ldr", $asm_suffix, " {out}, [{src}]"),
                                    src = in(reg) src,
                                    out = lateout(reg) out,
                                    options(nostack, preserves_flags, readonly),
                                );
                            }
                            Ordering::Acquire | Ordering::SeqCst => {
                                // inline asm without nomem/readonly implies a compiler fence.
                                // A compiler fence suffices here because the user explicitly
                                // declares that the system is single-core by using an unsafe cfg.
                                asm!(
                                    concat!("ldr", $asm_suffix, " {out}, [{src}]"),
                                    src = in(reg) src,
                                    out = lateout(reg) out,
                                    options(nostack, preserves_flags),
                                );
                            }
                            _ => unreachable!("{:?}", order),
                        }
                        out
                    }
                }

                #[inline]
                pub(crate) fn store(&self, val: $value_type, _order: Ordering) {
                    let dst = self.v.get();
                    // SAFETY: any data races are prevented by atomic intrinsics and the raw
                    // pointer passed in is valid because we got it from a reference.
                    unsafe {
                        // inline asm without nomem/readonly implies a compiler fence.
                        // A compiler fence suffices here because the user explicitly
                        // declares that the system is single-core by using an unsafe cfg.
                        asm!(
                            concat!("str", $asm_suffix, " {val}, [{dst}]"),
                            dst = in(reg) dst,
                            val = in(reg) val,
                            options(nostack, preserves_flags),
                        );
                    }
                }
            }
        };
    }

    atomic!(AtomicI8, i8, "b");
    atomic!(AtomicU8, u8, "b");
    atomic!(AtomicI16, i16, "h");
    atomic!(AtomicU16, u16, "h");
    atomic!(AtomicI32, i32, "");
    atomic!(AtomicU32, u32, "");
    atomic!(AtomicIsize, isize, "");
    atomic!(AtomicUsize, usize, "");
    atomic!([T] AtomicPtr, *mut T, "");
}
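
// Illustration only (a hedged sketch, not part of the upstream file):
// read-modify-write operations are expected to be built on these
// interrupt-free load/store primitives by the generic interrupt layer,
// roughly as below. The function name is hypothetical.
#[allow(dead_code)]
fn fetch_add_u32_sketch(a: &atomic::AtomicU32, val: u32) -> u32 {
    use core::sync::atomic::Ordering;
    // With IRQs masked on a single-core system, no interrupt handler can
    // interleave with this load/store pair.
    let state = disable();
    let prev = a.load(Ordering::Relaxed);
    a.store(prev.wrapping_add(val), Ordering::Relaxed);
    // SAFETY: `state` was returned by the `disable` call directly above.
    unsafe { restore(state) };
    prev
}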