// SPDX-License-Identifier: Apache-2.0 OR MIT
// This file is @generated by target_spec.sh.
// It is not intended for manual editing.

#![allow(unused_macros)]

// On AArch64, the base register of load/store/atomic instructions must be 64-bit.
// Passing a 32-bit value to `in(reg)` on AArch64 leaves the upper bits of the
// register undefined, but for the ILP32 ABI to work correctly, the upper bits
// must be zero. We handle this here by casting the pointer to u64. Another way
// to handle this is to pass the pointer as-is and clear the upper bits inside
// the asm, but that is easier to overlook than a cast, and a forgotten cast can
// be caught by the asm_sub_register lint.
// See also https://github.com/ARM-software/abi-aa/blob/2023Q1/aapcs64/aapcs64.rst#57pointers
//
// The same is needed for ILP32 ABIs on other 64-bit architectures, except for
// x86_64, which can use 32-bit registers in the destination operand (we use the
// ptr_modifier macro to handle this on x86_64). (At least, as far as I can tell
// from the assembly generated by LLVM, this is also required for the MIPS64 N32
// ABI. I don't know about the RISC-V s64ilp32 ABI, for which a patch was
// recently submitted to the kernel, but in any case this should be a safe
// default for such ABIs.)
//
// Known architectures that have such ABIs are x86_64 (X32), aarch64 (ILP32),
// mips64 (N32), and riscv64 (s64ilp32, not yet merged). (As of 2023-06-05, only
// the first two are supported by rustc.) However, we list all known 64-bit
// architectures because similar ABIs may exist, or be added in the future, for
// other architectures.
#[cfg(all(
    target_pointer_width = "32",
    any(
        target_arch = "aarch64",
        target_arch = "bpf",
        target_arch = "loongarch64",
        target_arch = "mips64",
        target_arch = "mips64r6",
        target_arch = "nvptx64",
        target_arch = "powerpc64",
        target_arch = "riscv64",
        target_arch = "s390x",
        target_arch = "sparc64",
        target_arch = "wasm64",
        target_arch = "x86_64",
    ),
))]
macro_rules! ptr_reg {
    ($ptr:ident) => {{
        let _: *const _ = $ptr; // ensure $ptr is a pointer (*mut _ or *const _)
        #[cfg(not(portable_atomic_no_asm_maybe_uninit))]
        #[allow(clippy::ptr_as_ptr)]
        {
            // Casting to u64 here would lose the pointer's provenance, so we
            // instead convert to MaybeUninit<u64> via the zero-extension helper.
            crate::utils::zero_extend64_ptr($ptr as *mut ())
        }
        #[cfg(portable_atomic_no_asm_maybe_uninit)]
        {
            // Use a cast on old rustc, which does not support MaybeUninit
            // registers in asm. This is still compatible with the permissive
            // provenance model and is sound.
            $ptr as u64
        }
    }};
}
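// Illustrative sketch (not part of the generated code): one way a
// provenance-preserving zero-extension helper like crate::utils::zero_extend64_ptr
// can be written, assuming a little-endian ILP32 target. This is not necessarily
// how the actual helper is implemented; it only shows the idea of zero-extending
// the 32-bit pointer into the low half of a MaybeUninit<u64> without a
// pointer-to-integer cast:
//
//     #[inline]
//     fn zero_extend64_ptr(v: *mut ()) -> core::mem::MaybeUninit<u64> {
//         // Start from all-zero bytes so the upper 32 bits are zero.
//         let mut out = core::mem::MaybeUninit::<u64>::zeroed();
//         // Store the pointer itself into the low half (little-endian), which
//         // keeps its provenance; no pointer-to-integer cast is involved.
//         unsafe { out.as_mut_ptr().cast::<*mut ()>().write(v) };
//         out
//     }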
#[cfg(not(all(
    target_pointer_width = "32",
    any(
        target_arch = "aarch64",
        target_arch = "bpf",
        target_arch = "loongarch64",
        target_arch = "mips64",
        target_arch = "mips64r6",
        target_arch = "nvptx64",
        target_arch = "powerpc64",
        target_arch = "riscv64",
        target_arch = "s390x",
        target_arch = "sparc64",
        target_arch = "wasm64",
        target_arch = "x86_64",
    ),
)))]
macro_rules! ptr_reg {
    ($ptr:ident) => {{
        let _: *const _ = $ptr; // ensure $ptr is a pointer (*mut _ or *const _)
        $ptr // cast is unnecessary here.
    }};
}
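// Illustrative sketch (not part of the generated code): in arch-specific asm,
// the result of ptr_reg! is typically passed directly as an `in(reg)` operand,
// so the same call site works with both the ILP32 and LP64 variants above.
// A simplified, AArch64-flavored example (operand names are assumptions for
// the example):
//
//     let src: *mut u32 = /* ... */;
//     let out: u32;
//     unsafe {
//         core::arch::asm!(
//             "ldr {out:w}, [{src}]",      // the base register is always 64-bit
//             src = in(reg) ptr_reg!(src), // zero-extended on ILP32, passed as-is on LP64
//             out = lateout(reg) out,
//             options(nostack, preserves_flags),
//         );
//     }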

// Some 64-bit architectures have ABIs with a 32-bit pointer width (e.g., the
// x86_64 X32 ABI, the AArch64 ILP32 ABI, and the MIPS64 N32 ABI). On those
// targets, AtomicU64 is available and fast, so use it to implement the normal
// sequence lock.
//
// See the ptr_reg macro above for why all known 64-bit architectures are listed.
#[cfg(feature = "fallback")]
#[cfg(any(
    not(any(target_pointer_width = "16", target_pointer_width = "32")), // i.e., 64-bit or greater
    target_arch = "aarch64",
    target_arch = "bpf",
    target_arch = "loongarch64",
    target_arch = "mips64",
    target_arch = "mips64r6",
    target_arch = "nvptx64",
    target_arch = "powerpc64",
    target_arch = "riscv64",
    target_arch = "s390x",
    target_arch = "sparc64",
    target_arch = "wasm64",
    target_arch = "x86_64",
))]
#[macro_use]
mod fast_atomic_64_macros {
    macro_rules! cfg_has_fast_atomic_64 {
        ($($tt:tt)*) => {
            $($tt)*
        };
    }
    macro_rules! cfg_no_fast_atomic_64 {
        ($($tt:tt)*) => {};
    }
}
#[cfg(feature = "fallback")]
#[cfg(not(any(
    not(any(target_pointer_width = "16", target_pointer_width = "32")), // i.e., 64-bit or greater
    target_arch = "aarch64",
    target_arch = "bpf",
    target_arch = "loongarch64",
    target_arch = "mips64",
    target_arch = "mips64r6",
    target_arch = "nvptx64",
    target_arch = "powerpc64",
    target_arch = "riscv64",
    target_arch = "s390x",
    target_arch = "sparc64",
    target_arch = "wasm64",
    target_arch = "x86_64",
)))]
#[macro_use]
mod fast_atomic_64_macros {
    macro_rules! cfg_has_fast_atomic_64 {
        ($($tt:tt)*) => {};
    }
    macro_rules! cfg_no_fast_atomic_64 {
        ($($tt:tt)*) => {
            $($tt)*
        };
    }
}
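
// Illustrative sketch (not part of the generated code): callers can use these
// macros to compile one of two code paths depending on whether a fast 64-bit
// atomic is available; exactly one of the two bodies expands to its tokens and
// the other expands to nothing. The item names below are assumptions for the
// example, not the crate's actual items:
//
//     cfg_has_fast_atomic_64! {
//         // AtomicU64 is available and fast: use a 64-bit seqlock counter.
//         type SeqLockChunk = u64;
//     }
//     cfg_no_fast_atomic_64! {
//         // Otherwise fall back to a 32-bit counter.
//         type SeqLockChunk = u32;
//     }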