author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:03:05 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:03:05 +0000
commit    | 217d9223a5aa75daf9f286fd1fc06dae379b5dbc (patch)
tree      | b43bedae234ad56894a82934ee57e3619f3374d5 /debian/patches/u-arm-compiler-builtins-add-sync-builtin-fallbacks.patch
parent    | Adding upstream version 1.64.0+dfsg1. (diff)
download  | rustc-217d9223a5aa75daf9f286fd1fc06dae379b5dbc.tar.xz
          | rustc-217d9223a5aa75daf9f286fd1fc06dae379b5dbc.zip
Adding debian version 1.64.0+dfsg1-1. (tag: debian/1.64.0+dfsg1-1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'debian/patches/u-arm-compiler-builtins-add-sync-builtin-fallbacks.patch')
-rw-r--r-- | debian/patches/u-arm-compiler-builtins-add-sync-builtin-fallbacks.patch | 223
1 file changed, 223 insertions, 0 deletions
diff --git a/debian/patches/u-arm-compiler-builtins-add-sync-builtin-fallbacks.patch b/debian/patches/u-arm-compiler-builtins-add-sync-builtin-fallbacks.patch
new file mode 100644
index 000000000..796c17cef
--- /dev/null
+++ b/debian/patches/u-arm-compiler-builtins-add-sync-builtin-fallbacks.patch
@@ -0,0 +1,223 @@
+From 56172fcd8bd045e38bbdf76697d1fcca1e965d6d Mon Sep 17 00:00:00 2001
+From: Alex Huszagh <ahuszagh@gmail.com>
+Date: Fri, 29 Jul 2022 16:58:05 -0500
+Subject: [PATCH] Add compiler-rt fallbacks for sync builtins on armv5te-musl.
+
+---
+https://github.com/rust-lang/compiler-builtins/pull/484
+
+ src/arm_linux.rs | 110 +++++++++++++++++++++++++++++++----------------
+ 1 file changed, 73 insertions(+), 37 deletions(-)
+
+diff --git a/vendor/compiler_builtins/src/arm_linux.rs b/vendor/compiler_builtins/src/arm_linux.rs
+index 8fe0948..8f22eb6 100644
+--- a/vendor/compiler_builtins/src/arm_linux.rs
++++ b/vendor/compiler_builtins/src/arm_linux.rs
+@@ -55,7 +55,7 @@ fn insert_aligned(aligned: u32, val: u32, shift: u32, mask: u32) -> u32 {
+ }
+
+ // Generic atomic read-modify-write operation
+-unsafe fn atomic_rmw<T, F: Fn(u32) -> u32>(ptr: *mut T, f: F) -> u32 {
++unsafe fn atomic_rmw<T, F: Fn(u32) -> u32, G: Fn(u32, u32) -> u32>(ptr: *mut T, f: F, g: G) -> u32 {
+     let aligned_ptr = align_ptr(ptr);
+     let (shift, mask) = get_shift_mask(ptr);
+
+@@ -65,7 +65,7 @@ unsafe fn atomic_rmw<T, F: Fn(u32) -> u32>(ptr: *mut T, f: F) -> u32 {
+         let newval = f(curval);
+         let newval_aligned = insert_aligned(curval_aligned, newval, shift, mask);
+         if __kuser_cmpxchg(curval_aligned, newval_aligned, aligned_ptr) {
+-            return curval;
++            return g(curval, newval);
+         }
+     }
+ }
+@@ -89,13 +89,21 @@ unsafe fn atomic_cmpxchg<T>(ptr: *mut T, oldval: u32, newval: u32) -> u32 {
+ }
+
+ macro_rules! atomic_rmw {
+-    ($name:ident, $ty:ty, $op:expr) => {
++    ($name:ident, $ty:ty, $op:expr, $fetch:expr) => {
+         intrinsics! {
+             pub unsafe extern "C" fn $name(ptr: *mut $ty, val: $ty) -> $ty {
+-                atomic_rmw(ptr, |x| $op(x as $ty, val) as u32) as $ty
++                atomic_rmw(ptr, |x| $op(x as $ty, val) as u32, |old, new| $fetch(old, new)) as $ty
+             }
+         }
+     };
++
++    (@old $name:ident, $ty:ty, $op:expr) => {
++        atomic_rmw!($name, $ty, $op, |old, _| old);
++    };
++
++    (@new $name:ident, $ty:ty, $op:expr) => {
++        atomic_rmw!($name, $ty, $op, |_, new| new);
++    };
+ }
+ macro_rules! atomic_cmpxchg {
+     ($name:ident, $ty:ty) => {
+@@ -107,101 +115,129 @@ macro_rules! atomic_cmpxchg {
+     };
+ }
+
+-atomic_rmw!(__sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b));
+-atomic_rmw!(__sync_fetch_and_add_2, u16, |a: u16, b: u16| a
++atomic_rmw!(@old __sync_fetch_and_add_1, u8, |a: u8, b: u8| a.wrapping_add(b));
++atomic_rmw!(@old __sync_fetch_and_add_2, u16, |a: u16, b: u16| a
++    .wrapping_add(b));
++atomic_rmw!(@old __sync_fetch_and_add_4, u32, |a: u32, b: u32| a
++    .wrapping_add(b));
++
++atomic_rmw!(@new __sync_add_and_fetch_1, u8, |a: u8, b: u8| a.wrapping_add(b));
++atomic_rmw!(@new __sync_add_and_fetch_2, u16, |a: u16, b: u16| a
+     .wrapping_add(b));
+-atomic_rmw!(__sync_fetch_and_add_4, u32, |a: u32, b: u32| a
++atomic_rmw!(@new __sync_add_and_fetch_4, u32, |a: u32, b: u32| a
+     .wrapping_add(b));
+
+-atomic_rmw!(__sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
+-atomic_rmw!(__sync_fetch_and_sub_2, u16, |a: u16, b: u16| a
++atomic_rmw!(@old __sync_fetch_and_sub_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
++atomic_rmw!(@old __sync_fetch_and_sub_2, u16, |a: u16, b: u16| a
+     .wrapping_sub(b));
+-atomic_rmw!(__sync_fetch_and_sub_4, u32, |a: u32, b: u32| a
++atomic_rmw!(@old __sync_fetch_and_sub_4, u32, |a: u32, b: u32| a
+     .wrapping_sub(b));
+
+-atomic_rmw!(__sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b);
+-atomic_rmw!(__sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b);
+-atomic_rmw!(__sync_fetch_and_and_4, u32, |a: u32, b: u32| a & b);
++atomic_rmw!(@new __sync_sub_and_fetch_1, u8, |a: u8, b: u8| a.wrapping_sub(b));
++atomic_rmw!(@new __sync_sub_and_fetch_2, u16, |a: u16, b: u16| a
++    .wrapping_sub(b));
++atomic_rmw!(@new __sync_sub_and_fetch_4, u32, |a: u32, b: u32| a
++    .wrapping_sub(b));
++
++atomic_rmw!(@old __sync_fetch_and_and_1, u8, |a: u8, b: u8| a & b);
++atomic_rmw!(@old __sync_fetch_and_and_2, u16, |a: u16, b: u16| a & b);
++atomic_rmw!(@old __sync_fetch_and_and_4, u32, |a: u32, b: u32| a & b);
++
++atomic_rmw!(@new __sync_and_and_fetch_1, u8, |a: u8, b: u8| a & b);
++atomic_rmw!(@new __sync_and_and_fetch_2, u16, |a: u16, b: u16| a & b);
++atomic_rmw!(@new __sync_and_and_fetch_4, u32, |a: u32, b: u32| a & b);
++
++atomic_rmw!(@old __sync_fetch_and_or_1, u8, |a: u8, b: u8| a | b);
++atomic_rmw!(@old __sync_fetch_and_or_2, u16, |a: u16, b: u16| a | b);
++atomic_rmw!(@old __sync_fetch_and_or_4, u32, |a: u32, b: u32| a | b);
++
++atomic_rmw!(@new __sync_or_and_fetch_1, u8, |a: u8, b: u8| a | b);
++atomic_rmw!(@new __sync_or_and_fetch_2, u16, |a: u16, b: u16| a | b);
++atomic_rmw!(@new __sync_or_and_fetch_4, u32, |a: u32, b: u32| a | b);
++
++atomic_rmw!(@old __sync_fetch_and_xor_1, u8, |a: u8, b: u8| a ^ b);
++atomic_rmw!(@old __sync_fetch_and_xor_2, u16, |a: u16, b: u16| a ^ b);
++atomic_rmw!(@old __sync_fetch_and_xor_4, u32, |a: u32, b: u32| a ^ b);
+
+-atomic_rmw!(__sync_fetch_and_or_1, u8, |a: u8, b: u8| a | b);
+-atomic_rmw!(__sync_fetch_and_or_2, u16, |a: u16, b: u16| a | b);
+-atomic_rmw!(__sync_fetch_and_or_4, u32, |a: u32, b: u32| a | b);
++atomic_rmw!(@new __sync_xor_and_fetch_1, u8, |a: u8, b: u8| a ^ b);
++atomic_rmw!(@new __sync_xor_and_fetch_2, u16, |a: u16, b: u16| a ^ b);
++atomic_rmw!(@new __sync_xor_and_fetch_4, u32, |a: u32, b: u32| a ^ b);
+
+-atomic_rmw!(__sync_fetch_and_xor_1, u8, |a: u8, b: u8| a ^ b);
+-atomic_rmw!(__sync_fetch_and_xor_2, u16, |a: u16, b: u16| a ^ b);
+-atomic_rmw!(__sync_fetch_and_xor_4, u32, |a: u32, b: u32| a ^ b);
++atomic_rmw!(@old __sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b));
++atomic_rmw!(@old __sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b));
++atomic_rmw!(@old __sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b));
+
+-atomic_rmw!(__sync_fetch_and_nand_1, u8, |a: u8, b: u8| !(a & b));
+-atomic_rmw!(__sync_fetch_and_nand_2, u16, |a: u16, b: u16| !(a & b));
+-atomic_rmw!(__sync_fetch_and_nand_4, u32, |a: u32, b: u32| !(a & b));
++atomic_rmw!(@new __sync_nand_and_fetch_1, u8, |a: u8, b: u8| !(a & b));
++atomic_rmw!(@new __sync_nand_and_fetch_2, u16, |a: u16, b: u16| !(a & b));
++atomic_rmw!(@new __sync_nand_and_fetch_4, u32, |a: u32, b: u32| !(a & b));
+
+-atomic_rmw!(__sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b {
++atomic_rmw!(@old __sync_fetch_and_max_1, i8, |a: i8, b: i8| if a > b {
+     a
+ } else {
+     b
+ });
+-atomic_rmw!(__sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b {
++atomic_rmw!(@old __sync_fetch_and_max_2, i16, |a: i16, b: i16| if a > b {
+     a
+ } else {
+     b
+ });
+-atomic_rmw!(__sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b {
++atomic_rmw!(@old __sync_fetch_and_max_4, i32, |a: i32, b: i32| if a > b {
+     a
+ } else {
+     b
+ });
+
+-atomic_rmw!(__sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b {
++atomic_rmw!(@old __sync_fetch_and_umax_1, u8, |a: u8, b: u8| if a > b {
+     a
+ } else {
+     b
+ });
+-atomic_rmw!(__sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b {
++atomic_rmw!(@old __sync_fetch_and_umax_2, u16, |a: u16, b: u16| if a > b {
+     a
+ } else {
+     b
+ });
+-atomic_rmw!(__sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b {
++atomic_rmw!(@old __sync_fetch_and_umax_4, u32, |a: u32, b: u32| if a > b {
+     a
+ } else {
+     b
+ });
+
+-atomic_rmw!(__sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b {
++atomic_rmw!(@old __sync_fetch_and_min_1, i8, |a: i8, b: i8| if a < b {
+     a
+ } else {
+     b
+ });
+-atomic_rmw!(__sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b {
++atomic_rmw!(@old __sync_fetch_and_min_2, i16, |a: i16, b: i16| if a < b {
+     a
+ } else {
+     b
+ });
+-atomic_rmw!(__sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b {
++atomic_rmw!(@old __sync_fetch_and_min_4, i32, |a: i32, b: i32| if a < b {
+     a
+ } else {
+     b
+ });
+
+-atomic_rmw!(__sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b {
++atomic_rmw!(@old __sync_fetch_and_umin_1, u8, |a: u8, b: u8| if a < b {
+     a
+ } else {
+     b
+ });
+-atomic_rmw!(__sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b {
++atomic_rmw!(@old __sync_fetch_and_umin_2, u16, |a: u16, b: u16| if a < b {
+     a
+ } else {
+     b
+ });
+-atomic_rmw!(__sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b {
++atomic_rmw!(@old __sync_fetch_and_umin_4, u32, |a: u32, b: u32| if a < b {
+     a
+ } else {
+     b
+ });
+
+-atomic_rmw!(__sync_lock_test_and_set_1, u8, |_: u8, b: u8| b);
+-atomic_rmw!(__sync_lock_test_and_set_2, u16, |_: u16, b: u16| b);
+-atomic_rmw!(__sync_lock_test_and_set_4, u32, |_: u32, b: u32| b);
++atomic_rmw!(@old __sync_lock_test_and_set_1, u8, |_: u8, b: u8| b);
++atomic_rmw!(@old __sync_lock_test_and_set_2, u16, |_: u16, b: u16| b);
++atomic_rmw!(@old __sync_lock_test_and_set_4, u32, |_: u32, b: u32| b);
+
+ atomic_cmpxchg!(__sync_val_compare_and_swap_1, u8);
+ atomic_cmpxchg!(__sync_val_compare_and_swap_2, u16);
+--
+2.39.0
+
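The heart of the patch is the change to the generic atomic_rmw loop: it now takes a second closure that picks which value to hand back, so the same retry loop can back both the __sync_fetch_and_<op> builtins (which return the value seen before the update) and the new __sync_<op>_and_fetch fallbacks (which return the value after it). As a rough, standalone sketch of that return-value selection only — not the patch itself, which goes through the ARM Linux __kuser_cmpxchg kernel helper and sub-word masking rather than std atomics — the following Rust example uses a made-up helper name, rmw_illustration:

use std::sync::atomic::{AtomicU32, Ordering};

// Generic RMW retry loop: `f` computes the new value from the old one,
// `g` decides whether the caller gets the old or the new value back.
fn rmw_illustration(
    cell: &AtomicU32,
    f: impl Fn(u32) -> u32,
    g: impl Fn(u32, u32) -> u32,
) -> u32 {
    loop {
        let old = cell.load(Ordering::Relaxed);
        let new = f(old);
        // Retry until the compare-exchange succeeds, much like the
        // __kuser_cmpxchg loop in the real fallback.
        if cell
            .compare_exchange(old, new, Ordering::SeqCst, Ordering::Relaxed)
            .is_ok()
        {
            return g(old, new);
        }
    }
}

fn main() {
    let x = AtomicU32::new(40);
    // "fetch and add" flavour: report the value seen before the update (40).
    let before = rmw_illustration(&x, |v| v.wrapping_add(2), |old, _| old);
    // "add and fetch" flavour: report the freshly written value (44).
    let after = rmw_illustration(&x, |v| v.wrapping_add(2), |_, new| new);
    assert_eq!((before, after), (40, 44));
    println!("fetch_and_add -> {before}, add_and_fetch -> {after}");
}

The @old and @new macro arms introduced by the patch play exactly the role of the |old, _| old and |_, new| new closures above.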