From 698f8c2f01ea549d77d7dc3338a12e04c11057b9 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 17 Apr 2024 14:02:58 +0200
Subject: Adding upstream version 1.64.0+dfsg1.

Signed-off-by: Daniel Baumann
---
 src/test/assembly/nvptx-atomics.rs | 86 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)
 create mode 100644 src/test/assembly/nvptx-atomics.rs

diff --git a/src/test/assembly/nvptx-atomics.rs b/src/test/assembly/nvptx-atomics.rs
new file mode 100644
index 000000000..f96398064
--- /dev/null
+++ b/src/test/assembly/nvptx-atomics.rs
@@ -0,0 +1,86 @@
+// assembly-output: ptx-linker
+// compile-flags: --crate-type cdylib
+// only-nvptx64
+// ignore-nvptx64
+
+#![feature(abi_ptx, core_intrinsics)]
+#![no_std]
+
+use core::intrinsics::*;
+
+// aux-build: breakpoint-panic-handler.rs
+extern crate breakpoint_panic_handler;
+
+// Currently, the LLVM NVPTX backend can only emit atomic instructions with
+// `relaxed` (PTX default) ordering. But it's also useful to make sure
+// the backend won't fail with other orderings. Apparently, the backend
+// doesn't support fences either. As a workaround, `llvm.nvvm.membar.*`
+// could work, and perhaps in the long run, all the atomic operations
+// should rather be provided by `core::arch::nvptx`.
+
+// Also, the PTX ISA doesn't have atomic `load`, `store`, or `nand`.
+
+// FIXME(denzp): add tests for `core::sync::atomic::*`.
+
+#[no_mangle]
+pub unsafe extern "ptx-kernel" fn atomics_kernel(a: *mut u32) {
+    // CHECK: atom.global.and.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    // CHECK: atom.global.and.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    atomic_and(a, 1);
+    atomic_and_relaxed(a, 1);
+
+    // CHECK: atom.global.cas.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1, 2;
+    // CHECK: atom.global.cas.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1, 2;
+    atomic_cxchg(a, 1, 2);
+    atomic_cxchg_relaxed(a, 1, 2);
+
+    // CHECK: atom.global.max.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    // CHECK: atom.global.max.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    atomic_max(a, 1);
+    atomic_max_relaxed(a, 1);
+
+    // CHECK: atom.global.min.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    // CHECK: atom.global.min.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    atomic_min(a, 1);
+    atomic_min_relaxed(a, 1);
+
+    // CHECK: atom.global.or.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    // CHECK: atom.global.or.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    atomic_or(a, 1);
+    atomic_or_relaxed(a, 1);
+
+    // CHECK: atom.global.max.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    // CHECK: atom.global.max.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    atomic_umax(a, 1);
+    atomic_umax_relaxed(a, 1);
+
+    // CHECK: atom.global.min.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    // CHECK: atom.global.min.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    atomic_umin(a, 1);
+    atomic_umin_relaxed(a, 1);
+
+    // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    atomic_xadd(a, 1);
+    atomic_xadd_relaxed(a, 1);
+
+    // CHECK: atom.global.exch.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    // CHECK: atom.global.exch.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    atomic_xchg(a, 1);
+    atomic_xchg_relaxed(a, 1);
+
+    // CHECK: atom.global.xor.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    // CHECK: atom.global.xor.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
+    atomic_xor(a, 1);
+    atomic_xor_relaxed(a, 1);
+
+    // CHECK: mov.u32 %[[sub_0_arg:r[0-9]+]], 100;
+    // CHECK: neg.s32 temp, %[[sub_0_arg]];
+    // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], temp;
+    atomic_xsub(a, 100);
+
+    // CHECK: mov.u32 %[[sub_1_arg:r[0-9]+]], 200;
+    // CHECK: neg.s32 temp, %[[sub_1_arg]];
+    // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], temp;
+    atomic_xsub_relaxed(a, 200);
+}
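
For reference, the FIXME in the test above asks for coverage of the stable
`core::sync::atomic` API rather than the unstable `core::intrinsics`
functions. Below is a minimal sketch of what such a companion test might look
like. It is not part of this patch: the kernel name `sync_atomics_kernel` is
hypothetical, and the PTX lowerings noted in the comments are assumptions
extrapolated from the intrinsics-based CHECK lines above, not verified
backend output.

// assembly-output: ptx-linker
// compile-flags: --crate-type cdylib
// only-nvptx64
// ignore-nvptx64

#![feature(abi_ptx)]
#![no_std]

use core::sync::atomic::{AtomicU32, Ordering};

// aux-build: breakpoint-panic-handler.rs
extern crate breakpoint_panic_handler;

#[no_mangle]
pub unsafe extern "ptx-kernel" fn sync_atomics_kernel(a: *mut u32) {
    // `AtomicU32` has the same in-memory representation as `u32`, so the
    // raw pointer can be reinterpreted for the purpose of this sketch.
    let a = &*(a as *const AtomicU32);

    // Assumed to lower to `atom.global.add.u32`, like `atomic_xadd` above.
    a.fetch_add(1, Ordering::Relaxed);

    // Assumed to lower to `atom.global.and.b32`, like `atomic_and` above.
    a.fetch_and(1, Ordering::Relaxed);

    // Assumed to lower to `atom.global.exch.b32`, like `atomic_xchg` above.
    a.swap(1, Ordering::Relaxed);

    // Assumed to lower to `atom.global.cas.b32`, like `atomic_cxchg` above.
    let _ = a.compare_exchange(1, 2, Ordering::Relaxed, Ordering::Relaxed);
}

As in the original test, the sketch sticks to `Ordering::Relaxed`, since that
is the only ordering the comments say the backend emits directly.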