Diffstat (limited to 'tests/run-make/atomic-lock-free')
-rw-r--r--   tests/run-make/atomic-lock-free/Makefile              48
-rw-r--r--   tests/run-make/atomic-lock-free/atomic_lock_free.rs   66
2 files changed, 114 insertions, 0 deletions
diff --git a/tests/run-make/atomic-lock-free/Makefile b/tests/run-make/atomic-lock-free/Makefile
new file mode 100644
index 000000000..37e59624a
--- /dev/null
+++ b/tests/run-make/atomic-lock-free/Makefile
@@ -0,0 +1,48 @@
+include ../tools.mk
+
+# This test ensures that atomic types are never lowered into runtime library calls that are
+# not guaranteed to be lock-free.
+
+all:
+ifeq ($(UNAME),Linux)
+ifeq ($(filter x86,$(LLVM_COMPONENTS)),x86)
+ $(RUSTC) --target=i686-unknown-linux-gnu atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+ $(RUSTC) --target=x86_64-unknown-linux-gnu atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+endif
+ifeq ($(filter arm,$(LLVM_COMPONENTS)),arm)
+ $(RUSTC) --target=arm-unknown-linux-gnueabi atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+ $(RUSTC) --target=arm-unknown-linux-gnueabihf atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+ $(RUSTC) --target=armv7-unknown-linux-gnueabihf atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+ $(RUSTC) --target=thumbv7neon-unknown-linux-gnueabihf atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+endif
+ifeq ($(filter aarch64,$(LLVM_COMPONENTS)),aarch64)
+ $(RUSTC) --target=aarch64-unknown-linux-gnu atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+endif
+ifeq ($(filter mips,$(LLVM_COMPONENTS)),mips)
+ $(RUSTC) --target=mips-unknown-linux-gnu atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+ $(RUSTC) --target=mipsel-unknown-linux-gnu atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+endif
+ifeq ($(filter powerpc,$(LLVM_COMPONENTS)),powerpc)
+ $(RUSTC) --target=powerpc-unknown-linux-gnu atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+ $(RUSTC) --target=powerpc-unknown-linux-gnuspe atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+ $(RUSTC) --target=powerpc64-unknown-linux-gnu atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+ $(RUSTC) --target=powerpc64le-unknown-linux-gnu atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+endif
+ifeq ($(filter systemz,$(LLVM_COMPONENTS)),systemz)
+ $(RUSTC) --target=s390x-unknown-linux-gnu atomic_lock_free.rs
+ nm "$(TMPDIR)/libatomic_lock_free.rlib" | $(CGREP) -v __atomic_fetch_add
+endif
+endif
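
For reference, each recipe pair above compiles the test crate for one target and then runs nm on the resulting rlib, piping through $(CGREP) -v so the step is meant to fail if the symbol __atomic_fetch_add shows up anywhere in the output. A hypothetical Rust helper sketching the same check (the helper name and the use of std::process::Command are illustrative only, not part of the test harness):

// Sketch: fail if the rlib's symbol table references __atomic_fetch_add,
// i.e. if the atomic operation was lowered to a libatomic library call.
use std::process::Command;

fn assert_no_libatomic_call(rlib: &str) {
    let out = Command::new("nm").arg(rlib).output().expect("failed to run nm");
    let symbols = String::from_utf8_lossy(&out.stdout);
    assert!(
        !symbols.contains("__atomic_fetch_add"),
        "{rlib} references __atomic_fetch_add"
    );
}
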
diff --git a/tests/run-make/atomic-lock-free/atomic_lock_free.rs b/tests/run-make/atomic-lock-free/atomic_lock_free.rs
new file mode 100644
index 000000000..47d90b185
--- /dev/null
+++ b/tests/run-make/atomic-lock-free/atomic_lock_free.rs
@@ -0,0 +1,66 @@
+#![feature(no_core, intrinsics, lang_items)]
+#![crate_type="rlib"]
+#![no_core]
+
+extern "rust-intrinsic" {
+ fn atomic_xadd_seqcst<T>(dst: *mut T, src: T) -> T;
+}
+
+#[lang = "sized"]
+trait Sized {}
+#[lang = "copy"]
+trait Copy {}
+#[lang = "freeze"]
+trait Freeze {}
+
+impl<T: ?Sized> Copy for *mut T {}
+
+#[cfg(target_has_atomic = "8")]
+pub unsafe fn atomic_u8(x: *mut u8) {
+ atomic_xadd_seqcst(x, 1);
+ atomic_xadd_seqcst(x, 1);
+}
+#[cfg(target_has_atomic = "8")]
+pub unsafe fn atomic_i8(x: *mut i8) {
+ atomic_xadd_seqcst(x, 1);
+}
+#[cfg(target_has_atomic = "16")]
+pub unsafe fn atomic_u16(x: *mut u16) {
+ atomic_xadd_seqcst(x, 1);
+}
+#[cfg(target_has_atomic = "16")]
+pub unsafe fn atomic_i16(x: *mut i16) {
+ atomic_xadd_seqcst(x, 1);
+}
+#[cfg(target_has_atomic = "32")]
+pub unsafe fn atomic_u32(x: *mut u32) {
+ atomic_xadd_seqcst(x, 1);
+}
+#[cfg(target_has_atomic = "32")]
+pub unsafe fn atomic_i32(x: *mut i32) {
+ atomic_xadd_seqcst(x, 1);
+}
+#[cfg(target_has_atomic = "64")]
+pub unsafe fn atomic_u64(x: *mut u64) {
+ atomic_xadd_seqcst(x, 1);
+}
+#[cfg(target_has_atomic = "64")]
+pub unsafe fn atomic_i64(x: *mut i64) {
+ atomic_xadd_seqcst(x, 1);
+}
+#[cfg(target_has_atomic = "128")]
+pub unsafe fn atomic_u128(x: *mut u128) {
+ atomic_xadd_seqcst(x, 1);
+}
+#[cfg(target_has_atomic = "128")]
+pub unsafe fn atomic_i128(x: *mut i128) {
+ atomic_xadd_seqcst(x, 1);
+}
+#[cfg(target_has_atomic = "ptr")]
+pub unsafe fn atomic_usize(x: *mut usize) {
+ atomic_xadd_seqcst(x, 1);
+}
+#[cfg(target_has_atomic = "ptr")]
+pub unsafe fn atomic_isize(x: *mut isize) {
+ atomic_xadd_seqcst(x, 1);
+}
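
As a side note, here is a minimal sketch of the same operation written against the stable core::sync::atomic API. It is not part of the test, which deliberately uses the raw intrinsic under #![no_core] so it can be built without the standard library; but on targets where the type is natively lock-free, fetch_add with SeqCst compiles to an inline atomic read-modify-write instruction rather than an __atomic_fetch_add call, which is exactly what the nm check in the Makefile asserts.

// Sketch only: the stable-API analogue of atomic_xadd_seqcst above.
use core::sync::atomic::{AtomicU32, Ordering};

pub fn fetch_add_u32(counter: &AtomicU32) -> u32 {
    // SeqCst mirrors the _seqcst suffix on the intrinsic used in the test.
    counter.fetch_add(1, Ordering::SeqCst)
}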