author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /tools/testing/selftests/rseq
parent    Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/testing/selftests/rseq')
22 files changed, 7859 insertions, 0 deletions
diff --git a/tools/testing/selftests/rseq/.gitignore b/tools/testing/selftests/rseq/.gitignore new file mode 100644 index 000000000..5910888eb --- /dev/null +++ b/tools/testing/selftests/rseq/.gitignore @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +basic_percpu_ops_test +basic_test +basic_rseq_op_test +param_test +param_test_benchmark +param_test_compare_twice diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile new file mode 100644 index 000000000..82ceca6aa --- /dev/null +++ b/tools/testing/selftests/rseq/Makefile @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0+ OR MIT + +ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) +CLANG_FLAGS += -no-integrated-as +endif + +top_srcdir = ../../../.. + +CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \ + $(CLANG_FLAGS) -I$(top_srcdir)/tools/include +LDLIBS += -lpthread -ldl + +# Own dependencies because we only want to build against 1st prerequisite, but +# still track changes to header files and depend on shared object. +OVERRIDE_TARGETS = 1 + +TEST_GEN_PROGS = basic_test basic_percpu_ops_test param_test \ + param_test_benchmark param_test_compare_twice + +TEST_GEN_PROGS_EXTENDED = librseq.so + +TEST_PROGS = run_param_test.sh + +TEST_FILES := settings + +include ../lib.mk + +$(OUTPUT)/librseq.so: rseq.c rseq.h rseq-*.h + $(CC) $(CFLAGS) -shared -fPIC $< $(LDLIBS) -o $@ + +$(OUTPUT)/%: %.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h + $(CC) $(CFLAGS) $< $(LDLIBS) -lrseq -o $@ + +$(OUTPUT)/param_test_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \ + rseq.h rseq-*.h + $(CC) $(CFLAGS) -DBENCHMARK $< $(LDLIBS) -lrseq -o $@ + +$(OUTPUT)/param_test_compare_twice: param_test.c $(TEST_GEN_PROGS_EXTENDED) \ + rseq.h rseq-*.h + $(CC) $(CFLAGS) -DRSEQ_COMPARE_TWICE $< $(LDLIBS) -lrseq -o $@ diff --git a/tools/testing/selftests/rseq/basic_percpu_ops_test.c b/tools/testing/selftests/rseq/basic_percpu_ops_test.c new file mode 100644 index 000000000..517756afc --- /dev/null +++ b/tools/testing/selftests/rseq/basic_percpu_ops_test.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: LGPL-2.1 +#define _GNU_SOURCE +#include <assert.h> +#include <pthread.h> +#include <sched.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stddef.h> + +#include "../kselftest.h" +#include "rseq.h" + +struct percpu_lock_entry { + intptr_t v; +} __attribute__((aligned(128))); + +struct percpu_lock { + struct percpu_lock_entry c[CPU_SETSIZE]; +}; + +struct test_data_entry { + intptr_t count; +} __attribute__((aligned(128))); + +struct spinlock_test_data { + struct percpu_lock lock; + struct test_data_entry c[CPU_SETSIZE]; + int reps; +}; + +struct percpu_list_node { + intptr_t data; + struct percpu_list_node *next; +}; + +struct percpu_list_entry { + struct percpu_list_node *head; +} __attribute__((aligned(128))); + +struct percpu_list { + struct percpu_list_entry c[CPU_SETSIZE]; +}; + +/* A simple percpu spinlock. Returns the cpu lock was acquired on. */ +int rseq_this_cpu_lock(struct percpu_lock *lock) +{ + int cpu; + + for (;;) { + int ret; + + cpu = rseq_cpu_start(); + ret = rseq_cmpeqv_storev(&lock->c[cpu].v, + 0, 1, cpu); + if (rseq_likely(!ret)) + break; + /* Retry if comparison fails or rseq aborts. */ + } + /* + * Acquire semantic when taking lock after control dependency. + * Matches rseq_smp_store_release(). 
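+ * The control dependency already orders the lock load against
+ * later stores; the barrier additionally orders it against later
+ * loads, giving the critical section full acquire semantics.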
+ */ + rseq_smp_acquire__after_ctrl_dep(); + return cpu; +} + +void rseq_percpu_unlock(struct percpu_lock *lock, int cpu) +{ + assert(lock->c[cpu].v == 1); + /* + * Release lock, with release semantic. Matches + * rseq_smp_acquire__after_ctrl_dep(). + */ + rseq_smp_store_release(&lock->c[cpu].v, 0); +} + +void *test_percpu_spinlock_thread(void *arg) +{ + struct spinlock_test_data *data = arg; + int i, cpu; + + if (rseq_register_current_thread()) { + fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n", + errno, strerror(errno)); + abort(); + } + for (i = 0; i < data->reps; i++) { + cpu = rseq_this_cpu_lock(&data->lock); + data->c[cpu].count++; + rseq_percpu_unlock(&data->lock, cpu); + } + if (rseq_unregister_current_thread()) { + fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n", + errno, strerror(errno)); + abort(); + } + + return NULL; +} + +/* + * A simple test which implements a sharded counter using a per-cpu + * lock. Obviously real applications might prefer to simply use a + * per-cpu increment; however, this is reasonable for a test and the + * lock can be extended to synchronize more complicated operations. + */ +void test_percpu_spinlock(void) +{ + const int num_threads = 200; + int i; + uint64_t sum; + pthread_t test_threads[num_threads]; + struct spinlock_test_data data; + + memset(&data, 0, sizeof(data)); + data.reps = 5000; + + for (i = 0; i < num_threads; i++) + pthread_create(&test_threads[i], NULL, + test_percpu_spinlock_thread, &data); + + for (i = 0; i < num_threads; i++) + pthread_join(test_threads[i], NULL); + + sum = 0; + for (i = 0; i < CPU_SETSIZE; i++) + sum += data.c[i].count; + + assert(sum == (uint64_t)data.reps * num_threads); +} + +void this_cpu_list_push(struct percpu_list *list, + struct percpu_list_node *node, + int *_cpu) +{ + int cpu; + + for (;;) { + intptr_t *targetptr, newval, expect; + int ret; + + cpu = rseq_cpu_start(); + /* Load list->c[cpu].head with single-copy atomicity. */ + expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head); + newval = (intptr_t)node; + targetptr = (intptr_t *)&list->c[cpu].head; + node->next = (struct percpu_list_node *)expect; + ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu); + if (rseq_likely(!ret)) + break; + /* Retry if comparison fails or rseq aborts. */ + } + if (_cpu) + *_cpu = cpu; +} + +/* + * Unlike a traditional lock-less linked list; the availability of a + * rseq primitive allows us to implement pop without concerns over + * ABA-type races. + */ +struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list, + int *_cpu) +{ + for (;;) { + struct percpu_list_node *head; + intptr_t *targetptr, expectnot, *load; + long offset; + int ret, cpu; + + cpu = rseq_cpu_start(); + targetptr = (intptr_t *)&list->c[cpu].head; + expectnot = (intptr_t)NULL; + offset = offsetof(struct percpu_list_node, next); + load = (intptr_t *)&head; + ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot, + offset, load, cpu); + if (rseq_likely(!ret)) { + if (_cpu) + *_cpu = cpu; + return head; + } + if (ret > 0) + return NULL; + /* Retry if rseq aborts. */ + } +} + +/* + * __percpu_list_pop is not safe against concurrent accesses. Should + * only be used on lists that are not concurrently modified. 
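+ * In this test it is only called once all test threads have been
+ * joined.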
+ */ +struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu) +{ + struct percpu_list_node *node; + + node = list->c[cpu].head; + if (!node) + return NULL; + list->c[cpu].head = node->next; + return node; +} + +void *test_percpu_list_thread(void *arg) +{ + int i; + struct percpu_list *list = (struct percpu_list *)arg; + + if (rseq_register_current_thread()) { + fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n", + errno, strerror(errno)); + abort(); + } + + for (i = 0; i < 100000; i++) { + struct percpu_list_node *node; + + node = this_cpu_list_pop(list, NULL); + sched_yield(); /* encourage shuffling */ + if (node) + this_cpu_list_push(list, node, NULL); + } + + if (rseq_unregister_current_thread()) { + fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n", + errno, strerror(errno)); + abort(); + } + + return NULL; +} + +/* Simultaneous modification to a per-cpu linked list from many threads. */ +void test_percpu_list(void) +{ + int i, j; + uint64_t sum = 0, expected_sum = 0; + struct percpu_list list; + pthread_t test_threads[200]; + cpu_set_t allowed_cpus; + + memset(&list, 0, sizeof(list)); + + /* Generate list entries for every usable cpu. */ + sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus); + for (i = 0; i < CPU_SETSIZE; i++) { + if (!CPU_ISSET(i, &allowed_cpus)) + continue; + for (j = 1; j <= 100; j++) { + struct percpu_list_node *node; + + expected_sum += j; + + node = malloc(sizeof(*node)); + assert(node); + node->data = j; + node->next = list.c[i].head; + list.c[i].head = node; + } + } + + for (i = 0; i < 200; i++) + pthread_create(&test_threads[i], NULL, + test_percpu_list_thread, &list); + + for (i = 0; i < 200; i++) + pthread_join(test_threads[i], NULL); + + for (i = 0; i < CPU_SETSIZE; i++) { + struct percpu_list_node *node; + + if (!CPU_ISSET(i, &allowed_cpus)) + continue; + + while ((node = __percpu_list_pop(&list, i))) { + sum += node->data; + free(node); + } + } + + /* + * All entries should now be accounted for (unless some external + * actor is interfering with our allowed affinity while this + * test is running). + */ + assert(sum == expected_sum); +} + +int main(int argc, char **argv) +{ + if (rseq_register_current_thread()) { + fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n", + errno, strerror(errno)); + goto error; + } + printf("spinlock\n"); + test_percpu_spinlock(); + printf("percpu_list\n"); + test_percpu_list(); + if (rseq_unregister_current_thread()) { + fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n", + errno, strerror(errno)); + goto error; + } + return 0; + +error: + return -1; +} diff --git a/tools/testing/selftests/rseq/basic_test.c b/tools/testing/selftests/rseq/basic_test.c new file mode 100644 index 000000000..d8efbfb89 --- /dev/null +++ b/tools/testing/selftests/rseq/basic_test.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: LGPL-2.1 +/* + * Basic test coverage for critical regions and rseq_current_cpu(). 
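+ * The test pins the thread to each allowed CPU in turn and checks
+ * that sched_getcpu(), rseq_current_cpu(), rseq_current_cpu_raw()
+ * and rseq_cpu_start() all report that CPU.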
+ */ + +#define _GNU_SOURCE +#include <assert.h> +#include <sched.h> +#include <signal.h> +#include <stdio.h> +#include <string.h> +#include <sys/time.h> + +#include "rseq.h" + +void test_cpu_pointer(void) +{ + cpu_set_t affinity, test_affinity; + int i; + + sched_getaffinity(0, sizeof(affinity), &affinity); + CPU_ZERO(&test_affinity); + for (i = 0; i < CPU_SETSIZE; i++) { + if (CPU_ISSET(i, &affinity)) { + CPU_SET(i, &test_affinity); + sched_setaffinity(0, sizeof(test_affinity), + &test_affinity); + assert(sched_getcpu() == i); + assert(rseq_current_cpu() == i); + assert(rseq_current_cpu_raw() == i); + assert(rseq_cpu_start() == i); + CPU_CLR(i, &test_affinity); + } + } + sched_setaffinity(0, sizeof(affinity), &affinity); +} + +int main(int argc, char **argv) +{ + if (rseq_register_current_thread()) { + fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n", + errno, strerror(errno)); + goto init_thread_error; + } + printf("testing current cpu\n"); + test_cpu_pointer(); + if (rseq_unregister_current_thread()) { + fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n", + errno, strerror(errno)); + goto init_thread_error; + } + return 0; + +init_thread_error: + return -1; +} diff --git a/tools/testing/selftests/rseq/compiler.h b/tools/testing/selftests/rseq/compiler.h new file mode 100644 index 000000000..876eb6a7f --- /dev/null +++ b/tools/testing/selftests/rseq/compiler.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq/compiler.h + * + * Work-around asm goto compiler bugs. + * + * (C) Copyright 2021 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + */ + +#ifndef RSEQ_COMPILER_H +#define RSEQ_COMPILER_H + +/* + * gcc prior to 4.8.2 miscompiles asm goto. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 + * + * gcc prior to 8.1.0 miscompiles asm goto at O1. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103908 + * + * clang prior to version 13.0.1 miscompiles asm goto at O2. + * https://github.com/llvm/llvm-project/issues/52735 + * + * Work around these issues by adding a volatile inline asm with + * memory clobber in the fallthrough after the asm goto and at each + * label target. Emit this for all compilers in case other similar + * issues are found in the future. 
+ */ +#define rseq_after_asm_goto() asm volatile ("" : : : "memory") + +#endif /* RSEQ_COMPILER_H_ */ diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c new file mode 100644 index 000000000..e29ecc715 --- /dev/null +++ b/tools/testing/selftests/rseq/param_test.c @@ -0,0 +1,1550 @@ +// SPDX-License-Identifier: LGPL-2.1 +#define _GNU_SOURCE +#include <assert.h> +#include <linux/membarrier.h> +#include <pthread.h> +#include <sched.h> +#include <stdatomic.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <syscall.h> +#include <unistd.h> +#include <poll.h> +#include <sys/types.h> +#include <signal.h> +#include <errno.h> +#include <stddef.h> + +static inline pid_t rseq_gettid(void) +{ + return syscall(__NR_gettid); +} + +#define NR_INJECT 9 +static int loop_cnt[NR_INJECT + 1]; + +static int loop_cnt_1 asm("asm_loop_cnt_1") __attribute__((used)); +static int loop_cnt_2 asm("asm_loop_cnt_2") __attribute__((used)); +static int loop_cnt_3 asm("asm_loop_cnt_3") __attribute__((used)); +static int loop_cnt_4 asm("asm_loop_cnt_4") __attribute__((used)); +static int loop_cnt_5 asm("asm_loop_cnt_5") __attribute__((used)); +static int loop_cnt_6 asm("asm_loop_cnt_6") __attribute__((used)); + +static int opt_modulo, verbose; + +static int opt_yield, opt_signal, opt_sleep, + opt_disable_rseq, opt_threads = 200, + opt_disable_mod = 0, opt_test = 's', opt_mb = 0; + +#ifndef RSEQ_SKIP_FASTPATH +static long long opt_reps = 5000; +#else +static long long opt_reps = 100; +#endif + +static __thread __attribute__((tls_model("initial-exec"))) +unsigned int signals_delivered; + +#ifndef BENCHMARK + +static __thread __attribute__((tls_model("initial-exec"), unused)) +unsigned int yield_mod_cnt, nr_abort; + +#define printf_verbose(fmt, ...) 
\ + do { \ + if (verbose) \ + printf(fmt, ## __VA_ARGS__); \ + } while (0) + +#ifdef __i386__ + +#define INJECT_ASM_REG "eax" + +#define RSEQ_INJECT_CLOBBER \ + , INJECT_ASM_REG + +#define RSEQ_INJECT_ASM(n) \ + "mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \ + "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \ + "jz 333f\n\t" \ + "222:\n\t" \ + "dec %%" INJECT_ASM_REG "\n\t" \ + "jnz 222b\n\t" \ + "333:\n\t" + +#elif defined(__x86_64__) + +#define INJECT_ASM_REG_P "rax" +#define INJECT_ASM_REG "eax" + +#define RSEQ_INJECT_CLOBBER \ + , INJECT_ASM_REG_P \ + , INJECT_ASM_REG + +#define RSEQ_INJECT_ASM(n) \ + "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG_P "\n\t" \ + "mov (%%" INJECT_ASM_REG_P "), %%" INJECT_ASM_REG "\n\t" \ + "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \ + "jz 333f\n\t" \ + "222:\n\t" \ + "dec %%" INJECT_ASM_REG "\n\t" \ + "jnz 222b\n\t" \ + "333:\n\t" + +#elif defined(__s390__) + +#define RSEQ_INJECT_INPUT \ + , [loop_cnt_1]"m"(loop_cnt[1]) \ + , [loop_cnt_2]"m"(loop_cnt[2]) \ + , [loop_cnt_3]"m"(loop_cnt[3]) \ + , [loop_cnt_4]"m"(loop_cnt[4]) \ + , [loop_cnt_5]"m"(loop_cnt[5]) \ + , [loop_cnt_6]"m"(loop_cnt[6]) + +#define INJECT_ASM_REG "r12" + +#define RSEQ_INJECT_CLOBBER \ + , INJECT_ASM_REG + +#define RSEQ_INJECT_ASM(n) \ + "l %%" INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \ + "ltr %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG "\n\t" \ + "je 333f\n\t" \ + "222:\n\t" \ + "ahi %%" INJECT_ASM_REG ", -1\n\t" \ + "jnz 222b\n\t" \ + "333:\n\t" + +#elif defined(__ARMEL__) + +#define RSEQ_INJECT_INPUT \ + , [loop_cnt_1]"m"(loop_cnt[1]) \ + , [loop_cnt_2]"m"(loop_cnt[2]) \ + , [loop_cnt_3]"m"(loop_cnt[3]) \ + , [loop_cnt_4]"m"(loop_cnt[4]) \ + , [loop_cnt_5]"m"(loop_cnt[5]) \ + , [loop_cnt_6]"m"(loop_cnt[6]) + +#define INJECT_ASM_REG "r4" + +#define RSEQ_INJECT_CLOBBER \ + , INJECT_ASM_REG + +#define RSEQ_INJECT_ASM(n) \ + "ldr " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \ + "cmp " INJECT_ASM_REG ", #0\n\t" \ + "beq 333f\n\t" \ + "222:\n\t" \ + "subs " INJECT_ASM_REG ", #1\n\t" \ + "bne 222b\n\t" \ + "333:\n\t" + +#elif defined(__AARCH64EL__) + +#define RSEQ_INJECT_INPUT \ + , [loop_cnt_1] "Qo" (loop_cnt[1]) \ + , [loop_cnt_2] "Qo" (loop_cnt[2]) \ + , [loop_cnt_3] "Qo" (loop_cnt[3]) \ + , [loop_cnt_4] "Qo" (loop_cnt[4]) \ + , [loop_cnt_5] "Qo" (loop_cnt[5]) \ + , [loop_cnt_6] "Qo" (loop_cnt[6]) + +#define INJECT_ASM_REG RSEQ_ASM_TMP_REG32 + +#define RSEQ_INJECT_ASM(n) \ + " ldr " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n" \ + " cbz " INJECT_ASM_REG ", 333f\n" \ + "222:\n" \ + " sub " INJECT_ASM_REG ", " INJECT_ASM_REG ", #1\n" \ + " cbnz " INJECT_ASM_REG ", 222b\n" \ + "333:\n" + +#elif defined(__PPC__) + +#define RSEQ_INJECT_INPUT \ + , [loop_cnt_1]"m"(loop_cnt[1]) \ + , [loop_cnt_2]"m"(loop_cnt[2]) \ + , [loop_cnt_3]"m"(loop_cnt[3]) \ + , [loop_cnt_4]"m"(loop_cnt[4]) \ + , [loop_cnt_5]"m"(loop_cnt[5]) \ + , [loop_cnt_6]"m"(loop_cnt[6]) + +#define INJECT_ASM_REG "r18" + +#define RSEQ_INJECT_CLOBBER \ + , INJECT_ASM_REG + +#define RSEQ_INJECT_ASM(n) \ + "lwz %%" INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \ + "cmpwi %%" INJECT_ASM_REG ", 0\n\t" \ + "beq 333f\n\t" \ + "222:\n\t" \ + "subic. 
%%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \ + "bne 222b\n\t" \ + "333:\n\t" + +#elif defined(__mips__) + +#define RSEQ_INJECT_INPUT \ + , [loop_cnt_1]"m"(loop_cnt[1]) \ + , [loop_cnt_2]"m"(loop_cnt[2]) \ + , [loop_cnt_3]"m"(loop_cnt[3]) \ + , [loop_cnt_4]"m"(loop_cnt[4]) \ + , [loop_cnt_5]"m"(loop_cnt[5]) \ + , [loop_cnt_6]"m"(loop_cnt[6]) + +#define INJECT_ASM_REG "$5" + +#define RSEQ_INJECT_CLOBBER \ + , INJECT_ASM_REG + +#define RSEQ_INJECT_ASM(n) \ + "lw " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \ + "beqz " INJECT_ASM_REG ", 333f\n\t" \ + "222:\n\t" \ + "addiu " INJECT_ASM_REG ", -1\n\t" \ + "bnez " INJECT_ASM_REG ", 222b\n\t" \ + "333:\n\t" + +#else +#error unsupported target +#endif + +#define RSEQ_INJECT_FAILED \ + nr_abort++; + +#define RSEQ_INJECT_C(n) \ +{ \ + int loc_i, loc_nr_loops = loop_cnt[n]; \ + \ + for (loc_i = 0; loc_i < loc_nr_loops; loc_i++) { \ + rseq_barrier(); \ + } \ + if (loc_nr_loops == -1 && opt_modulo) { \ + if (yield_mod_cnt == opt_modulo - 1) { \ + if (opt_sleep > 0) \ + poll(NULL, 0, opt_sleep); \ + if (opt_yield) \ + sched_yield(); \ + if (opt_signal) \ + raise(SIGUSR1); \ + yield_mod_cnt = 0; \ + } else { \ + yield_mod_cnt++; \ + } \ + } \ +} + +#else + +#define printf_verbose(fmt, ...) + +#endif /* BENCHMARK */ + +#include "rseq.h" + +struct percpu_lock_entry { + intptr_t v; +} __attribute__((aligned(128))); + +struct percpu_lock { + struct percpu_lock_entry c[CPU_SETSIZE]; +}; + +struct test_data_entry { + intptr_t count; +} __attribute__((aligned(128))); + +struct spinlock_test_data { + struct percpu_lock lock; + struct test_data_entry c[CPU_SETSIZE]; +}; + +struct spinlock_thread_test_data { + struct spinlock_test_data *data; + long long reps; + int reg; +}; + +struct inc_test_data { + struct test_data_entry c[CPU_SETSIZE]; +}; + +struct inc_thread_test_data { + struct inc_test_data *data; + long long reps; + int reg; +}; + +struct percpu_list_node { + intptr_t data; + struct percpu_list_node *next; +}; + +struct percpu_list_entry { + struct percpu_list_node *head; +} __attribute__((aligned(128))); + +struct percpu_list { + struct percpu_list_entry c[CPU_SETSIZE]; +}; + +#define BUFFER_ITEM_PER_CPU 100 + +struct percpu_buffer_node { + intptr_t data; +}; + +struct percpu_buffer_entry { + intptr_t offset; + intptr_t buflen; + struct percpu_buffer_node **array; +} __attribute__((aligned(128))); + +struct percpu_buffer { + struct percpu_buffer_entry c[CPU_SETSIZE]; +}; + +#define MEMCPY_BUFFER_ITEM_PER_CPU 100 + +struct percpu_memcpy_buffer_node { + intptr_t data1; + uint64_t data2; +}; + +struct percpu_memcpy_buffer_entry { + intptr_t offset; + intptr_t buflen; + struct percpu_memcpy_buffer_node *array; +} __attribute__((aligned(128))); + +struct percpu_memcpy_buffer { + struct percpu_memcpy_buffer_entry c[CPU_SETSIZE]; +}; + +/* A simple percpu spinlock. Grabs lock on current cpu. */ +static int rseq_this_cpu_lock(struct percpu_lock *lock) +{ + int cpu; + + for (;;) { + int ret; + + cpu = rseq_cpu_start(); + ret = rseq_cmpeqv_storev(&lock->c[cpu].v, + 0, 1, cpu); + if (rseq_likely(!ret)) + break; + /* Retry if comparison fails or rseq aborts. */ + } + /* + * Acquire semantic when taking lock after control dependency. + * Matches rseq_smp_store_release(). + */ + rseq_smp_acquire__after_ctrl_dep(); + return cpu; +} + +static void rseq_percpu_unlock(struct percpu_lock *lock, int cpu) +{ + assert(lock->c[cpu].v == 1); + /* + * Release lock, with release semantic. Matches + * rseq_smp_acquire__after_ctrl_dep(). 
+ */ + rseq_smp_store_release(&lock->c[cpu].v, 0); +} + +void *test_percpu_spinlock_thread(void *arg) +{ + struct spinlock_thread_test_data *thread_data = arg; + struct spinlock_test_data *data = thread_data->data; + long long i, reps; + + if (!opt_disable_rseq && thread_data->reg && + rseq_register_current_thread()) + abort(); + reps = thread_data->reps; + for (i = 0; i < reps; i++) { + int cpu = rseq_this_cpu_lock(&data->lock); + data->c[cpu].count++; + rseq_percpu_unlock(&data->lock, cpu); +#ifndef BENCHMARK + if (i != 0 && !(i % (reps / 10))) + printf_verbose("tid %d: count %lld\n", + (int) rseq_gettid(), i); +#endif + } + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", + (int) rseq_gettid(), nr_abort, signals_delivered); + if (!opt_disable_rseq && thread_data->reg && + rseq_unregister_current_thread()) + abort(); + return NULL; +} + +/* + * A simple test which implements a sharded counter using a per-cpu + * lock. Obviously real applications might prefer to simply use a + * per-cpu increment; however, this is reasonable for a test and the + * lock can be extended to synchronize more complicated operations. + */ +void test_percpu_spinlock(void) +{ + const int num_threads = opt_threads; + int i, ret; + uint64_t sum; + pthread_t test_threads[num_threads]; + struct spinlock_test_data data; + struct spinlock_thread_test_data thread_data[num_threads]; + + memset(&data, 0, sizeof(data)); + for (i = 0; i < num_threads; i++) { + thread_data[i].reps = opt_reps; + if (opt_disable_mod <= 0 || (i % opt_disable_mod)) + thread_data[i].reg = 1; + else + thread_data[i].reg = 0; + thread_data[i].data = &data; + ret = pthread_create(&test_threads[i], NULL, + test_percpu_spinlock_thread, + &thread_data[i]); + if (ret) { + errno = ret; + perror("pthread_create"); + abort(); + } + } + + for (i = 0; i < num_threads; i++) { + ret = pthread_join(test_threads[i], NULL); + if (ret) { + errno = ret; + perror("pthread_join"); + abort(); + } + } + + sum = 0; + for (i = 0; i < CPU_SETSIZE; i++) + sum += data.c[i].count; + + assert(sum == (uint64_t)opt_reps * num_threads); +} + +void *test_percpu_inc_thread(void *arg) +{ + struct inc_thread_test_data *thread_data = arg; + struct inc_test_data *data = thread_data->data; + long long i, reps; + + if (!opt_disable_rseq && thread_data->reg && + rseq_register_current_thread()) + abort(); + reps = thread_data->reps; + for (i = 0; i < reps; i++) { + int ret; + + do { + int cpu; + + cpu = rseq_cpu_start(); + ret = rseq_addv(&data->c[cpu].count, 1, cpu); + } while (rseq_unlikely(ret)); +#ifndef BENCHMARK + if (i != 0 && !(i % (reps / 10))) + printf_verbose("tid %d: count %lld\n", + (int) rseq_gettid(), i); +#endif + } + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", + (int) rseq_gettid(), nr_abort, signals_delivered); + if (!opt_disable_rseq && thread_data->reg && + rseq_unregister_current_thread()) + abort(); + return NULL; +} + +void test_percpu_inc(void) +{ + const int num_threads = opt_threads; + int i, ret; + uint64_t sum; + pthread_t test_threads[num_threads]; + struct inc_test_data data; + struct inc_thread_test_data thread_data[num_threads]; + + memset(&data, 0, sizeof(data)); + for (i = 0; i < num_threads; i++) { + thread_data[i].reps = opt_reps; + if (opt_disable_mod <= 0 || (i % opt_disable_mod)) + thread_data[i].reg = 1; + else + thread_data[i].reg = 0; + thread_data[i].data = &data; + ret = pthread_create(&test_threads[i], NULL, + test_percpu_inc_thread, + &thread_data[i]); + if (ret) { + errno = ret; + 
perror("pthread_create"); + abort(); + } + } + + for (i = 0; i < num_threads; i++) { + ret = pthread_join(test_threads[i], NULL); + if (ret) { + errno = ret; + perror("pthread_join"); + abort(); + } + } + + sum = 0; + for (i = 0; i < CPU_SETSIZE; i++) + sum += data.c[i].count; + + assert(sum == (uint64_t)opt_reps * num_threads); +} + +void this_cpu_list_push(struct percpu_list *list, + struct percpu_list_node *node, + int *_cpu) +{ + int cpu; + + for (;;) { + intptr_t *targetptr, newval, expect; + int ret; + + cpu = rseq_cpu_start(); + /* Load list->c[cpu].head with single-copy atomicity. */ + expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head); + newval = (intptr_t)node; + targetptr = (intptr_t *)&list->c[cpu].head; + node->next = (struct percpu_list_node *)expect; + ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu); + if (rseq_likely(!ret)) + break; + /* Retry if comparison fails or rseq aborts. */ + } + if (_cpu) + *_cpu = cpu; +} + +/* + * Unlike a traditional lock-less linked list; the availability of a + * rseq primitive allows us to implement pop without concerns over + * ABA-type races. + */ +struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list, + int *_cpu) +{ + struct percpu_list_node *node = NULL; + int cpu; + + for (;;) { + struct percpu_list_node *head; + intptr_t *targetptr, expectnot, *load; + long offset; + int ret; + + cpu = rseq_cpu_start(); + targetptr = (intptr_t *)&list->c[cpu].head; + expectnot = (intptr_t)NULL; + offset = offsetof(struct percpu_list_node, next); + load = (intptr_t *)&head; + ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot, + offset, load, cpu); + if (rseq_likely(!ret)) { + node = head; + break; + } + if (ret > 0) + break; + /* Retry if rseq aborts. */ + } + if (_cpu) + *_cpu = cpu; + return node; +} + +/* + * __percpu_list_pop is not safe against concurrent accesses. Should + * only be used on lists that are not concurrently modified. + */ +struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu) +{ + struct percpu_list_node *node; + + node = list->c[cpu].head; + if (!node) + return NULL; + list->c[cpu].head = node->next; + return node; +} + +void *test_percpu_list_thread(void *arg) +{ + long long i, reps; + struct percpu_list *list = (struct percpu_list *)arg; + + if (!opt_disable_rseq && rseq_register_current_thread()) + abort(); + + reps = opt_reps; + for (i = 0; i < reps; i++) { + struct percpu_list_node *node; + + node = this_cpu_list_pop(list, NULL); + if (opt_yield) + sched_yield(); /* encourage shuffling */ + if (node) + this_cpu_list_push(list, node, NULL); + } + + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", + (int) rseq_gettid(), nr_abort, signals_delivered); + if (!opt_disable_rseq && rseq_unregister_current_thread()) + abort(); + + return NULL; +} + +/* Simultaneous modification to a per-cpu linked list from many threads. */ +void test_percpu_list(void) +{ + const int num_threads = opt_threads; + int i, j, ret; + uint64_t sum = 0, expected_sum = 0; + struct percpu_list list; + pthread_t test_threads[num_threads]; + cpu_set_t allowed_cpus; + + memset(&list, 0, sizeof(list)); + + /* Generate list entries for every usable cpu. 
*/ + sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus); + for (i = 0; i < CPU_SETSIZE; i++) { + if (!CPU_ISSET(i, &allowed_cpus)) + continue; + for (j = 1; j <= 100; j++) { + struct percpu_list_node *node; + + expected_sum += j; + + node = malloc(sizeof(*node)); + assert(node); + node->data = j; + node->next = list.c[i].head; + list.c[i].head = node; + } + } + + for (i = 0; i < num_threads; i++) { + ret = pthread_create(&test_threads[i], NULL, + test_percpu_list_thread, &list); + if (ret) { + errno = ret; + perror("pthread_create"); + abort(); + } + } + + for (i = 0; i < num_threads; i++) { + ret = pthread_join(test_threads[i], NULL); + if (ret) { + errno = ret; + perror("pthread_join"); + abort(); + } + } + + for (i = 0; i < CPU_SETSIZE; i++) { + struct percpu_list_node *node; + + if (!CPU_ISSET(i, &allowed_cpus)) + continue; + + while ((node = __percpu_list_pop(&list, i))) { + sum += node->data; + free(node); + } + } + + /* + * All entries should now be accounted for (unless some external + * actor is interfering with our allowed affinity while this + * test is running). + */ + assert(sum == expected_sum); +} + +bool this_cpu_buffer_push(struct percpu_buffer *buffer, + struct percpu_buffer_node *node, + int *_cpu) +{ + bool result = false; + int cpu; + + for (;;) { + intptr_t *targetptr_spec, newval_spec; + intptr_t *targetptr_final, newval_final; + intptr_t offset; + int ret; + + cpu = rseq_cpu_start(); + offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); + if (offset == buffer->c[cpu].buflen) + break; + newval_spec = (intptr_t)node; + targetptr_spec = (intptr_t *)&buffer->c[cpu].array[offset]; + newval_final = offset + 1; + targetptr_final = &buffer->c[cpu].offset; + if (opt_mb) + ret = rseq_cmpeqv_trystorev_storev_release( + targetptr_final, offset, targetptr_spec, + newval_spec, newval_final, cpu); + else + ret = rseq_cmpeqv_trystorev_storev(targetptr_final, + offset, targetptr_spec, newval_spec, + newval_final, cpu); + if (rseq_likely(!ret)) { + result = true; + break; + } + /* Retry if comparison fails or rseq aborts. */ + } + if (_cpu) + *_cpu = cpu; + return result; +} + +struct percpu_buffer_node *this_cpu_buffer_pop(struct percpu_buffer *buffer, + int *_cpu) +{ + struct percpu_buffer_node *head; + int cpu; + + for (;;) { + intptr_t *targetptr, newval; + intptr_t offset; + int ret; + + cpu = rseq_cpu_start(); + /* Load offset with single-copy atomicity. */ + offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); + if (offset == 0) { + head = NULL; + break; + } + head = RSEQ_READ_ONCE(buffer->c[cpu].array[offset - 1]); + newval = offset - 1; + targetptr = (intptr_t *)&buffer->c[cpu].offset; + ret = rseq_cmpeqv_cmpeqv_storev(targetptr, offset, + (intptr_t *)&buffer->c[cpu].array[offset - 1], + (intptr_t)head, newval, cpu); + if (rseq_likely(!ret)) + break; + /* Retry if comparison fails or rseq aborts. */ + } + if (_cpu) + *_cpu = cpu; + return head; +} + +/* + * __percpu_buffer_pop is not safe against concurrent accesses. Should + * only be used on buffers that are not concurrently modified. 
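+ * As with __percpu_list_pop, the test only calls it after all
+ * threads have been joined.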
+ */ +struct percpu_buffer_node *__percpu_buffer_pop(struct percpu_buffer *buffer, + int cpu) +{ + struct percpu_buffer_node *head; + intptr_t offset; + + offset = buffer->c[cpu].offset; + if (offset == 0) + return NULL; + head = buffer->c[cpu].array[offset - 1]; + buffer->c[cpu].offset = offset - 1; + return head; +} + +void *test_percpu_buffer_thread(void *arg) +{ + long long i, reps; + struct percpu_buffer *buffer = (struct percpu_buffer *)arg; + + if (!opt_disable_rseq && rseq_register_current_thread()) + abort(); + + reps = opt_reps; + for (i = 0; i < reps; i++) { + struct percpu_buffer_node *node; + + node = this_cpu_buffer_pop(buffer, NULL); + if (opt_yield) + sched_yield(); /* encourage shuffling */ + if (node) { + if (!this_cpu_buffer_push(buffer, node, NULL)) { + /* Should increase buffer size. */ + abort(); + } + } + } + + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", + (int) rseq_gettid(), nr_abort, signals_delivered); + if (!opt_disable_rseq && rseq_unregister_current_thread()) + abort(); + + return NULL; +} + +/* Simultaneous modification to a per-cpu buffer from many threads. */ +void test_percpu_buffer(void) +{ + const int num_threads = opt_threads; + int i, j, ret; + uint64_t sum = 0, expected_sum = 0; + struct percpu_buffer buffer; + pthread_t test_threads[num_threads]; + cpu_set_t allowed_cpus; + + memset(&buffer, 0, sizeof(buffer)); + + /* Generate list entries for every usable cpu. */ + sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus); + for (i = 0; i < CPU_SETSIZE; i++) { + if (!CPU_ISSET(i, &allowed_cpus)) + continue; + /* Worse-case is every item in same CPU. */ + buffer.c[i].array = + malloc(sizeof(*buffer.c[i].array) * CPU_SETSIZE * + BUFFER_ITEM_PER_CPU); + assert(buffer.c[i].array); + buffer.c[i].buflen = CPU_SETSIZE * BUFFER_ITEM_PER_CPU; + for (j = 1; j <= BUFFER_ITEM_PER_CPU; j++) { + struct percpu_buffer_node *node; + + expected_sum += j; + + /* + * We could theoretically put the word-sized + * "data" directly in the buffer. However, we + * want to model objects that would not fit + * within a single word, so allocate an object + * for each node. + */ + node = malloc(sizeof(*node)); + assert(node); + node->data = j; + buffer.c[i].array[j - 1] = node; + buffer.c[i].offset++; + } + } + + for (i = 0; i < num_threads; i++) { + ret = pthread_create(&test_threads[i], NULL, + test_percpu_buffer_thread, &buffer); + if (ret) { + errno = ret; + perror("pthread_create"); + abort(); + } + } + + for (i = 0; i < num_threads; i++) { + ret = pthread_join(test_threads[i], NULL); + if (ret) { + errno = ret; + perror("pthread_join"); + abort(); + } + } + + for (i = 0; i < CPU_SETSIZE; i++) { + struct percpu_buffer_node *node; + + if (!CPU_ISSET(i, &allowed_cpus)) + continue; + + while ((node = __percpu_buffer_pop(&buffer, i))) { + sum += node->data; + free(node); + } + free(buffer.c[i].array); + } + + /* + * All entries should now be accounted for (unless some external + * actor is interfering with our allowed affinity while this + * test is running). + */ + assert(sum == expected_sum); +} + +bool this_cpu_memcpy_buffer_push(struct percpu_memcpy_buffer *buffer, + struct percpu_memcpy_buffer_node item, + int *_cpu) +{ + bool result = false; + int cpu; + + for (;;) { + intptr_t *targetptr_final, newval_final, offset; + char *destptr, *srcptr; + size_t copylen; + int ret; + + cpu = rseq_cpu_start(); + /* Load offset with single-copy atomicity. 
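+ * The rseq commit below re-compares the offset, so a concurrent
+ * update is detected and simply triggers a retry.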
*/ + offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); + if (offset == buffer->c[cpu].buflen) + break; + destptr = (char *)&buffer->c[cpu].array[offset]; + srcptr = (char *)&item; + /* copylen must be <= 4kB. */ + copylen = sizeof(item); + newval_final = offset + 1; + targetptr_final = &buffer->c[cpu].offset; + if (opt_mb) + ret = rseq_cmpeqv_trymemcpy_storev_release( + targetptr_final, offset, + destptr, srcptr, copylen, + newval_final, cpu); + else + ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final, + offset, destptr, srcptr, copylen, + newval_final, cpu); + if (rseq_likely(!ret)) { + result = true; + break; + } + /* Retry if comparison fails or rseq aborts. */ + } + if (_cpu) + *_cpu = cpu; + return result; +} + +bool this_cpu_memcpy_buffer_pop(struct percpu_memcpy_buffer *buffer, + struct percpu_memcpy_buffer_node *item, + int *_cpu) +{ + bool result = false; + int cpu; + + for (;;) { + intptr_t *targetptr_final, newval_final, offset; + char *destptr, *srcptr; + size_t copylen; + int ret; + + cpu = rseq_cpu_start(); + /* Load offset with single-copy atomicity. */ + offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); + if (offset == 0) + break; + destptr = (char *)item; + srcptr = (char *)&buffer->c[cpu].array[offset - 1]; + /* copylen must be <= 4kB. */ + copylen = sizeof(*item); + newval_final = offset - 1; + targetptr_final = &buffer->c[cpu].offset; + ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final, + offset, destptr, srcptr, copylen, + newval_final, cpu); + if (rseq_likely(!ret)) { + result = true; + break; + } + /* Retry if comparison fails or rseq aborts. */ + } + if (_cpu) + *_cpu = cpu; + return result; +} + +/* + * __percpu_memcpy_buffer_pop is not safe against concurrent accesses. Should + * only be used on buffers that are not concurrently modified. + */ +bool __percpu_memcpy_buffer_pop(struct percpu_memcpy_buffer *buffer, + struct percpu_memcpy_buffer_node *item, + int cpu) +{ + intptr_t offset; + + offset = buffer->c[cpu].offset; + if (offset == 0) + return false; + memcpy(item, &buffer->c[cpu].array[offset - 1], sizeof(*item)); + buffer->c[cpu].offset = offset - 1; + return true; +} + +void *test_percpu_memcpy_buffer_thread(void *arg) +{ + long long i, reps; + struct percpu_memcpy_buffer *buffer = (struct percpu_memcpy_buffer *)arg; + + if (!opt_disable_rseq && rseq_register_current_thread()) + abort(); + + reps = opt_reps; + for (i = 0; i < reps; i++) { + struct percpu_memcpy_buffer_node item; + bool result; + + result = this_cpu_memcpy_buffer_pop(buffer, &item, NULL); + if (opt_yield) + sched_yield(); /* encourage shuffling */ + if (result) { + if (!this_cpu_memcpy_buffer_push(buffer, item, NULL)) { + /* Should increase buffer size. */ + abort(); + } + } + } + + printf_verbose("tid %d: number of rseq abort: %d, signals delivered: %u\n", + (int) rseq_gettid(), nr_abort, signals_delivered); + if (!opt_disable_rseq && rseq_unregister_current_thread()) + abort(); + + return NULL; +} + +/* Simultaneous modification to a per-cpu buffer from many threads. */ +void test_percpu_memcpy_buffer(void) +{ + const int num_threads = opt_threads; + int i, j, ret; + uint64_t sum = 0, expected_sum = 0; + struct percpu_memcpy_buffer buffer; + pthread_t test_threads[num_threads]; + cpu_set_t allowed_cpus; + + memset(&buffer, 0, sizeof(buffer)); + + /* Generate list entries for every usable cpu. */ + sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus); + for (i = 0; i < CPU_SETSIZE; i++) { + if (!CPU_ISSET(i, &allowed_cpus)) + continue; + /* Worse-case is every item in same CPU. 
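+ * (Hence each array is sized for CPU_SETSIZE *
+ * MEMCPY_BUFFER_ITEM_PER_CPU entries.)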
*/ + buffer.c[i].array = + malloc(sizeof(*buffer.c[i].array) * CPU_SETSIZE * + MEMCPY_BUFFER_ITEM_PER_CPU); + assert(buffer.c[i].array); + buffer.c[i].buflen = CPU_SETSIZE * MEMCPY_BUFFER_ITEM_PER_CPU; + for (j = 1; j <= MEMCPY_BUFFER_ITEM_PER_CPU; j++) { + expected_sum += 2 * j + 1; + + /* + * We could theoretically put the word-sized + * "data" directly in the buffer. However, we + * want to model objects that would not fit + * within a single word, so allocate an object + * for each node. + */ + buffer.c[i].array[j - 1].data1 = j; + buffer.c[i].array[j - 1].data2 = j + 1; + buffer.c[i].offset++; + } + } + + for (i = 0; i < num_threads; i++) { + ret = pthread_create(&test_threads[i], NULL, + test_percpu_memcpy_buffer_thread, + &buffer); + if (ret) { + errno = ret; + perror("pthread_create"); + abort(); + } + } + + for (i = 0; i < num_threads; i++) { + ret = pthread_join(test_threads[i], NULL); + if (ret) { + errno = ret; + perror("pthread_join"); + abort(); + } + } + + for (i = 0; i < CPU_SETSIZE; i++) { + struct percpu_memcpy_buffer_node item; + + if (!CPU_ISSET(i, &allowed_cpus)) + continue; + + while (__percpu_memcpy_buffer_pop(&buffer, &item, i)) { + sum += item.data1; + sum += item.data2; + } + free(buffer.c[i].array); + } + + /* + * All entries should now be accounted for (unless some external + * actor is interfering with our allowed affinity while this + * test is running). + */ + assert(sum == expected_sum); +} + +static void test_signal_interrupt_handler(int signo) +{ + signals_delivered++; +} + +static int set_signal_handler(void) +{ + int ret = 0; + struct sigaction sa; + sigset_t sigset; + + ret = sigemptyset(&sigset); + if (ret < 0) { + perror("sigemptyset"); + return ret; + } + + sa.sa_handler = test_signal_interrupt_handler; + sa.sa_mask = sigset; + sa.sa_flags = 0; + ret = sigaction(SIGUSR1, &sa, NULL); + if (ret < 0) { + perror("sigaction"); + return ret; + } + + printf_verbose("Signal handler set for SIGUSR1\n"); + + return ret; +} + +struct test_membarrier_thread_args { + int stop; + intptr_t percpu_list_ptr; +}; + +/* Worker threads modify data in their "active" percpu lists. */ +void *test_membarrier_worker_thread(void *arg) +{ + struct test_membarrier_thread_args *args = + (struct test_membarrier_thread_args *)arg; + const int iters = opt_reps; + int i; + + if (rseq_register_current_thread()) { + fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n", + errno, strerror(errno)); + abort(); + } + + /* Wait for initialization. */ + while (!atomic_load(&args->percpu_list_ptr)) {} + + for (i = 0; i < iters; ++i) { + int ret; + + do { + int cpu = rseq_cpu_start(); + + ret = rseq_offset_deref_addv(&args->percpu_list_ptr, + sizeof(struct percpu_list_entry) * cpu, 1, cpu); + } while (rseq_unlikely(ret)); + } + + if (rseq_unregister_current_thread()) { + fprintf(stderr, "Error: rseq_unregister_current_thread(...) 
failed(%d): %s\n", + errno, strerror(errno)); + abort(); + } + return NULL; +} + +void test_membarrier_init_percpu_list(struct percpu_list *list) +{ + int i; + + memset(list, 0, sizeof(*list)); + for (i = 0; i < CPU_SETSIZE; i++) { + struct percpu_list_node *node; + + node = malloc(sizeof(*node)); + assert(node); + node->data = 0; + node->next = NULL; + list->c[i].head = node; + } +} + +void test_membarrier_free_percpu_list(struct percpu_list *list) +{ + int i; + + for (i = 0; i < CPU_SETSIZE; i++) + free(list->c[i].head); +} + +static int sys_membarrier(int cmd, int flags, int cpu_id) +{ + return syscall(__NR_membarrier, cmd, flags, cpu_id); +} + +/* + * The manager thread swaps per-cpu lists that worker threads see, + * and validates that there are no unexpected modifications. + */ +void *test_membarrier_manager_thread(void *arg) +{ + struct test_membarrier_thread_args *args = + (struct test_membarrier_thread_args *)arg; + struct percpu_list list_a, list_b; + intptr_t expect_a = 0, expect_b = 0; + int cpu_a = 0, cpu_b = 0; + + if (rseq_register_current_thread()) { + fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n", + errno, strerror(errno)); + abort(); + } + + /* Init lists. */ + test_membarrier_init_percpu_list(&list_a); + test_membarrier_init_percpu_list(&list_b); + + atomic_store(&args->percpu_list_ptr, (intptr_t)&list_a); + + while (!atomic_load(&args->stop)) { + /* list_a is "active". */ + cpu_a = rand() % CPU_SETSIZE; + /* + * As list_b is "inactive", we should never see changes + * to list_b. + */ + if (expect_b != atomic_load(&list_b.c[cpu_b].head->data)) { + fprintf(stderr, "Membarrier test failed\n"); + abort(); + } + + /* Make list_b "active". */ + atomic_store(&args->percpu_list_ptr, (intptr_t)&list_b); + if (sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, + MEMBARRIER_CMD_FLAG_CPU, cpu_a) && + errno != ENXIO /* missing CPU */) { + perror("sys_membarrier"); + abort(); + } + /* + * Cpu A should now only modify list_b, so the values + * in list_a should be stable. + */ + expect_a = atomic_load(&list_a.c[cpu_a].head->data); + + cpu_b = rand() % CPU_SETSIZE; + /* + * As list_a is "inactive", we should never see changes + * to list_a. + */ + if (expect_a != atomic_load(&list_a.c[cpu_a].head->data)) { + fprintf(stderr, "Membarrier test failed\n"); + abort(); + } + + /* Make list_a "active". */ + atomic_store(&args->percpu_list_ptr, (intptr_t)&list_a); + if (sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, + MEMBARRIER_CMD_FLAG_CPU, cpu_b) && + errno != ENXIO /* missing CPU*/) { + perror("sys_membarrier"); + abort(); + } + /* Remember a value from list_b. */ + expect_b = atomic_load(&list_b.c[cpu_b].head->data); + } + + test_membarrier_free_percpu_list(&list_a); + test_membarrier_free_percpu_list(&list_b); + + if (rseq_unregister_current_thread()) { + fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n", + errno, strerror(errno)); + abort(); + } + return NULL; +} + +/* Test MEMBARRIER_CMD_PRIVATE_RESTART_RSEQ_ON_CPU membarrier command. 
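+ * A manager thread alternates which per-cpu list is "active" while
+ * worker threads increment counters through the shared pointer; the
+ * membarrier command restarts any rseq critical section still
+ * dereferencing the stale pointer on the targeted cpu.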
*/ +#ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV +void test_membarrier(void) +{ + const int num_threads = opt_threads; + struct test_membarrier_thread_args thread_args; + pthread_t worker_threads[num_threads]; + pthread_t manager_thread; + int i, ret; + + if (sys_membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0, 0)) { + perror("sys_membarrier"); + abort(); + } + + thread_args.stop = 0; + thread_args.percpu_list_ptr = 0; + ret = pthread_create(&manager_thread, NULL, + test_membarrier_manager_thread, &thread_args); + if (ret) { + errno = ret; + perror("pthread_create"); + abort(); + } + + for (i = 0; i < num_threads; i++) { + ret = pthread_create(&worker_threads[i], NULL, + test_membarrier_worker_thread, &thread_args); + if (ret) { + errno = ret; + perror("pthread_create"); + abort(); + } + } + + + for (i = 0; i < num_threads; i++) { + ret = pthread_join(worker_threads[i], NULL); + if (ret) { + errno = ret; + perror("pthread_join"); + abort(); + } + } + + atomic_store(&thread_args.stop, 1); + ret = pthread_join(manager_thread, NULL); + if (ret) { + errno = ret; + perror("pthread_join"); + abort(); + } +} +#else /* RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV */ +void test_membarrier(void) +{ + fprintf(stderr, "rseq_offset_deref_addv is not implemented on this architecture. " + "Skipping membarrier test.\n"); +} +#endif + +static void show_usage(int argc, char **argv) +{ + printf("Usage : %s <OPTIONS>\n", + argv[0]); + printf("OPTIONS:\n"); + printf(" [-1 loops] Number of loops for delay injection 1\n"); + printf(" [-2 loops] Number of loops for delay injection 2\n"); + printf(" [-3 loops] Number of loops for delay injection 3\n"); + printf(" [-4 loops] Number of loops for delay injection 4\n"); + printf(" [-5 loops] Number of loops for delay injection 5\n"); + printf(" [-6 loops] Number of loops for delay injection 6\n"); + printf(" [-7 loops] Number of loops for delay injection 7 (-1 to enable -m)\n"); + printf(" [-8 loops] Number of loops for delay injection 8 (-1 to enable -m)\n"); + printf(" [-9 loops] Number of loops for delay injection 9 (-1 to enable -m)\n"); + printf(" [-m N] Yield/sleep/kill every modulo N (default 0: disabled) (>= 0)\n"); + printf(" [-y] Yield\n"); + printf(" [-k] Kill thread with signal\n"); + printf(" [-s S] S: =0: disabled (default), >0: sleep time (ms)\n"); + printf(" [-t N] Number of threads (default 200)\n"); + printf(" [-r N] Number of repetitions per thread (default 5000)\n"); + printf(" [-d] Disable rseq system call (no initialization)\n"); + printf(" [-D M] Disable rseq for each M threads\n"); + printf(" [-T test] Choose test: (s)pinlock, (l)ist, (b)uffer, (m)emcpy, (i)ncrement, membarrie(r)\n"); + printf(" [-M] Push into buffer and memcpy buffer with memory barriers.\n"); + printf(" [-v] Verbose output.\n"); + printf(" [-h] Show this help.\n"); + printf("\n"); +} + +int main(int argc, char **argv) +{ + int i; + + for (i = 1; i < argc; i++) { + if (argv[i][0] != '-') + continue; + switch (argv[i][1]) { + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + if (argc < i + 2) { + show_usage(argc, argv); + goto error; + } + loop_cnt[argv[i][1] - '0'] = atol(argv[i + 1]); + i++; + break; + case 'm': + if (argc < i + 2) { + show_usage(argc, argv); + goto error; + } + opt_modulo = atol(argv[i + 1]); + if (opt_modulo < 0) { + show_usage(argc, argv); + goto error; + } + i++; + break; + case 's': + if (argc < i + 2) { + show_usage(argc, argv); + goto error; + } + opt_sleep = atol(argv[i + 1]); + if (opt_sleep < 0) { 
+ show_usage(argc, argv); + goto error; + } + i++; + break; + case 'y': + opt_yield = 1; + break; + case 'k': + opt_signal = 1; + break; + case 'd': + opt_disable_rseq = 1; + break; + case 'D': + if (argc < i + 2) { + show_usage(argc, argv); + goto error; + } + opt_disable_mod = atol(argv[i + 1]); + if (opt_disable_mod < 0) { + show_usage(argc, argv); + goto error; + } + i++; + break; + case 't': + if (argc < i + 2) { + show_usage(argc, argv); + goto error; + } + opt_threads = atol(argv[i + 1]); + if (opt_threads < 0) { + show_usage(argc, argv); + goto error; + } + i++; + break; + case 'r': + if (argc < i + 2) { + show_usage(argc, argv); + goto error; + } + opt_reps = atoll(argv[i + 1]); + if (opt_reps < 0) { + show_usage(argc, argv); + goto error; + } + i++; + break; + case 'h': + show_usage(argc, argv); + goto end; + case 'T': + if (argc < i + 2) { + show_usage(argc, argv); + goto error; + } + opt_test = *argv[i + 1]; + switch (opt_test) { + case 's': + case 'l': + case 'i': + case 'b': + case 'm': + case 'r': + break; + default: + show_usage(argc, argv); + goto error; + } + i++; + break; + case 'v': + verbose = 1; + break; + case 'M': + opt_mb = 1; + break; + default: + show_usage(argc, argv); + goto error; + } + } + + loop_cnt_1 = loop_cnt[1]; + loop_cnt_2 = loop_cnt[2]; + loop_cnt_3 = loop_cnt[3]; + loop_cnt_4 = loop_cnt[4]; + loop_cnt_5 = loop_cnt[5]; + loop_cnt_6 = loop_cnt[6]; + + if (set_signal_handler()) + goto error; + + if (!opt_disable_rseq && rseq_register_current_thread()) + goto error; + switch (opt_test) { + case 's': + printf_verbose("spinlock\n"); + test_percpu_spinlock(); + break; + case 'l': + printf_verbose("linked list\n"); + test_percpu_list(); + break; + case 'b': + printf_verbose("buffer\n"); + test_percpu_buffer(); + break; + case 'm': + printf_verbose("memcpy buffer\n"); + test_percpu_memcpy_buffer(); + break; + case 'i': + printf_verbose("counter increment\n"); + test_percpu_inc(); + break; + case 'r': + printf_verbose("membarrier\n"); + test_membarrier(); + break; + } + if (!opt_disable_rseq && rseq_unregister_current_thread()) + abort(); +end: + return 0; + +error: + return -1; +} diff --git a/tools/testing/selftests/rseq/rseq-abi.h b/tools/testing/selftests/rseq/rseq-abi.h new file mode 100644 index 000000000..a8c44d9af --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-abi.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +#ifndef _RSEQ_ABI_H +#define _RSEQ_ABI_H + +/* + * rseq-abi.h + * + * Restartable sequences system call API + * + * Copyright (c) 2015-2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + */ + +#include <linux/types.h> +#include <asm/byteorder.h> + +enum rseq_abi_cpu_id_state { + RSEQ_ABI_CPU_ID_UNINITIALIZED = -1, + RSEQ_ABI_CPU_ID_REGISTRATION_FAILED = -2, +}; + +enum rseq_abi_flags { + RSEQ_ABI_FLAG_UNREGISTER = (1 << 0), +}; + +enum rseq_abi_cs_flags_bit { + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0, + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1, + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2, +}; + +enum rseq_abi_cs_flags { + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT = + (1U << RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT), + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL = + (1U << RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT), + RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE = + (1U << RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT), +}; + +/* + * struct rseq_abi_cs is aligned on 4 * 8 bytes to ensure it is always + * contained within a single cache-line. It is usually declared as + * link-time constant data. 
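+ * One descriptor describes a single critical section; a thread
+ * publishes its address in the rseq_cs field of struct rseq_abi
+ * for the duration of that section.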
+ */ +struct rseq_abi_cs { + /* Version of this structure. */ + __u32 version; + /* enum rseq_abi_cs_flags */ + __u32 flags; + __u64 start_ip; + /* Offset from start_ip. */ + __u64 post_commit_offset; + __u64 abort_ip; +} __attribute__((aligned(4 * sizeof(__u64)))); + +/* + * struct rseq_abi is aligned on 4 * 8 bytes to ensure it is always + * contained within a single cache-line. + * + * A single struct rseq_abi per thread is allowed. + */ +struct rseq_abi { + /* + * Restartable sequences cpu_id_start field. Updated by the + * kernel. Read by user-space with single-copy atomicity + * semantics. This field should only be read by the thread which + * registered this data structure. Aligned on 32-bit. Always + * contains a value in the range of possible CPUs, although the + * value may not be the actual current CPU (e.g. if rseq is not + * initialized). This CPU number value should always be compared + * against the value of the cpu_id field before performing a rseq + * commit or returning a value read from a data structure indexed + * using the cpu_id_start value. + */ + __u32 cpu_id_start; + /* + * Restartable sequences cpu_id field. Updated by the kernel. + * Read by user-space with single-copy atomicity semantics. This + * field should only be read by the thread which registered this + * data structure. Aligned on 32-bit. Values + * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED + * have a special semantic: the former means "rseq uninitialized", + * and latter means "rseq initialization failed". This value is + * meant to be read within rseq critical sections and compared + * with the cpu_id_start value previously read, before performing + * the commit instruction, or read and compared with the + * cpu_id_start value before returning a value loaded from a data + * structure indexed using the cpu_id_start value. + */ + __u32 cpu_id; + /* + * Restartable sequences rseq_cs field. + * + * Contains NULL when no critical section is active for the current + * thread, or holds a pointer to the currently active struct rseq_cs. + * + * Updated by user-space, which sets the address of the currently + * active rseq_cs at the beginning of assembly instruction sequence + * block, and set to NULL by the kernel when it restarts an assembly + * instruction sequence block, as well as when the kernel detects that + * it is preempting or delivering a signal outside of the range + * targeted by the rseq_cs. Also needs to be set to NULL by user-space + * before reclaiming memory that contains the targeted struct rseq_cs. + * + * Read and set by the kernel. Set by user-space with single-copy + * atomicity semantics. This field should only be updated by the + * thread which registered this data structure. Aligned on 64-bit. + */ + union { + __u64 ptr64; + + /* + * The "arch" field provides architecture accessor for + * the ptr field based on architecture pointer size and + * endianness. + */ + struct { +#ifdef __LP64__ + __u64 ptr; +#elif defined(__BYTE_ORDER) ? (__BYTE_ORDER == __BIG_ENDIAN) : defined(__BIG_ENDIAN) + __u32 padding; /* Initialized to zero. */ + __u32 ptr; +#else + __u32 ptr; + __u32 padding; /* Initialized to zero. */ +#endif + } arch; + } rseq_cs; + + /* + * Restartable sequences flags field. + * + * This field should only be updated by the thread which + * registered this data structure. Read by the kernel. + * Mainly used for single-stepping through rseq critical sections + * with debuggers. 
+ * + * - RSEQ_ABI_CS_FLAG_NO_RESTART_ON_PREEMPT + * Inhibit instruction sequence block restart on preemption + * for this thread. + * - RSEQ_ABI_CS_FLAG_NO_RESTART_ON_SIGNAL + * Inhibit instruction sequence block restart on signal + * delivery for this thread. + * - RSEQ_ABI_CS_FLAG_NO_RESTART_ON_MIGRATE + * Inhibit instruction sequence block restart on migration for + * this thread. + */ + __u32 flags; +} __attribute__((aligned(4 * sizeof(__u64)))); + +#endif /* _RSEQ_ABI_H */ diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h new file mode 100644 index 000000000..893a11eca --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-arm.h @@ -0,0 +1,827 @@ +/* SPDX-License-Identifier: LGPL-2.1 OR MIT */ +/* + * rseq-arm.h + * + * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + */ + +/* + * - ARM little endian + * + * RSEQ_SIG uses the udf A32 instruction with an uncommon immediate operand + * value 0x5de3. This traps if user-space reaches this instruction by mistake, + * and the uncommon operand ensures the kernel does not move the instruction + * pointer to attacker-controlled code on rseq abort. + * + * The instruction pattern in the A32 instruction set is: + * + * e7f5def3 udf #24035 ; 0x5de3 + * + * This translates to the following instruction pattern in the T16 instruction + * set: + * + * little endian: + * def3 udf #243 ; 0xf3 + * e7f5 b.n <7f5> + * + * - ARMv6+ big endian (BE8): + * + * ARMv6+ -mbig-endian generates mixed endianness code vs data: little-endian + * code and big-endian data. The data value of the signature needs to have its + * byte order reversed to generate the trap instruction: + * + * Data: 0xf3def5e7 + * + * Translates to this A32 instruction pattern: + * + * e7f5def3 udf #24035 ; 0x5de3 + * + * Translates to this T16 instruction pattern: + * + * def3 udf #243 ; 0xf3 + * e7f5 b.n <7f5> + * + * - Prior to ARMv6 big endian (BE32): + * + * Prior to ARMv6, -mbig-endian generates big-endian code and data + * (which match), so the endianness of the data representation of the + * signature should not be reversed. However, the choice between BE32 + * and BE8 is done by the linker, so we cannot know whether code and + * data endianness will be mixed before the linker is invoked. So rather + * than try to play tricks with the linker, the rseq signature is simply + * data (not a trap instruction) prior to ARMv6 on big endian. This is + * why the signature is expressed as data (.word) rather than as + * instruction (.inst) in assembler. 
+ */ + +#ifdef __ARMEB__ +#define RSEQ_SIG 0xf3def5e7 /* udf #24035 ; 0x5de3 (ARMv6+) */ +#else +#define RSEQ_SIG 0xe7f5def3 /* udf #24035 ; 0x5de3 */ +#endif + +#define rseq_smp_mb() __asm__ __volatile__ ("dmb" ::: "memory", "cc") +#define rseq_smp_rmb() __asm__ __volatile__ ("dmb" ::: "memory", "cc") +#define rseq_smp_wmb() __asm__ __volatile__ ("dmb" ::: "memory", "cc") + +#define rseq_smp_load_acquire(p) \ +__extension__ ({ \ + __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ + rseq_smp_mb(); \ + ____p1; \ +}) + +#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb() + +#define rseq_smp_store_release(p, v) \ +do { \ + rseq_smp_mb(); \ + RSEQ_WRITE_ONCE(*p, v); \ +} while (0) + +#ifdef RSEQ_SKIP_FASTPATH +#include "rseq-skip.h" +#else /* !RSEQ_SKIP_FASTPATH */ + +#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \ + post_commit_offset, abort_ip) \ + ".pushsection __rseq_cs, \"aw\"\n\t" \ + ".balign 32\n\t" \ + __rseq_str(label) ":\n\t" \ + ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \ + ".popsection\n\t" \ + ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \ + ".word " __rseq_str(label) "b, 0x0\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \ + __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \ + (post_commit_ip - start_ip), abort_ip) + +/* + * Exit points of a rseq critical section consist of all instructions outside + * of the critical section where a critical section can either branch to or + * reach through the normal course of its execution. The abort IP and the + * post-commit IP are already part of the __rseq_cs section and should not be + * explicitly defined as additional exit points. Knowing all exit points is + * useful to assist debuggers stepping over the critical section. 
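Because __rseq_exit_point_array is a section whose name is a valid C identifier, GNU ld synthesizes __start_/__stop_ bounds for it, so the records emitted by the macro below can be walked from C, e.g. by a debugger helper. A sketch; the struct name is hypothetical, and the two-64-bit-word layout matches the pairs of .word directives emitted here (and the .quad pairs on 64-bit architectures):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors one record of the __rseq_exit_point_array section. */
	struct rseq_exit_point {
		uint64_t start_ip;	/* start of the critical section */
		uint64_t exit_ip;	/* an IP reachable from inside it */
	};

	extern struct rseq_exit_point __start___rseq_exit_point_array[];
	extern struct rseq_exit_point __stop___rseq_exit_point_array[];

	/* Dump every registered exit point of this executable. */
	static void dump_exit_points(void)
	{
		struct rseq_exit_point *p;

		for (p = __start___rseq_exit_point_array;
		     p < __stop___rseq_exit_point_array; p++)
			printf("cs @ 0x%llx may exit to 0x%llx\n",
			       (unsigned long long)p->start_ip,
			       (unsigned long long)p->exit_ip);
	}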
+ */ +#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \ + ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \ + ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) ", 0x0\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \ + RSEQ_INJECT_ASM(1) \ + "adr r0, " __rseq_str(cs_label) "\n\t" \ + "str r0, %[" __rseq_str(rseq_cs) "]\n\t" \ + __rseq_str(label) ":\n\t" + +#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \ + RSEQ_INJECT_ASM(2) \ + "ldr r0, %[" __rseq_str(current_cpu_id) "]\n\t" \ + "cmp %[" __rseq_str(cpu_id) "], r0\n\t" \ + "bne " __rseq_str(label) "\n\t" + +#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \ + abort_label, version, flags, \ + start_ip, post_commit_offset, abort_ip) \ + ".balign 32\n\t" \ + __rseq_str(table_label) ":\n\t" \ + ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \ + ".word " __rseq_str(RSEQ_SIG) "\n\t" \ + __rseq_str(label) ":\n\t" \ + teardown \ + "b %l[" __rseq_str(abort_label) "]\n\t" + +#define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \ + start_ip, post_commit_ip, abort_ip) \ + __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \ + abort_label, 0x0, 0x0, start_ip, \ + (post_commit_ip - start_ip), abort_ip) + +#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \ + __rseq_str(label) ":\n\t" \ + teardown \ + "b %l[" __rseq_str(cmpfail_label) "]\n\t" + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
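+	 * The descriptor whose address is stored below covers the range
+	 * from label 1 to the commit at label 2; if this thread is
+	 * preempted or takes a signal anywhere in that range, the kernel
+	 * branches it to the abort handler at label 4.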
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne %l[error2]\n\t" +#endif + /* final store */ + "str %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "r0", "memory", "cc" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, + long voffp, intptr_t *load, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + "ldr r0, %[v]\n\t" + "cmp %[expectnot], r0\n\t" + "beq %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + "ldr r0, %[v]\n\t" + "cmp %[expectnot], r0\n\t" + "beq %l[error2]\n\t" +#endif + "str r0, %[load]\n\t" + "add r0, %[voffp]\n\t" + "ldr r0, [r0]\n\t" + /* final store */ + "str r0, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [expectnot] "r" (expectnot), + [voffp] "Ir" (voffp), + [load] "m" (*load) + RSEQ_INJECT_INPUT + : "r0", "memory", "cc" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_addv(intptr_t *v, intptr_t count, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
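+	 * rseq_addv performs no expected-value comparison, so apart from
+	 * an abort to label 4 the only way out is the commit at label 2:
+	 * there is no cmpfail exit point here.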
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) +#endif + "ldr r0, %[v]\n\t" + "add r0, %[count]\n\t" + /* final store */ + "str r0, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(4) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [v] "m" (*v), + [count] "Ir" (count) + RSEQ_INJECT_INPUT + : "r0", "memory", "cc" + RSEQ_INJECT_CLOBBER + : abort +#ifdef RSEQ_COMPARE_TWICE + , error1 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne %l[error2]\n\t" +#endif + /* try store */ + "str %[newv2], %[v2]\n\t" + RSEQ_INJECT_ASM(5) + /* final store */ + "str %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* try store input */ + [v2] "m" (*v2), + [newv2] "r" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "r0", "memory", "cc" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
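+	 * Same flow as the non-release variant above, except that a "dmb"
+	 * is issued between the store to v2 and the final store, making
+	 * the commit a store-release.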
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne %l[error2]\n\t" +#endif + /* try store */ + "str %[newv2], %[v2]\n\t" + RSEQ_INJECT_ASM(5) + "dmb\n\t" /* full mb provides store-release */ + /* final store */ + "str %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* try store input */ + [v2] "m" (*v2), + [newv2] "r" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "r0", "memory", "cc" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t expect2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
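+	 * Two locations are checked here: v must equal expect and v2 must
+	 * equal expect2 before the single commit store to v; v2 itself is
+	 * only read, never written.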
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) + "ldr r0, %[v2]\n\t" + "cmp %[expect2], r0\n\t" + "bne %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(5) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne %l[error2]\n\t" + "ldr r0, %[v2]\n\t" + "cmp %[expect2], r0\n\t" + "bne %l[error3]\n\t" +#endif + /* final store */ + "str %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* cmp2 input */ + [v2] "m" (*v2), + [expect2] "r" (expect2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "r0", "memory", "cc" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2, error3 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("1st expected value comparison failed"); +error3: + rseq_after_asm_goto(); + rseq_bug("2nd expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + uint32_t rseq_scratch[3]; + + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + "str %[src], %[rseq_scratch0]\n\t" + "str %[dst], %[rseq_scratch1]\n\t" + "str %[len], %[rseq_scratch2]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs. 
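+	 * The copy loop below advances the src, dst and len registers in
+	 * place, which is why they were spilled to rseq_scratch[] above
+	 * and are reloaded on every exit path (commit, abort and
+	 * compare-fail alike).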
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne 5f\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne 7f\n\t" +#endif + /* try memcpy */ + "cmp %[len], #0\n\t" \ + "beq 333f\n\t" \ + "222:\n\t" \ + "ldrb %%r0, [%[src]]\n\t" \ + "strb %%r0, [%[dst]]\n\t" \ + "adds %[src], #1\n\t" \ + "adds %[dst], #1\n\t" \ + "subs %[len], #1\n\t" \ + "bne 222b\n\t" \ + "333:\n\t" \ + RSEQ_INJECT_ASM(5) + /* final store */ + "str %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + /* teardown */ + "ldr %[len], %[rseq_scratch2]\n\t" + "ldr %[dst], %[rseq_scratch1]\n\t" + "ldr %[src], %[rseq_scratch0]\n\t" + "b 8f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, + /* teardown */ + "ldr %[len], %[rseq_scratch2]\n\t" + "ldr %[dst], %[rseq_scratch1]\n\t" + "ldr %[src], %[rseq_scratch0]\n\t", + abort, 1b, 2b, 4f) + RSEQ_ASM_DEFINE_CMPFAIL(5, + /* teardown */ + "ldr %[len], %[rseq_scratch2]\n\t" + "ldr %[dst], %[rseq_scratch1]\n\t" + "ldr %[src], %[rseq_scratch0]\n\t", + cmpfail) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_CMPFAIL(6, + /* teardown */ + "ldr %[len], %[rseq_scratch2]\n\t" + "ldr %[dst], %[rseq_scratch1]\n\t" + "ldr %[src], %[rseq_scratch0]\n\t", + error1) + RSEQ_ASM_DEFINE_CMPFAIL(7, + /* teardown */ + "ldr %[len], %[rseq_scratch2]\n\t" + "ldr %[dst], %[rseq_scratch1]\n\t" + "ldr %[src], %[rseq_scratch0]\n\t", + error2) +#endif + "8:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len), + [rseq_scratch0] "m" (rseq_scratch[0]), + [rseq_scratch1] "m" (rseq_scratch[1]), + [rseq_scratch2] "m" (rseq_scratch[2]) + RSEQ_INJECT_INPUT + : "r0", "memory", "cc" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + uint32_t rseq_scratch[3]; + + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + "str %[src], %[rseq_scratch0]\n\t" + "str %[dst], %[rseq_scratch1]\n\t" + "str %[len], %[rseq_scratch2]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs. 
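+	 * As in the non-release memcpy variant, the scratch values are
+	 * restored on all exits; the "dmb" after the copy orders the
+	 * copied bytes before the final store to v.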
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne 5f\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f) + "ldr r0, %[v]\n\t" + "cmp %[expect], r0\n\t" + "bne 7f\n\t" +#endif + /* try memcpy */ + "cmp %[len], #0\n\t" \ + "beq 333f\n\t" \ + "222:\n\t" \ + "ldrb %%r0, [%[src]]\n\t" \ + "strb %%r0, [%[dst]]\n\t" \ + "adds %[src], #1\n\t" \ + "adds %[dst], #1\n\t" \ + "subs %[len], #1\n\t" \ + "bne 222b\n\t" \ + "333:\n\t" \ + RSEQ_INJECT_ASM(5) + "dmb\n\t" /* full mb provides store-release */ + /* final store */ + "str %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + /* teardown */ + "ldr %[len], %[rseq_scratch2]\n\t" + "ldr %[dst], %[rseq_scratch1]\n\t" + "ldr %[src], %[rseq_scratch0]\n\t" + "b 8f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, + /* teardown */ + "ldr %[len], %[rseq_scratch2]\n\t" + "ldr %[dst], %[rseq_scratch1]\n\t" + "ldr %[src], %[rseq_scratch0]\n\t", + abort, 1b, 2b, 4f) + RSEQ_ASM_DEFINE_CMPFAIL(5, + /* teardown */ + "ldr %[len], %[rseq_scratch2]\n\t" + "ldr %[dst], %[rseq_scratch1]\n\t" + "ldr %[src], %[rseq_scratch0]\n\t", + cmpfail) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_CMPFAIL(6, + /* teardown */ + "ldr %[len], %[rseq_scratch2]\n\t" + "ldr %[dst], %[rseq_scratch1]\n\t" + "ldr %[src], %[rseq_scratch0]\n\t", + error1) + RSEQ_ASM_DEFINE_CMPFAIL(7, + /* teardown */ + "ldr %[len], %[rseq_scratch2]\n\t" + "ldr %[dst], %[rseq_scratch1]\n\t" + "ldr %[src], %[rseq_scratch0]\n\t", + error2) +#endif + "8:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len), + [rseq_scratch0] "m" (rseq_scratch[0]), + [rseq_scratch1] "m" (rseq_scratch[1]), + [rseq_scratch2] "m" (rseq_scratch[2]) + RSEQ_INJECT_INPUT + : "r0", "memory", "cc" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +#endif /* !RSEQ_SKIP_FASTPATH */ diff --git a/tools/testing/selftests/rseq/rseq-arm64.h b/tools/testing/selftests/rseq/rseq-arm64.h new file mode 100644 index 000000000..cbe190a4d --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-arm64.h @@ -0,0 +1,695 @@ +/* SPDX-License-Identifier: LGPL-2.1 OR MIT */ +/* + * rseq-arm64.h + * + * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + * (C) Copyright 2018 - Will Deacon <will.deacon@arm.com> + */ + +/* + * aarch64 -mbig-endian generates mixed endianness code vs data: + * little-endian code and big-endian data. Ensure the RSEQ_SIG signature + * matches code endianness. + */ +#define RSEQ_SIG_CODE 0xd428bc00 /* BRK #0x45E0. */ + +#ifdef __AARCH64EB__ +#define RSEQ_SIG_DATA 0x00bc28d4 /* BRK #0x45E0. 
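+ * This is RSEQ_SIG_CODE with its bytes reversed: the value
+ * emitted into big-endian data then matches the little-endian
+ * encoding of the BRK instruction in the code stream.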
*/
+#else
+#define RSEQ_SIG_DATA	RSEQ_SIG_CODE
+#endif
+
+#define RSEQ_SIG	RSEQ_SIG_DATA
+
+#define rseq_smp_mb()	__asm__ __volatile__ ("dmb ish" ::: "memory")
+#define rseq_smp_rmb()	__asm__ __volatile__ ("dmb ishld" ::: "memory")
+#define rseq_smp_wmb()	__asm__ __volatile__ ("dmb ishst" ::: "memory")
+
+#define rseq_smp_load_acquire(p)						\
+__extension__ ({								\
+	__typeof(*p) ____p1;							\
+	switch (sizeof(*p)) {							\
+	case 1:									\
+		asm volatile ("ldarb %w0, %1"					\
+			: "=r" (____p1)						\
+			: "Q" (*p) : "memory");					\
+		break;								\
+	case 2:									\
+		asm volatile ("ldarh %w0, %1"					\
+			: "=r" (____p1)						\
+			: "Q" (*p) : "memory");					\
+		break;								\
+	case 4:									\
+		asm volatile ("ldar %w0, %1"					\
+			: "=r" (____p1)						\
+			: "Q" (*p) : "memory");					\
+		break;								\
+	case 8:									\
+		asm volatile ("ldar %0, %1"					\
+			: "=r" (____p1)						\
+			: "Q" (*p) : "memory");					\
+		break;								\
+	}									\
+	____p1;									\
+})
+
+#define rseq_smp_acquire__after_ctrl_dep()	rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v)						\
+do {										\
+	switch (sizeof(*p)) {							\
+	case 1:									\
+		asm volatile ("stlrb %w1, %0"					\
+			: "=Q" (*p)						\
+			: "r" ((__u8)v)						\
+			: "memory");						\
+		break;								\
+	case 2:									\
+		asm volatile ("stlrh %w1, %0"					\
+			: "=Q" (*p)						\
+			: "r" ((__u16)v)					\
+			: "memory");						\
+		break;								\
+	case 4:									\
+		asm volatile ("stlr %w1, %0"					\
+			: "=Q" (*p)						\
+			: "r" ((__u32)v)					\
+			: "memory");						\
+		break;								\
+	case 8:									\
+		asm volatile ("stlr %1, %0"					\
+			: "=Q" (*p)						\
+			: "r" ((__u64)v)					\
+			: "memory");						\
+		break;								\
+	}									\
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#define RSEQ_ASM_TMP_REG32	"w15"
+#define RSEQ_ASM_TMP_REG	"x15"
+#define RSEQ_ASM_TMP_REG_2	"x14"
+
+#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip,		\
+				post_commit_offset, abort_ip)			\
+	"	.pushsection	__rseq_cs, \"aw\"\n"				\
+	"	.balign	32\n"							\
+	__rseq_str(label) ":\n"							\
+	"	.long	" __rseq_str(version) ", " __rseq_str(flags) "\n"	\
+	"	.quad	" __rseq_str(start_ip) ", "				\
+			  __rseq_str(post_commit_offset) ", "			\
+			  __rseq_str(abort_ip) "\n"				\
+	"	.popsection\n\t"						\
+	"	.pushsection __rseq_cs_ptr_array, \"aw\"\n"			\
+	"	.quad " __rseq_str(label) "b\n"					\
+	"	.popsection\n"
+
+#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip)	\
+	__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip,			\
+				(post_commit_ip - start_ip), abort_ip)
+
+/*
+ * Exit points of a rseq critical section consist of all instructions outside
+ * of the critical section where a critical section can either branch to or
+ * reach through the normal course of its execution. The abort IP and the
+ * post-commit IP are already part of the __rseq_cs section and should not be
+ * explicitly defined as additional exit points. Knowing all exit points is
+ * useful to assist debuggers stepping over the critical section.
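A usage sketch for the acquire/release helpers above; publish()/consume() are illustrative names, not part of this header. This is the same pairing the per-cpu spinlock test relies on, where an acquire after control dependency matches rseq_smp_store_release() on unlock:

	#include <stdint.h>

	static intptr_t payload;
	static intptr_t ready;

	/* Producer: write payload, then release-store the ready flag. */
	static void publish(intptr_t v)
	{
		payload = v;
		rseq_smp_store_release(&ready, 1);
	}

	/* Consumer: acquire-load ready; once set, payload is visible. */
	static int consume(intptr_t *out)
	{
		if (!rseq_smp_load_acquire(&ready))
			return 0;
		*out = payload;
		return 1;
	}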
+ */ +#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \ + " .pushsection __rseq_exit_point_array, \"aw\"\n" \ + " .quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n" \ + " .popsection\n" + +#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \ + RSEQ_INJECT_ASM(1) \ + " adrp " RSEQ_ASM_TMP_REG ", " __rseq_str(cs_label) "\n" \ + " add " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \ + ", :lo12:" __rseq_str(cs_label) "\n" \ + " str " RSEQ_ASM_TMP_REG ", %[" __rseq_str(rseq_cs) "]\n" \ + __rseq_str(label) ":\n" + +#define RSEQ_ASM_DEFINE_ABORT(label, abort_label) \ + " b 222f\n" \ + " .inst " __rseq_str(RSEQ_SIG_CODE) "\n" \ + __rseq_str(label) ":\n" \ + " b %l[" __rseq_str(abort_label) "]\n" \ + "222:\n" + +#define RSEQ_ASM_OP_STORE(value, var) \ + " str %[" __rseq_str(value) "], %[" __rseq_str(var) "]\n" + +#define RSEQ_ASM_OP_STORE_RELEASE(value, var) \ + " stlr %[" __rseq_str(value) "], %[" __rseq_str(var) "]\n" + +#define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label) \ + RSEQ_ASM_OP_STORE(value, var) \ + __rseq_str(post_commit_label) ":\n" + +#define RSEQ_ASM_OP_FINAL_STORE_RELEASE(value, var, post_commit_label) \ + RSEQ_ASM_OP_STORE_RELEASE(value, var) \ + __rseq_str(post_commit_label) ":\n" + +#define RSEQ_ASM_OP_CMPEQ(var, expect, label) \ + " ldr " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n" \ + " sub " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \ + ", %[" __rseq_str(expect) "]\n" \ + " cbnz " RSEQ_ASM_TMP_REG ", " __rseq_str(label) "\n" + +#define RSEQ_ASM_OP_CMPEQ32(var, expect, label) \ + " ldr " RSEQ_ASM_TMP_REG32 ", %[" __rseq_str(var) "]\n" \ + " sub " RSEQ_ASM_TMP_REG32 ", " RSEQ_ASM_TMP_REG32 \ + ", %w[" __rseq_str(expect) "]\n" \ + " cbnz " RSEQ_ASM_TMP_REG32 ", " __rseq_str(label) "\n" + +#define RSEQ_ASM_OP_CMPNE(var, expect, label) \ + " ldr " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n" \ + " sub " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \ + ", %[" __rseq_str(expect) "]\n" \ + " cbz " RSEQ_ASM_TMP_REG ", " __rseq_str(label) "\n" + +#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \ + RSEQ_INJECT_ASM(2) \ + RSEQ_ASM_OP_CMPEQ32(current_cpu_id, cpu_id, label) + +#define RSEQ_ASM_OP_R_LOAD(var) \ + " ldr " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n" + +#define RSEQ_ASM_OP_R_STORE(var) \ + " str " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n" + +#define RSEQ_ASM_OP_R_LOAD_OFF(offset) \ + " ldr " RSEQ_ASM_TMP_REG ", [" RSEQ_ASM_TMP_REG \ + ", %[" __rseq_str(offset) "]]\n" + +#define RSEQ_ASM_OP_R_ADD(count) \ + " add " RSEQ_ASM_TMP_REG ", " RSEQ_ASM_TMP_REG \ + ", %[" __rseq_str(count) "]\n" + +#define RSEQ_ASM_OP_R_FINAL_STORE(var, post_commit_label) \ + " str " RSEQ_ASM_TMP_REG ", %[" __rseq_str(var) "]\n" \ + __rseq_str(post_commit_label) ":\n" + +#define RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len) \ + " cbz %[" __rseq_str(len) "], 333f\n" \ + " mov " RSEQ_ASM_TMP_REG_2 ", %[" __rseq_str(len) "]\n" \ + "222: sub " RSEQ_ASM_TMP_REG_2 ", " RSEQ_ASM_TMP_REG_2 ", #1\n" \ + " ldrb " RSEQ_ASM_TMP_REG32 ", [%[" __rseq_str(src) "]" \ + ", " RSEQ_ASM_TMP_REG_2 "]\n" \ + " strb " RSEQ_ASM_TMP_REG32 ", [%[" __rseq_str(dst) "]" \ + ", " RSEQ_ASM_TMP_REG_2 "]\n" \ + " cbnz " RSEQ_ASM_TMP_REG_2 ", 222b\n" \ + "333:\n" + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) + 
RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) +#endif + RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) +#endif + RSEQ_ASM_OP_FINAL_STORE(newv, v, 3) + RSEQ_INJECT_ASM(5) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [v] "Qo" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "memory", RSEQ_ASM_TMP_REG + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, + long voffp, intptr_t *load, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) +#endif + RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + RSEQ_ASM_OP_CMPNE(v, expectnot, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + RSEQ_ASM_OP_CMPNE(v, expectnot, %l[error2]) +#endif + RSEQ_ASM_OP_R_LOAD(v) + RSEQ_ASM_OP_R_STORE(load) + RSEQ_ASM_OP_R_LOAD_OFF(voffp) + RSEQ_ASM_OP_R_FINAL_STORE(v, 3) + RSEQ_INJECT_ASM(5) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [v] "Qo" (*v), + [expectnot] "r" (expectnot), + [load] "Qo" (*load), + [voffp] "r" (voffp) + RSEQ_INJECT_INPUT + : "memory", RSEQ_ASM_TMP_REG + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_addv(intptr_t *v, intptr_t count, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) +#endif + RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) +#endif + RSEQ_ASM_OP_R_LOAD(v) + RSEQ_ASM_OP_R_ADD(count) + RSEQ_ASM_OP_R_FINAL_STORE(v, 3) + RSEQ_INJECT_ASM(4) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + 
[rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [v] "Qo" (*v), + [count] "r" (count) + RSEQ_INJECT_INPUT + : "memory", RSEQ_ASM_TMP_REG + : abort +#ifdef RSEQ_COMPARE_TWICE + , error1 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) +#endif + RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) +#endif + RSEQ_ASM_OP_STORE(newv2, v2) + RSEQ_INJECT_ASM(5) + RSEQ_ASM_OP_FINAL_STORE(newv, v, 3) + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [expect] "r" (expect), + [v] "Qo" (*v), + [newv] "r" (newv), + [v2] "Qo" (*v2), + [newv2] "r" (newv2) + RSEQ_INJECT_INPUT + : "memory", RSEQ_ASM_TMP_REG + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) +#endif + RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) +#endif + RSEQ_ASM_OP_STORE(newv2, v2) + RSEQ_INJECT_ASM(5) + RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3) + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [expect] "r" (expect), + [v] "Qo" (*v), + [newv] "r" (newv), + [v2] "Qo" (*v2), + [newv2] "r" (newv2) + RSEQ_INJECT_INPUT + : "memory", RSEQ_ASM_TMP_REG + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE 
+error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t expect2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error3]) +#endif + RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) + RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[cmpfail]) + RSEQ_INJECT_ASM(5) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) + RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[error3]) +#endif + RSEQ_ASM_OP_FINAL_STORE(newv, v, 3) + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [v] "Qo" (*v), + [expect] "r" (expect), + [v2] "Qo" (*v2), + [expect2] "r" (expect2), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "memory", RSEQ_ASM_TMP_REG + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2, error3 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +error3: + rseq_after_asm_goto(); + rseq_bug("2nd expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) +#endif + RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) +#endif + RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len) + RSEQ_INJECT_ASM(5) + RSEQ_ASM_OP_FINAL_STORE(newv, v, 3) + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [expect] "r" (expect), + [v] "Qo" (*v), + [newv] "r" (newv), + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len) + RSEQ_INJECT_INPUT + : "memory", RSEQ_ASM_TMP_REG, RSEQ_ASM_TMP_REG_2 + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + 
rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2]) +#endif + RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) +#endif + RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len) + RSEQ_INJECT_ASM(5) + RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3) + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [expect] "r" (expect), + [v] "Qo" (*v), + [newv] "r" (newv), + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len) + RSEQ_INJECT_INPUT + : "memory", RSEQ_ASM_TMP_REG, RSEQ_ASM_TMP_REG_2 + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +#endif /* !RSEQ_SKIP_FASTPATH */ diff --git a/tools/testing/selftests/rseq/rseq-generic-thread-pointer.h b/tools/testing/selftests/rseq/rseq-generic-thread-pointer.h new file mode 100644 index 000000000..38c584661 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-generic-thread-pointer.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-generic-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + */ + +#ifndef _RSEQ_GENERIC_THREAD_POINTER +#define _RSEQ_GENERIC_THREAD_POINTER + +#ifdef __cplusplus +extern "C" { +#endif + +/* Use gcc builtin thread pointer. */ +static inline void *rseq_thread_pointer(void) +{ + return __builtin_thread_pointer(); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h new file mode 100644 index 000000000..878739fae --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-mips.h @@ -0,0 +1,777 @@ +/* SPDX-License-Identifier: LGPL-2.1 OR MIT */ +/* + * Author: Paul Burton <paul.burton@mips.com> + * (C) Copyright 2018 MIPS Tech LLC + * + * Based on rseq-arm.h: + * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + */ + +/* + * RSEQ_SIG uses the break instruction. The instruction pattern is: + * + * On MIPS: + * 0350000d break 0x350 + * + * On nanoMIPS: + * 00100350 break 0x350 + * + * On microMIPS: + * 0000d407 break 0x350 + * + * For nanoMIPS32 and microMIPS, the instruction stream is encoded as 16-bit + * halfwords, so the signature halfwords need to be swapped accordingly for + * little-endian. 
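The swap described above is a plain rotation of the two 16-bit halves of the signature word. A small helper (name illustrative) makes the relationship between the big- and little-endian values defined below explicit:

	#include <stdint.h>

	/* Swap the two 16-bit halves of a 32-bit signature word. */
	static inline uint32_t sig_halfword_swap(uint32_t sig)
	{
		return (sig << 16) | (sig >> 16);
	}

	/*
	 * sig_halfword_swap(0x00100350) == 0x03500010  (nanoMIPS LE)
	 * sig_halfword_swap(0x0000d407) == 0xd4070000  (microMIPS LE)
	 */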
+ */ +#if defined(__nanomips__) +# ifdef __MIPSEL__ +# define RSEQ_SIG 0x03500010 +# else +# define RSEQ_SIG 0x00100350 +# endif +#elif defined(__mips_micromips) +# ifdef __MIPSEL__ +# define RSEQ_SIG 0xd4070000 +# else +# define RSEQ_SIG 0x0000d407 +# endif +#elif defined(__mips__) +# define RSEQ_SIG 0x0350000d +#else +/* Unknown MIPS architecture. */ +#endif + +#define rseq_smp_mb() __asm__ __volatile__ ("sync" ::: "memory") +#define rseq_smp_rmb() rseq_smp_mb() +#define rseq_smp_wmb() rseq_smp_mb() + +#define rseq_smp_load_acquire(p) \ +__extension__ ({ \ + __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ + rseq_smp_mb(); \ + ____p1; \ +}) + +#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb() + +#define rseq_smp_store_release(p, v) \ +do { \ + rseq_smp_mb(); \ + RSEQ_WRITE_ONCE(*p, v); \ +} while (0) + +#ifdef RSEQ_SKIP_FASTPATH +#include "rseq-skip.h" +#else /* !RSEQ_SKIP_FASTPATH */ + +#if _MIPS_SZLONG == 64 +# define LONG ".dword" +# define LONG_LA "dla" +# define LONG_L "ld" +# define LONG_S "sd" +# define LONG_ADDI "daddiu" +# define U32_U64_PAD(x) x +#elif _MIPS_SZLONG == 32 +# define LONG ".word" +# define LONG_LA "la" +# define LONG_L "lw" +# define LONG_S "sw" +# define LONG_ADDI "addiu" +# ifdef __BIG_ENDIAN +# define U32_U64_PAD(x) "0x0, " x +# else +# define U32_U64_PAD(x) x ", 0x0" +# endif +#else +# error unsupported _MIPS_SZLONG +#endif + +#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \ + post_commit_offset, abort_ip) \ + ".pushsection __rseq_cs, \"aw\"\n\t" \ + ".balign 32\n\t" \ + __rseq_str(label) ":\n\t" \ + ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \ + ".popsection\n\t" \ + ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(label) "b") "\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \ + __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \ + (post_commit_ip - start_ip), abort_ip) + +/* + * Exit points of a rseq critical section consist of all instructions outside + * of the critical section where a critical section can either branch to or + * reach through the normal course of its execution. The abort IP and the + * post-commit IP are already part of the __rseq_cs section and should not be + * explicitly defined as additional exit points. Knowing all exit points is + * useful to assist debuggers stepping over the critical section. 
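On 32-bit MIPS each descriptor field is still 64-bit wide, so the U32_U64_PAD() macro above drops the 32-bit address into the half the kernel will read and zero-fills the other. A C union expressing the same layout decision, for illustration only (names hypothetical):

	#include <stdint.h>

	/* How a 32-bit MIPS target fills one 64-bit descriptor slot. */
	union rseq_u64_field {
		uint64_t full;
		struct {
	#ifdef __MIPSEB__
			uint32_t padding;	/* high half, zero */
			uint32_t ptr;		/* address in low half */
	#else
			uint32_t ptr;		/* address in low half */
			uint32_t padding;	/* high half, zero */
	#endif
		} half;
	};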
+ */ +#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \ + ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(exit_ip)) "\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \ + RSEQ_INJECT_ASM(1) \ + LONG_LA " $4, " __rseq_str(cs_label) "\n\t" \ + LONG_S " $4, %[" __rseq_str(rseq_cs) "]\n\t" \ + __rseq_str(label) ":\n\t" + +#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \ + RSEQ_INJECT_ASM(2) \ + "lw $4, %[" __rseq_str(current_cpu_id) "]\n\t" \ + "bne $4, %[" __rseq_str(cpu_id) "], " __rseq_str(label) "\n\t" + +#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \ + abort_label, version, flags, \ + start_ip, post_commit_offset, abort_ip) \ + ".balign 32\n\t" \ + __rseq_str(table_label) ":\n\t" \ + ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \ + LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \ + ".word " __rseq_str(RSEQ_SIG) "\n\t" \ + __rseq_str(label) ":\n\t" \ + teardown \ + "b %l[" __rseq_str(abort_label) "]\n\t" + +#define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \ + start_ip, post_commit_ip, abort_ip) \ + __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \ + abort_label, 0x0, 0x0, start_ip, \ + (post_commit_ip - start_ip), abort_ip) + +#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \ + __rseq_str(label) ":\n\t" \ + teardown \ + "b %l[" __rseq_str(cmpfail_label) "]\n\t" + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
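+	 * The MIPS fast paths use $4 (a0) as their only scratch register;
+	 * it is listed in the clobbers of every asm goto below, so the
+	 * compiler never keeps a live value there.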
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[error2]\n\t" +#endif + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + return 0; +abort: + RSEQ_INJECT_FAILED + return -1; +cmpfail: + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, + long voffp, intptr_t *load, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "beq $4, %[expectnot], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_L " $4, %[v]\n\t" + "beq $4, %[expectnot], %l[error2]\n\t" +#endif + LONG_S " $4, %[load]\n\t" + LONG_ADDI " $4, %[voffp]\n\t" + LONG_L " $4, 0($4)\n\t" + /* final store */ + LONG_S " $4, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [expectnot] "r" (expectnot), + [voffp] "Ir" (voffp), + [load] "m" (*load) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + return 0; +abort: + RSEQ_INJECT_FAILED + return -1; +cmpfail: + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_addv(intptr_t *v, intptr_t count, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
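+	 * The load-add-store runs entirely between labels 1 and 2, so an
+	 * abort can never publish a partial update: either the final
+	 * store at label 2 commits, or v is left untouched.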
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) +#endif + LONG_L " $4, %[v]\n\t" + LONG_ADDI " $4, %[count]\n\t" + /* final store */ + LONG_S " $4, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(4) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [v] "m" (*v), + [count] "Ir" (count) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort +#ifdef RSEQ_COMPARE_TWICE + , error1 +#endif + ); + return 0; +abort: + RSEQ_INJECT_FAILED + return -1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[error2]\n\t" +#endif + /* try store */ + LONG_S " %[newv2], %[v2]\n\t" + RSEQ_INJECT_ASM(5) + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* try store input */ + [v2] "m" (*v2), + [newv2] "r" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + return 0; +abort: + RSEQ_INJECT_FAILED + return -1; +cmpfail: + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
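+	 * As on ARM, the commit store must be a release so that the v2
+	 * update is visible first; here a full "sync" before the final
+	 * store provides that ordering.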
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[error2]\n\t" +#endif + /* try store */ + LONG_S " %[newv2], %[v2]\n\t" + RSEQ_INJECT_ASM(5) + "sync\n\t" /* full sync provides store-release */ + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* try store input */ + [v2] "m" (*v2), + [newv2] "r" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + return 0; +abort: + RSEQ_INJECT_FAILED + return -1; +cmpfail: + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t expect2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) + LONG_L " $4, %[v2]\n\t" + "bne $4, %[expect2], %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(5) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], %l[error2]\n\t" + LONG_L " $4, %[v2]\n\t" + "bne $4, %[expect2], %l[error3]\n\t" +#endif + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + "b 5f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f) + "5:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* cmp2 input */ + [v2] "m" (*v2), + [expect2] "r" (expect2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2, error3 +#endif + ); + return 0; +abort: + RSEQ_INJECT_FAILED + return -1; +cmpfail: + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("1st expected value comparison failed"); +error3: + rseq_bug("2nd expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + uintptr_t rseq_scratch[3]; + + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + LONG_S " %[src], %[rseq_scratch0]\n\t" + LONG_S " %[dst], %[rseq_scratch1]\n\t" + LONG_S " %[len], %[rseq_scratch2]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs. 
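+	 * Aborting in the middle of the byte copy below may leave dst
+	 * partially written; that is harmless because the commit store to
+	 * v has not happened yet, so no reader treats the destination as
+	 * valid.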
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], 5f\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], 7f\n\t" +#endif + /* try memcpy */ + "beqz %[len], 333f\n\t" \ + "222:\n\t" \ + "lb $4, 0(%[src])\n\t" \ + "sb $4, 0(%[dst])\n\t" \ + LONG_ADDI " %[src], 1\n\t" \ + LONG_ADDI " %[dst], 1\n\t" \ + LONG_ADDI " %[len], -1\n\t" \ + "bnez %[len], 222b\n\t" \ + "333:\n\t" \ + RSEQ_INJECT_ASM(5) + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t" + "b 8f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + abort, 1b, 2b, 4f) + RSEQ_ASM_DEFINE_CMPFAIL(5, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + cmpfail) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_CMPFAIL(6, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + error1) + RSEQ_ASM_DEFINE_CMPFAIL(7, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + error2) +#endif + "8:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len), + [rseq_scratch0] "m" (rseq_scratch[0]), + [rseq_scratch1] "m" (rseq_scratch[1]), + [rseq_scratch2] "m" (rseq_scratch[2]) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + return 0; +abort: + RSEQ_INJECT_FAILED + return -1; +cmpfail: + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + uintptr_t rseq_scratch[3]; + + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + LONG_S " %[src], %[rseq_scratch0]\n\t" + LONG_S " %[dst], %[rseq_scratch1]\n\t" + LONG_S " %[len], %[rseq_scratch2]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], 5f\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f) + LONG_L " $4, %[v]\n\t" + "bne $4, %[expect], 7f\n\t" +#endif + /* try memcpy */ + "beqz %[len], 333f\n\t" \ + "222:\n\t" \ + "lb $4, 0(%[src])\n\t" \ + "sb $4, 0(%[dst])\n\t" \ + LONG_ADDI " %[src], 1\n\t" \ + LONG_ADDI " %[dst], 1\n\t" \ + LONG_ADDI " %[len], -1\n\t" \ + "bnez %[len], 222b\n\t" \ + "333:\n\t" \ + RSEQ_INJECT_ASM(5) + "sync\n\t" /* full sync provides store-release */ + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t" + "b 8f\n\t" + RSEQ_ASM_DEFINE_ABORT(3, 4, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + abort, 1b, 2b, 4f) + RSEQ_ASM_DEFINE_CMPFAIL(5, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + cmpfail) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_CMPFAIL(6, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + error1) + RSEQ_ASM_DEFINE_CMPFAIL(7, + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + error2) +#endif + "8:\n\t" + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len), + [rseq_scratch0] "m" (rseq_scratch[0]), + [rseq_scratch1] "m" (rseq_scratch[1]), + [rseq_scratch2] "m" (rseq_scratch[2]) + RSEQ_INJECT_INPUT + : "$4", "memory" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + return 0; +abort: + RSEQ_INJECT_FAILED + return -1; +cmpfail: + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +error2: + rseq_bug("expected value comparison failed"); +#endif +} + +#endif /* !RSEQ_SKIP_FASTPATH */ diff --git a/tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h b/tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h new file mode 100644 index 000000000..263eee84f --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-ppc-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + */ + +#ifndef _RSEQ_PPC_THREAD_POINTER +#define _RSEQ_PPC_THREAD_POINTER + +#ifdef __cplusplus +extern "C" { +#endif + +static inline void *rseq_thread_pointer(void) +{ +#ifdef __powerpc64__ + register void *__result asm ("r13"); +#else + register void *__result asm ("r2"); +#endif + asm ("" : "=r" (__result)); + return __result; +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/tools/testing/selftests/rseq/rseq-ppc.h b/tools/testing/selftests/rseq/rseq-ppc.h new file mode 100644 index 000000000..bab8e0b9f --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-ppc.h @@ -0,0 
+1,791 @@ +/* SPDX-License-Identifier: LGPL-2.1 OR MIT */ +/* + * rseq-ppc.h + * + * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + * (C) Copyright 2016-2018 - Boqun Feng <boqun.feng@gmail.com> + */ + +/* + * RSEQ_SIG is used with the following trap instruction: + * + * powerpc-be: 0f e5 00 0b twui r5,11 + * powerpc64-le: 0b 00 e5 0f twui r5,11 + * powerpc64-be: 0f e5 00 0b twui r5,11 + */ + +#define RSEQ_SIG 0x0fe5000b + +#define rseq_smp_mb() __asm__ __volatile__ ("sync" ::: "memory", "cc") +#define rseq_smp_lwsync() __asm__ __volatile__ ("lwsync" ::: "memory", "cc") +#define rseq_smp_rmb() rseq_smp_lwsync() +#define rseq_smp_wmb() rseq_smp_lwsync() + +#define rseq_smp_load_acquire(p) \ +__extension__ ({ \ + __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ + rseq_smp_lwsync(); \ + ____p1; \ +}) + +#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_lwsync() + +#define rseq_smp_store_release(p, v) \ +do { \ + rseq_smp_lwsync(); \ + RSEQ_WRITE_ONCE(*p, v); \ +} while (0) + +#ifdef RSEQ_SKIP_FASTPATH +#include "rseq-skip.h" +#else /* !RSEQ_SKIP_FASTPATH */ + +/* + * The __rseq_cs_ptr_array and __rseq_cs sections can be used by debuggers to + * better handle single-stepping through the restartable critical sections. + */ + +#ifdef __PPC64__ + +#define RSEQ_STORE_LONG(arg) "std%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* To memory ("m" constraint) */ +#define RSEQ_STORE_INT(arg) "stw%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* To memory ("m" constraint) */ +#define RSEQ_LOAD_LONG(arg) "ld%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* From memory ("m" constraint) */ +#define RSEQ_LOAD_INT(arg) "lwz%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* From memory ("m" constraint) */ +#define RSEQ_LOADX_LONG "ldx " /* From base register ("b" constraint) */ +#define RSEQ_CMP_LONG "cmpd " +#define RSEQ_CMP_LONG_INT "cmpdi " + +#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \ + start_ip, post_commit_offset, abort_ip) \ + ".pushsection __rseq_cs, \"aw\"\n\t" \ + ".balign 32\n\t" \ + __rseq_str(label) ":\n\t" \ + ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + ".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \ + ".popsection\n\t" \ + ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \ + ".quad " __rseq_str(label) "b\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \ + RSEQ_INJECT_ASM(1) \ + "lis %%r17, (" __rseq_str(cs_label) ")@highest\n\t" \ + "ori %%r17, %%r17, (" __rseq_str(cs_label) ")@higher\n\t" \ + "rldicr %%r17, %%r17, 32, 31\n\t" \ + "oris %%r17, %%r17, (" __rseq_str(cs_label) ")@high\n\t" \ + "ori %%r17, %%r17, (" __rseq_str(cs_label) ")@l\n\t" \ + "std %%r17, %[" __rseq_str(rseq_cs) "]\n\t" \ + __rseq_str(label) ":\n\t" + +/* + * Exit points of a rseq critical section consist of all instructions outside + * of the critical section where a critical section can either branch to or + * reach through the normal course of its execution. The abort IP and the + * post-commit IP are already part of the __rseq_cs section and should not be + * explicitly defined as additional exit points. Knowing all exit points is + * useful to assist debuggers stepping over the critical section. 
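+ * + * For example, in rseq_cmpeqv_storev() below, the branch taken to the + * cmpfail label when @v does not contain @expect leaves the critical + * section before the commit store, so that target is declared with + * RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]).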
+ */ +#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \ + ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \ + ".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n\t" \ + ".popsection\n\t" + +#else /* #ifdef __PPC64__ */ + +#define RSEQ_STORE_LONG(arg) "stw%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* To memory ("m" constraint) */ +#define RSEQ_STORE_INT(arg) RSEQ_STORE_LONG(arg) /* To memory ("m" constraint) */ +#define RSEQ_LOAD_LONG(arg) "lwz%U[" __rseq_str(arg) "]%X[" __rseq_str(arg) "] " /* From memory ("m" constraint) */ +#define RSEQ_LOAD_INT(arg) RSEQ_LOAD_LONG(arg) /* From memory ("m" constraint) */ +#define RSEQ_LOADX_LONG "lwzx " /* From base register ("b" constraint) */ +#define RSEQ_CMP_LONG "cmpw " +#define RSEQ_CMP_LONG_INT "cmpwi " + +#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \ + start_ip, post_commit_offset, abort_ip) \ + ".pushsection __rseq_cs, \"aw\"\n\t" \ + ".balign 32\n\t" \ + __rseq_str(label) ":\n\t" \ + ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + /* 32-bit only supported on BE */ \ + ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \ + ".popsection\n\t" \ + ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \ + ".long 0x0, " __rseq_str(label) "b\n\t" \ + ".popsection\n\t" + +/* + * Exit points of a rseq critical section consist of all instructions outside + * of the critical section where a critical section can either branch to or + * reach through the normal course of its execution. The abort IP and the + * post-commit IP are already part of the __rseq_cs section and should not be + * explicitly defined as additional exit points. Knowing all exit points is + * useful to assist debuggers stepping over the critical section. 
*/ +#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \ + ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \ + /* 32-bit only supported on BE */ \ + ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) "\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \ + RSEQ_INJECT_ASM(1) \ + "lis %%r17, (" __rseq_str(cs_label) ")@ha\n\t" \ + "addi %%r17, %%r17, (" __rseq_str(cs_label) ")@l\n\t" \ + RSEQ_STORE_INT(rseq_cs) "%%r17, %[" __rseq_str(rseq_cs) "]\n\t" \ + __rseq_str(label) ":\n\t" + +#endif /* #ifdef __PPC64__ */ + +#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \ + __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \ + (post_commit_ip - start_ip), abort_ip) + +#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \ + RSEQ_INJECT_ASM(2) \ + RSEQ_LOAD_INT(current_cpu_id) "%%r17, %[" __rseq_str(current_cpu_id) "]\n\t" \ + "cmpw cr7, %[" __rseq_str(cpu_id) "], %%r17\n\t" \ + "bne- cr7, " __rseq_str(label) "\n\t" + +#define RSEQ_ASM_DEFINE_ABORT(label, abort_label) \ + ".pushsection __rseq_failure, \"ax\"\n\t" \ + ".long " __rseq_str(RSEQ_SIG) "\n\t" \ + __rseq_str(label) ":\n\t" \ + "b %l[" __rseq_str(abort_label) "]\n\t" \ + ".popsection\n\t" + +/* + * RSEQ_ASM_OPs: asm operations for rseq + * RSEQ_ASM_OP_R_*: has hard-coded registers in it + * RSEQ_ASM_OP_* (else): doesn't have hard-coded registers (unless cr7) + */ +#define RSEQ_ASM_OP_CMPEQ(var, expect, label) \ + RSEQ_LOAD_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" \ + RSEQ_CMP_LONG "cr7, %%r17, %[" __rseq_str(expect) "]\n\t" \ + "bne- cr7, " __rseq_str(label) "\n\t" + +#define RSEQ_ASM_OP_CMPNE(var, expectnot, label) \ + RSEQ_LOAD_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" \ + RSEQ_CMP_LONG "cr7, %%r17, %[" __rseq_str(expectnot) "]\n\t" \ + "beq- cr7, " __rseq_str(label) "\n\t" + +#define RSEQ_ASM_OP_STORE(value, var) \ + RSEQ_STORE_LONG(var) "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t" + +/* Load @var to r17 */ +#define RSEQ_ASM_OP_R_LOAD(var) \ + RSEQ_LOAD_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" + +/* Store r17 to @var */ +#define RSEQ_ASM_OP_R_STORE(var) \ + RSEQ_STORE_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" + +/* Add @count to r17 */ +#define RSEQ_ASM_OP_R_ADD(count) \ + "add %%r17, %[" __rseq_str(count) "], %%r17\n\t" + +/* Load (r17 + voffp) to r17 */ +#define RSEQ_ASM_OP_R_LOADX(voffp) \ + RSEQ_LOADX_LONG "%%r17, %[" __rseq_str(voffp) "], %%r17\n\t" + +/* TODO: implement a faster memcpy.
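+ * The loop below copies one byte per iteration: r19 holds the remaining + * length, and r20/r21 are pre-decremented once so that the lbzu/stbu + * update forms can advance the source and destination pointers as they + * load and store each byte.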
*/ +#define RSEQ_ASM_OP_R_MEMCPY() \ + RSEQ_CMP_LONG_INT "%%r19, 0\n\t" \ + "beq 333f\n\t" \ + "addi %%r20, %%r20, -1\n\t" \ + "addi %%r21, %%r21, -1\n\t" \ + "222:\n\t" \ + "lbzu %%r18, 1(%%r20)\n\t" \ + "stbu %%r18, 1(%%r21)\n\t" \ + "addi %%r19, %%r19, -1\n\t" \ + RSEQ_CMP_LONG_INT "%%r19, 0\n\t" \ + "bne 222b\n\t" \ + "333:\n\t" \ + +#define RSEQ_ASM_OP_R_FINAL_STORE(var, post_commit_label) \ + RSEQ_STORE_LONG(var) "%%r17, %[" __rseq_str(var) "]\n\t" \ + __rseq_str(post_commit_label) ":\n\t" + +#define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label) \ + RSEQ_STORE_LONG(var) "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t" \ + __rseq_str(post_commit_label) ":\n\t" + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) +#endif + /* final store */ + RSEQ_ASM_OP_FINAL_STORE(newv, v, 2) + RSEQ_INJECT_ASM(5) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "memory", "cc", "r17" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, + long voffp, intptr_t *load, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + /* cmp @v not equal to @expectnot */ + RSEQ_ASM_OP_CMPNE(v, expectnot, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + /* cmp @v not equal to @expectnot */ + RSEQ_ASM_OP_CMPNE(v, expectnot, %l[error2]) +#endif + /* load the value of @v */ + RSEQ_ASM_OP_R_LOAD(v) + /* store it in @load */ + RSEQ_ASM_OP_R_STORE(load) + /* dereference voffp(v) */ + RSEQ_ASM_OP_R_LOADX(voffp) + /* final store the value at voffp(v) */ + RSEQ_ASM_OP_R_FINAL_STORE(v, 2) + RSEQ_INJECT_ASM(5) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [expectnot] "r" (expectnot), + [voffp] "b" (voffp), + [load] "m" (*load) + RSEQ_INJECT_INPUT + : "memory", "cc", "r17" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_addv(intptr_t *v, intptr_t count, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) +#ifdef RSEQ_COMPARE_TWICE + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) +#endif + /* load the value of @v */ + RSEQ_ASM_OP_R_LOAD(v) + /* add @count to it */ + RSEQ_ASM_OP_R_ADD(count) + /* final store */ + RSEQ_ASM_OP_R_FINAL_STORE(v, 2) + RSEQ_INJECT_ASM(4) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [count] "r" (count) + RSEQ_INJECT_INPUT + : "memory", "cc", "r17" + RSEQ_INJECT_CLOBBER + : abort +#ifdef RSEQ_COMPARE_TWICE + , error1 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) +#endif + /* try store */ + RSEQ_ASM_OP_STORE(newv2, v2) + RSEQ_INJECT_ASM(5) + /* final store */ + RSEQ_ASM_OP_FINAL_STORE(newv, v, 2) + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* try store input */ + [v2] "m" (*v2), + [newv2] "r" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "memory", "cc", "r17" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) +#endif + /* try store */ + RSEQ_ASM_OP_STORE(newv2, v2) + RSEQ_INJECT_ASM(5) + /* for 'release' */ + "lwsync\n\t" + /* final store */ + RSEQ_ASM_OP_FINAL_STORE(newv, v, 2) + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* try store input */ + [v2] "m" (*v2), + [newv2] "r" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "memory", "cc", "r17" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t expect2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) + /* cmp @v2 equal to @expect2 */ + RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[cmpfail]) + RSEQ_INJECT_ASM(5) +#ifdef RSEQ_COMPARE_TWICE + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) + /* cmp @v2 equal to @expect2 */ + RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[error3]) +#endif + /* final store */ + RSEQ_ASM_OP_FINAL_STORE(newv, v, 2) + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* cmp2 input */ + [v2] "m" (*v2), + [expect2] "r" (expect2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "memory", "cc", "r17" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2, error3 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("1st expected value comparison failed"); +error3: + rseq_after_asm_goto(); + rseq_bug("2nd expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* setup for memcpy */ + "mr %%r19, %[len]\n\t" + "mr %%r20, %[src]\n\t" + "mr %%r21, %[dst]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs.
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) +#endif + /* try memcpy */ + RSEQ_ASM_OP_R_MEMCPY() + RSEQ_INJECT_ASM(5) + /* final store */ + RSEQ_ASM_OP_FINAL_STORE(newv, v, 2) + RSEQ_INJECT_ASM(6) + /* teardown */ + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len) + RSEQ_INJECT_INPUT + : "memory", "cc", "r17", "r18", "r19", "r20", "r21" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* setup for memcpy */ + "mr %%r19, %[len]\n\t" + "mr %%r20, %[src]\n\t" + "mr %%r21, %[dst]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs.
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail]) + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + /* cmp cpuid */ + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + /* cmp @v equal to @expect */ + RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2]) +#endif + /* try memcpy */ + RSEQ_ASM_OP_R_MEMCPY() + RSEQ_INJECT_ASM(5) + /* for 'release' */ + "lwsync\n\t" + /* final store */ + RSEQ_ASM_OP_FINAL_STORE(newv, v, 2) + RSEQ_INJECT_ASM(6) + /* teardown */ + RSEQ_ASM_DEFINE_ABORT(4, abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len) + RSEQ_INJECT_INPUT + : "memory", "cc", "r17", "r18", "r19", "r20", "r21" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +#endif /* !RSEQ_SKIP_FASTPATH */ diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h new file mode 100644 index 000000000..4e6dc5f0c --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-s390.h @@ -0,0 +1,610 @@ +/* SPDX-License-Identifier: LGPL-2.1 OR MIT */ + +/* + * RSEQ_SIG uses the trap4 instruction. As Linux does not make use of the + * access-register mode or the linkage stack, this instruction will always + * cause a special-operation exception (the trap-enabled bit in the DUCT + * is and will stay 0).
The instruction pattern is + * b2 ff 0f ff trap4 4095(%r0) + */ +#define RSEQ_SIG 0xB2FF0FFF + +#define rseq_smp_mb() __asm__ __volatile__ ("bcr 15,0" ::: "memory") +#define rseq_smp_rmb() rseq_smp_mb() +#define rseq_smp_wmb() rseq_smp_mb() + +#define rseq_smp_load_acquire(p) \ +__extension__ ({ \ + __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ + rseq_barrier(); \ + ____p1; \ +}) + +#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb() + +#define rseq_smp_store_release(p, v) \ +do { \ + rseq_barrier(); \ + RSEQ_WRITE_ONCE(*p, v); \ +} while (0) + +#ifdef RSEQ_SKIP_FASTPATH +#include "rseq-skip.h" +#else /* !RSEQ_SKIP_FASTPATH */ + +#ifdef __s390x__ + +#define LONG_L "lg" +#define LONG_S "stg" +#define LONG_LT_R "ltgr" +#define LONG_CMP "cg" +#define LONG_CMP_R "cgr" +#define LONG_ADDI "aghi" +#define LONG_ADD_R "agr" + +#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \ + start_ip, post_commit_offset, abort_ip) \ + ".pushsection __rseq_cs, \"aw\"\n\t" \ + ".balign 32\n\t" \ + __rseq_str(label) ":\n\t" \ + ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + ".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \ + ".popsection\n\t" \ + ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \ + ".quad " __rseq_str(label) "b\n\t" \ + ".popsection\n\t" + +/* + * Exit points of a rseq critical section consist of all instructions outside + * of the critical section where a critical section can either branch to or + * reach through the normal course of its execution. The abort IP and the + * post-commit IP are already part of the __rseq_cs section and should not be + * explicitly defined as additional exit points. Knowing all exit points is + * useful to assist debuggers stepping over the critical section. + */ +#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \ + ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \ + ".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n\t" \ + ".popsection\n\t" + +#elif __s390__ + +#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \ + start_ip, post_commit_offset, abort_ip) \ + ".pushsection __rseq_cs, \"aw\"\n\t" \ + ".balign 32\n\t" \ + __rseq_str(label) ":\n\t" \ + ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \ + ".popsection\n\t" \ + ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \ + ".long 0x0, " __rseq_str(label) "b\n\t" \ + ".popsection\n\t" + +/* + * Exit points of a rseq critical section consist of all instructions outside + * of the critical section where a critical section can either branch to or + * reach through the normal course of its execution. The abort IP and the + * post-commit IP are already part of the __rseq_cs section and should not be + * explicitly defined as additional exit points. Knowing all exit points is + * useful to assist debuggers stepping over the critical section. 
+ */ +#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \ + ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \ + ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) "\n\t" \ + ".popsection\n\t" + +#define LONG_L "l" +#define LONG_S "st" +#define LONG_LT_R "ltr" +#define LONG_CMP "c" +#define LONG_CMP_R "cr" +#define LONG_ADDI "ahi" +#define LONG_ADD_R "ar" + +#endif + +#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \ + __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \ + (post_commit_ip - start_ip), abort_ip) + +#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \ + RSEQ_INJECT_ASM(1) \ + "larl %%r0, " __rseq_str(cs_label) "\n\t" \ + LONG_S " %%r0, %[" __rseq_str(rseq_cs) "]\n\t" \ + __rseq_str(label) ":\n\t" + +#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \ + RSEQ_INJECT_ASM(2) \ + "c %[" __rseq_str(cpu_id) "], %[" __rseq_str(current_cpu_id) "]\n\t" \ + "jnz " __rseq_str(label) "\n\t" + +#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \ + ".pushsection __rseq_failure, \"ax\"\n\t" \ + ".long " __rseq_str(RSEQ_SIG) "\n\t" \ + __rseq_str(label) ":\n\t" \ + teardown \ + "jg %l[" __rseq_str(abort_label) "]\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \ + ".pushsection __rseq_failure, \"ax\"\n\t" \ + __rseq_str(label) ":\n\t" \ + teardown \ + "jg %l[" __rseq_str(cmpfail_label) "]\n\t" \ + ".popsection\n\t" + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_CMP " %[expect], %[v]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_CMP " %[expect], %[v]\n\t" + "jnz %l[error2]\n\t" +#endif + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "memory", "cc", "r0" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +/* + * Compare @v against @expectnot. When it does _not_ match, load @v + * into @load, and store the content of *@v + voffp into @v. 
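+ * + * Returns 0 on success, 1 when *@v matches @expectnot (cmpfail), and -1 + * when the sequence aborts. An illustrative sketch (assuming the + * percpu_list types used elsewhere in these selftests) is popping the + * head of a per-cpu list: + * + * struct percpu_list_node *head; + * int ret; + * + * ret = rseq_cmpnev_storeoffp_load((intptr_t *)&list->c[cpu].head, + * (intptr_t)NULL, + * offsetof(struct percpu_list_node, next), + * (intptr_t *)&head, cpu); + * + * On success, @head holds the removed node and the per-cpu list head has + * been replaced by head->next; ret == 1 means the list was empty.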
+ */ +static inline __attribute__((always_inline)) +int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, + long voffp, intptr_t *load, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_L " %%r1, %[v]\n\t" + LONG_CMP_R " %%r1, %[expectnot]\n\t" + "je %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_L " %%r1, %[v]\n\t" + LONG_CMP_R " %%r1, %[expectnot]\n\t" + "je %l[error2]\n\t" +#endif + LONG_S " %%r1, %[load]\n\t" + LONG_ADD_R " %%r1, %[voffp]\n\t" + LONG_L " %%r1, 0(%%r1)\n\t" + /* final store */ + LONG_S " %%r1, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [expectnot] "r" (expectnot), + [voffp] "r" (voffp), + [load] "m" (*load) + RSEQ_INJECT_INPUT + : "memory", "cc", "r0", "r1" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_addv(intptr_t *v, intptr_t count, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) +#endif + LONG_L " %%r0, %[v]\n\t" + LONG_ADD_R " %%r0, %[count]\n\t" + /* final store */ + LONG_S " %%r0, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(4) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [count] "r" (count) + RSEQ_INJECT_INPUT + : "memory", "cc", "r0" + RSEQ_INJECT_CLOBBER + : abort +#ifdef RSEQ_COMPARE_TWICE + , error1 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_CMP " %[expect], %[v]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_CMP " %[expect], %[v]\n\t" + "jnz %l[error2]\n\t" +#endif + /* try store */ + LONG_S " %[newv2], %[v2]\n\t" + RSEQ_INJECT_ASM(5) + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* try store input */ + [v2] "m" (*v2), + [newv2] "r" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "memory", "cc", "r0" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +/* s390 is TSO. 
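+ * Under TSO only store-load reordering is possible, so all loads and + * stores preceding the final store are already ordered before it; the + * release variant below therefore needs no extra barrier and simply + * delegates to the plain version.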
*/ +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + return rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2, newv, cpu); +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t expect2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_CMP " %[expect], %[v]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) + LONG_CMP " %[expect2], %[v2]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(5) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1]) + LONG_CMP " %[expect], %[v]\n\t" + "jnz %l[error2]\n\t" + LONG_CMP " %[expect2], %[v2]\n\t" + "jnz %l[error3]\n\t" +#endif + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* cmp2 input */ + [v2] "m" (*v2), + [expect2] "r" (expect2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + RSEQ_INJECT_INPUT + : "memory", "cc", "r0" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2, error3 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("1st expected value comparison failed"); +error3: + rseq_after_asm_goto(); + rseq_bug("2nd expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + uint64_t rseq_scratch[3]; + + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + LONG_S " %[src], %[rseq_scratch0]\n\t" + LONG_S " %[dst], %[rseq_scratch1]\n\t" + LONG_S " %[len], %[rseq_scratch2]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs) + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f) + RSEQ_INJECT_ASM(3) + LONG_CMP " %[expect], %[v]\n\t" + "jnz 5f\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f) + LONG_CMP " %[expect], %[v]\n\t" + "jnz 7f\n\t" +#endif + /* try memcpy */ + LONG_LT_R " %[len], %[len]\n\t" + "jz 333f\n\t" + "222:\n\t" + "ic %%r0,0(%[src])\n\t" + "stc %%r0,0(%[dst])\n\t" + LONG_ADDI " %[src], 1\n\t" + LONG_ADDI " %[dst], 1\n\t" + LONG_ADDI " %[len], -1\n\t" + "jnz 222b\n\t" + "333:\n\t" + RSEQ_INJECT_ASM(5) + /* final store */ + LONG_S " %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + /* teardown */ + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t" + RSEQ_ASM_DEFINE_ABORT(4, + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + abort) + RSEQ_ASM_DEFINE_CMPFAIL(5, + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + cmpfail) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_CMPFAIL(6, + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + error1) + RSEQ_ASM_DEFINE_CMPFAIL(7, + LONG_L " %[len], %[rseq_scratch2]\n\t" + LONG_L " %[dst], %[rseq_scratch1]\n\t" + LONG_L " %[src], %[rseq_scratch0]\n\t", + error2) +#endif + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [current_cpu_id] "m" (rseq_get_abi()->cpu_id), + [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len), + [rseq_scratch0] "m" (rseq_scratch[0]), + [rseq_scratch1] "m" (rseq_scratch[1]), + [rseq_scratch2] "m" (rseq_scratch[2]) + RSEQ_INJECT_INPUT + : "memory", "cc", "r0" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +/* s390 is TSO. 
*/ +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + return rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len, + newv, cpu); +} +#endif /* !RSEQ_SKIP_FASTPATH */ diff --git a/tools/testing/selftests/rseq/rseq-skip.h b/tools/testing/selftests/rseq/rseq-skip.h new file mode 100644 index 000000000..7b53dac1f --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-skip.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: LGPL-2.1 OR MIT */ +/* + * rseq-skip.h + * + * (C) Copyright 2017-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + */ + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) +{ + return -1; +} + +static inline __attribute__((always_inline)) +int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, + long voffp, intptr_t *load, int cpu) +{ + return -1; +} + +static inline __attribute__((always_inline)) +int rseq_addv(intptr_t *v, intptr_t count, int cpu) +{ + return -1; +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + return -1; +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + return -1; +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t expect2, + intptr_t newv, int cpu) +{ + return -1; +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + return -1; +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + return -1; +} diff --git a/tools/testing/selftests/rseq/rseq-thread-pointer.h b/tools/testing/selftests/rseq/rseq-thread-pointer.h new file mode 100644 index 000000000..977c25d75 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-thread-pointer.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + */ + +#ifndef _RSEQ_THREAD_POINTER +#define _RSEQ_THREAD_POINTER + +#if defined(__x86_64__) || defined(__i386__) +#include "rseq-x86-thread-pointer.h" +#elif defined(__PPC__) +#include "rseq-ppc-thread-pointer.h" +#else +#include "rseq-generic-thread-pointer.h" +#endif + +#endif diff --git a/tools/testing/selftests/rseq/rseq-x86-thread-pointer.h b/tools/testing/selftests/rseq/rseq-x86-thread-pointer.h new file mode 100644 index 000000000..d3133587d --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-x86-thread-pointer.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: LGPL-2.1-only OR MIT */ +/* + * rseq-x86-thread-pointer.h + * + * (C) Copyright 2021 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + */ + +#ifndef _RSEQ_X86_THREAD_POINTER +#define _RSEQ_X86_THREAD_POINTER + +#include <features.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#if __GNUC_PREREQ (11, 1) +static inline void *rseq_thread_pointer(void) +{ + return __builtin_thread_pointer(); +} +#else +static inline void *rseq_thread_pointer(void) +{ + void *__result; 
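+ /* + * In the x86 TLS ABI the first word of the TCB holds the thread + * pointer itself, so it can be read back from %fs:0 on x86-64 or + * %gs:0 on i386, which is what the asm below does. + */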
+ +# ifdef __x86_64__ + __asm__ ("mov %%fs:0, %0" : "=r" (__result)); +# else + __asm__ ("mov %%gs:0, %0" : "=r" (__result)); +# endif + return __result; +} +#endif /* !GCC 11 */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/tools/testing/selftests/rseq/rseq-x86.h b/tools/testing/selftests/rseq/rseq-x86.h new file mode 100644 index 000000000..bd01dc41c --- /dev/null +++ b/tools/testing/selftests/rseq/rseq-x86.h @@ -0,0 +1,1365 @@ +/* SPDX-License-Identifier: LGPL-2.1 OR MIT */ +/* + * rseq-x86.h + * + * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + */ + +#include <stdint.h> + +/* + * RSEQ_SIG is used with the following reserved undefined instructions, which + * trap in user-space: + * + * x86-32: 0f b9 3d 53 30 05 53 ud1 0x53053053,%edi + * x86-64: 0f b9 3d 53 30 05 53 ud1 0x53053053(%rip),%edi + */ +#define RSEQ_SIG 0x53053053 + +/* + * Due to a compiler optimization bug in gcc-8 with asm goto and TLS asm input + * operands, we cannot use "m" input operands, and rather pass the __rseq_abi + * address through a "r" input operand. + */ + +/* Offset of cpu_id and rseq_cs fields in struct rseq. */ +#define RSEQ_CPU_ID_OFFSET 4 +#define RSEQ_CS_OFFSET 8 + +#ifdef __x86_64__ + +#define RSEQ_ASM_TP_SEGMENT %%fs + +#define rseq_smp_mb() \ + __asm__ __volatile__ ("lock; addl $0,-128(%%rsp)" ::: "memory", "cc") +#define rseq_smp_rmb() rseq_barrier() +#define rseq_smp_wmb() rseq_barrier() + +#define rseq_smp_load_acquire(p) \ +__extension__ ({ \ + __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ + rseq_barrier(); \ + ____p1; \ +}) + +#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb() + +#define rseq_smp_store_release(p, v) \ +do { \ + rseq_barrier(); \ + RSEQ_WRITE_ONCE(*p, v); \ +} while (0) + +#ifdef RSEQ_SKIP_FASTPATH +#include "rseq-skip.h" +#else /* !RSEQ_SKIP_FASTPATH */ + +#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \ + start_ip, post_commit_offset, abort_ip) \ + ".pushsection __rseq_cs, \"aw\"\n\t" \ + ".balign 32\n\t" \ + __rseq_str(label) ":\n\t" \ + ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + ".quad " __rseq_str(start_ip) ", " __rseq_str(post_commit_offset) ", " __rseq_str(abort_ip) "\n\t" \ + ".popsection\n\t" \ + ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \ + ".quad " __rseq_str(label) "b\n\t" \ + ".popsection\n\t" + + +#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \ + __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \ + (post_commit_ip - start_ip), abort_ip) + +/* + * Exit points of a rseq critical section consist of all instructions outside + * of the critical section where a critical section can either branch to or + * reach through the normal course of its execution. The abort IP and the + * post-commit IP are already part of the __rseq_cs section and should not be + * explicitly defined as additional exit points. Knowing all exit points is + * useful to assist debuggers stepping over the critical section. 
+ */ +#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \ + ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \ + ".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \ + RSEQ_INJECT_ASM(1) \ + "leaq " __rseq_str(cs_label) "(%%rip), %%rax\n\t" \ + "movq %%rax, " __rseq_str(rseq_cs) "\n\t" \ + __rseq_str(label) ":\n\t" + +#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \ + RSEQ_INJECT_ASM(2) \ + "cmpl %[" __rseq_str(cpu_id) "], " __rseq_str(current_cpu_id) "\n\t" \ + "jnz " __rseq_str(label) "\n\t" + +#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \ + ".pushsection __rseq_failure, \"ax\"\n\t" \ + /* Disassembler-friendly signature: ud1 <sig>(%rip),%edi. */ \ + ".byte 0x0f, 0xb9, 0x3d\n\t" \ + ".long " __rseq_str(RSEQ_SIG) "\n\t" \ + __rseq_str(label) ":\n\t" \ + teardown \ + "jmp %l[" __rseq_str(abort_label) "]\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \ + ".pushsection __rseq_failure, \"ax\"\n\t" \ + __rseq_str(label) ":\n\t" \ + teardown \ + "jmp %l[" __rseq_str(cmpfail_label) "]\n\t" \ + ".popsection\n\t" + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "cmpq %[v], %[expect]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) + "cmpq %[v], %[expect]\n\t" + "jnz %l[error2]\n\t" +#endif + /* final store */ + "movq %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + : "memory", "cc", "rax" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +/* + * Compare @v against @expectnot. When it does _not_ match, load @v + * into @load, and store the content of *@v + voffp into @v. + */ +static inline __attribute__((always_inline)) +int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, + long voffp, intptr_t *load, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "movq %[v], %%rbx\n\t" + "cmpq %%rbx, %[expectnot]\n\t" + "je %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) + "movq %[v], %%rbx\n\t" + "cmpq %%rbx, %[expectnot]\n\t" + "je %l[error2]\n\t" +#endif + "movq %%rbx, %[load]\n\t" + "addq %[voffp], %%rbx\n\t" + "movq (%%rbx), %%rbx\n\t" + /* final store */ + "movq %%rbx, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* final store input */ + [v] "m" (*v), + [expectnot] "r" (expectnot), + [voffp] "er" (voffp), + [load] "m" (*load) + : "memory", "cc", "rax", "rbx" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_addv(intptr_t *v, intptr_t count, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) +#endif + /* final store */ + "addq %[count], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(4) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* final store input */ + [v] "m" (*v), + [count] "er" (count) + : "memory", "cc", "rax" + RSEQ_INJECT_CLOBBER + : abort +#ifdef RSEQ_COMPARE_TWICE + , error1 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +#endif +} + +#define RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV + +/* + * pval = *(ptr+off) + * *pval += inc; + */ +static inline __attribute__((always_inline)) +int rseq_offset_deref_addv(intptr_t *ptr, long off, intptr_t inc, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
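+	   Unlike the helpers above, this sequence loads a pointer
+	   (*(ptr + off)) inside the critical section and increments through
+	   it, so the dereference and the add restart as a single unit.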
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) +#endif + /* get p+v */ + "movq %[ptr], %%rbx\n\t" + "addq %[off], %%rbx\n\t" + /* get pv */ + "movq (%%rbx), %%rcx\n\t" + /* *pv += inc */ + "addq %[inc], (%%rcx)\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(4) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* final store input */ + [ptr] "m" (*ptr), + [off] "er" (off), + [inc] "er" (inc) + : "memory", "cc", "rax", "rbx", "rcx" + RSEQ_INJECT_CLOBBER + : abort +#ifdef RSEQ_COMPARE_TWICE + , error1 +#endif + ); + return 0; +abort: + RSEQ_INJECT_FAILED + return -1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_bug("cpu_id comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "cmpq %[v], %[expect]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) + "cmpq %[v], %[expect]\n\t" + "jnz %l[error2]\n\t" +#endif + /* try store */ + "movq %[newv2], %[v2]\n\t" + RSEQ_INJECT_ASM(5) + /* final store */ + "movq %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* try store input */ + [v2] "m" (*v2), + [newv2] "r" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + : "memory", "cc", "rax" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +/* x86-64 is TSO. 
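+ */
+
+/*
+ * Editorial usage sketch, not part of the upstream file: pushing a node
+ * onto a per-cpu linked list with rseq_cmpeqv_trystorev_storev(). The
+ * node type and function names are hypothetical; callers retry on a
+ * non-zero return (1: head changed concurrently, -1: abort).
+ */
+struct example_node {
+	struct example_node *next;
+};
+
+static inline int example_list_push(struct example_node **head,
+				    struct example_node *node, int cpu)
+{
+	intptr_t expect = (intptr_t)RSEQ_READ_ONCE(*head);
+
+	/* Link node->next to the old head, then commit node as new head. */
+	return rseq_cmpeqv_trystorev_storev((intptr_t *)head, expect,
+					    (intptr_t *)&node->next, expect,
+					    (intptr_t)node, cpu);
+}
+
+/* x86-64 is TSO: plain stores already provide release ordering, so the
+ * release variant can reuse the plain implementation.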
*/ +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + return rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2, newv, cpu); +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t expect2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "cmpq %[v], %[expect]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) + "cmpq %[v2], %[expect2]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(5) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) + "cmpq %[v], %[expect]\n\t" + "jnz %l[error2]\n\t" + "cmpq %[v2], %[expect2]\n\t" + "jnz %l[error3]\n\t" +#endif + /* final store */ + "movq %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* cmp2 input */ + [v2] "m" (*v2), + [expect2] "r" (expect2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + : "memory", "cc", "rax" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2, error3 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("1st expected value comparison failed"); +error3: + rseq_after_asm_goto(); + rseq_bug("2nd expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + uint64_t rseq_scratch[3]; + + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + "movq %[src], %[rseq_scratch0]\n\t" + "movq %[dst], %[rseq_scratch1]\n\t" + "movq %[len], %[rseq_scratch2]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs. 
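+	   The src/dst/len registers were saved to the scratch slots above
+	   because the copy loop clobbers them; every exit path restores
+	   them in its teardown.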
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "cmpq %[v], %[expect]\n\t" + "jnz 5f\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f) + "cmpq %[v], %[expect]\n\t" + "jnz 7f\n\t" +#endif + /* try memcpy */ + "test %[len], %[len]\n\t" \ + "jz 333f\n\t" \ + "222:\n\t" \ + "movb (%[src]), %%al\n\t" \ + "movb %%al, (%[dst])\n\t" \ + "inc %[src]\n\t" \ + "inc %[dst]\n\t" \ + "dec %[len]\n\t" \ + "jnz 222b\n\t" \ + "333:\n\t" \ + RSEQ_INJECT_ASM(5) + /* final store */ + "movq %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + /* teardown */ + "movq %[rseq_scratch2], %[len]\n\t" + "movq %[rseq_scratch1], %[dst]\n\t" + "movq %[rseq_scratch0], %[src]\n\t" + RSEQ_ASM_DEFINE_ABORT(4, + "movq %[rseq_scratch2], %[len]\n\t" + "movq %[rseq_scratch1], %[dst]\n\t" + "movq %[rseq_scratch0], %[src]\n\t", + abort) + RSEQ_ASM_DEFINE_CMPFAIL(5, + "movq %[rseq_scratch2], %[len]\n\t" + "movq %[rseq_scratch1], %[dst]\n\t" + "movq %[rseq_scratch0], %[src]\n\t", + cmpfail) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_CMPFAIL(6, + "movq %[rseq_scratch2], %[len]\n\t" + "movq %[rseq_scratch1], %[dst]\n\t" + "movq %[rseq_scratch0], %[src]\n\t", + error1) + RSEQ_ASM_DEFINE_CMPFAIL(7, + "movq %[rseq_scratch2], %[len]\n\t" + "movq %[rseq_scratch1], %[dst]\n\t" + "movq %[rseq_scratch0], %[src]\n\t", + error2) +#endif + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len), + [rseq_scratch0] "m" (rseq_scratch[0]), + [rseq_scratch1] "m" (rseq_scratch[1]), + [rseq_scratch2] "m" (rseq_scratch[2]) + : "memory", "cc", "rax" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +/* x86-64 is TSO. 
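+ */
+
+/*
+ * Editorial usage sketch, not part of the upstream file: copying an item
+ * into a per-cpu array slot and publishing the new item count in one
+ * restartable sequence. All names are hypothetical; callers retry on a
+ * non-zero return.
+ */
+static inline int example_buffer_put(char *slots, size_t item_len,
+				     intptr_t *count, const void *item,
+				     int cpu)
+{
+	intptr_t offset = RSEQ_READ_ONCE(*count);
+
+	return rseq_cmpeqv_trymemcpy_storev(count, offset,
+					    &slots[offset * item_len],
+					    (void *)item, item_len,
+					    offset + 1, cpu);
+}
+
+/* x86-64 is TSO: the release variant of the memcpy helper can likewise
+ * alias the plain implementation.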
*/ +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + return rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len, + newv, cpu); +} + +#endif /* !RSEQ_SKIP_FASTPATH */ + +#elif defined(__i386__) + +#define RSEQ_ASM_TP_SEGMENT %%gs + +#define rseq_smp_mb() \ + __asm__ __volatile__ ("lock; addl $0,-128(%%esp)" ::: "memory", "cc") +#define rseq_smp_rmb() \ + __asm__ __volatile__ ("lock; addl $0,-128(%%esp)" ::: "memory", "cc") +#define rseq_smp_wmb() \ + __asm__ __volatile__ ("lock; addl $0,-128(%%esp)" ::: "memory", "cc") + +#define rseq_smp_load_acquire(p) \ +__extension__ ({ \ + __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ + rseq_smp_mb(); \ + ____p1; \ +}) + +#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb() + +#define rseq_smp_store_release(p, v) \ +do { \ + rseq_smp_mb(); \ + RSEQ_WRITE_ONCE(*p, v); \ +} while (0) + +#ifdef RSEQ_SKIP_FASTPATH +#include "rseq-skip.h" +#else /* !RSEQ_SKIP_FASTPATH */ + +/* + * Use eax as scratch register and take memory operands as input to + * lessen register pressure. Especially needed when compiling in O0. + */ +#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \ + start_ip, post_commit_offset, abort_ip) \ + ".pushsection __rseq_cs, \"aw\"\n\t" \ + ".balign 32\n\t" \ + __rseq_str(label) ":\n\t" \ + ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ + ".long " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \ + ".popsection\n\t" \ + ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \ + ".long " __rseq_str(label) "b, 0x0\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \ + __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \ + (post_commit_ip - start_ip), abort_ip) + +/* + * Exit points of a rseq critical section consist of all instructions outside + * of the critical section where a critical section can either branch to or + * reach through the normal course of its execution. The abort IP and the + * post-commit IP are already part of the __rseq_cs section and should not be + * explicitly defined as additional exit points. Knowing all exit points is + * useful to assist debuggers stepping over the critical section. + */ +#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \ + ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \ + ".long " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) ", 0x0\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \ + RSEQ_INJECT_ASM(1) \ + "movl $" __rseq_str(cs_label) ", " __rseq_str(rseq_cs) "\n\t" \ + __rseq_str(label) ":\n\t" + +#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \ + RSEQ_INJECT_ASM(2) \ + "cmpl %[" __rseq_str(cpu_id) "], " __rseq_str(current_cpu_id) "\n\t" \ + "jnz " __rseq_str(label) "\n\t" + +#define RSEQ_ASM_DEFINE_ABORT(label, teardown, abort_label) \ + ".pushsection __rseq_failure, \"ax\"\n\t" \ + /* Disassembler-friendly signature: ud1 <sig>,%edi. 
*/ \ + ".byte 0x0f, 0xb9, 0x3d\n\t" \ + ".long " __rseq_str(RSEQ_SIG) "\n\t" \ + __rseq_str(label) ":\n\t" \ + teardown \ + "jmp %l[" __rseq_str(abort_label) "]\n\t" \ + ".popsection\n\t" + +#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \ + ".pushsection __rseq_failure, \"ax\"\n\t" \ + __rseq_str(label) ":\n\t" \ + teardown \ + "jmp %l[" __rseq_str(cmpfail_label) "]\n\t" \ + ".popsection\n\t" + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "cmpl %[v], %[expect]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) + "cmpl %[v], %[expect]\n\t" + "jnz %l[error2]\n\t" +#endif + /* final store */ + "movl %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + : "memory", "cc", "eax" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +/* + * Compare @v against @expectnot. When it does _not_ match, load @v + * into @load, and store the content of *@v + voffp into @v. + */ +static inline __attribute__((always_inline)) +int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot, + long voffp, intptr_t *load, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
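+	   On i386 this is a plain 32-bit immediate store of the
+	   descriptor's address; there is no RIP-relative lea as in the
+	   x86-64 variant.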
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "movl %[v], %%ebx\n\t" + "cmpl %%ebx, %[expectnot]\n\t" + "je %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) + "movl %[v], %%ebx\n\t" + "cmpl %%ebx, %[expectnot]\n\t" + "je %l[error2]\n\t" +#endif + "movl %%ebx, %[load]\n\t" + "addl %[voffp], %%ebx\n\t" + "movl (%%ebx), %%ebx\n\t" + /* final store */ + "movl %%ebx, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(5) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* final store input */ + [v] "m" (*v), + [expectnot] "r" (expectnot), + [voffp] "ir" (voffp), + [load] "m" (*load) + : "memory", "cc", "eax", "ebx" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_addv(intptr_t *v, intptr_t count, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. */ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) +#endif + /* final store */ + "addl %[count], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(4) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* final store input */ + [v] "m" (*v), + [count] "ir" (count) + : "memory", "cc", "eax" + RSEQ_INJECT_CLOBBER + : abort +#ifdef RSEQ_COMPARE_TWICE + , error1 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
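+	   Note that this variant stages newv2 through %eax: taking memory
+	   operands as input keeps register pressure low on i386, as noted
+	   at the top of this section.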
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "cmpl %[v], %[expect]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) + "cmpl %[v], %[expect]\n\t" + "jnz %l[error2]\n\t" +#endif + /* try store */ + "movl %[newv2], %%eax\n\t" + "movl %%eax, %[v2]\n\t" + RSEQ_INJECT_ASM(5) + /* final store */ + "movl %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* try store input */ + [v2] "m" (*v2), + [newv2] "m" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "r" (newv) + : "memory", "cc", "eax" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t newv2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
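+	   The release ordering comes from the "lock; addl" fence issued
+	   just before the final store below.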
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "movl %[expect], %%eax\n\t" + "cmpl %[v], %%eax\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) + "movl %[expect], %%eax\n\t" + "cmpl %[v], %%eax\n\t" + "jnz %l[error2]\n\t" +#endif + /* try store */ + "movl %[newv2], %[v2]\n\t" + RSEQ_INJECT_ASM(5) + "lock; addl $0,-128(%%esp)\n\t" + /* final store */ + "movl %[newv], %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* try store input */ + [v2] "m" (*v2), + [newv2] "r" (newv2), + /* final store input */ + [v] "m" (*v), + [expect] "m" (expect), + [newv] "r" (newv) + : "memory", "cc", "eax" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif + +} + +static inline __attribute__((always_inline)) +int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect, + intptr_t *v2, intptr_t expect2, + intptr_t newv, int cpu) +{ + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3]) +#endif + /* Start rseq by storing table entry pointer into rseq_cs. 
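+	   Both locations are compared before the single commit store;
+	   a mismatch on either routes to cmpfail.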
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "cmpl %[v], %[expect]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(4) + "cmpl %[expect2], %[v2]\n\t" + "jnz %l[cmpfail]\n\t" + RSEQ_INJECT_ASM(5) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1]) + "cmpl %[v], %[expect]\n\t" + "jnz %l[error2]\n\t" + "cmpl %[expect2], %[v2]\n\t" + "jnz %l[error3]\n\t" +#endif + "movl %[newv], %%eax\n\t" + /* final store */ + "movl %%eax, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + RSEQ_ASM_DEFINE_ABORT(4, "", abort) + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* cmp2 input */ + [v2] "m" (*v2), + [expect2] "r" (expect2), + /* final store input */ + [v] "m" (*v), + [expect] "r" (expect), + [newv] "m" (newv) + : "memory", "cc", "eax" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2, error3 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("1st expected value comparison failed"); +error3: + rseq_after_asm_goto(); + rseq_bug("2nd expected value comparison failed"); +#endif +} + +/* TODO: implement a faster memcpy. */ +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + uint32_t rseq_scratch[3]; + + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + "movl %[src], %[rseq_scratch0]\n\t" + "movl %[dst], %[rseq_scratch1]\n\t" + "movl %[len], %[rseq_scratch2]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "movl %[expect], %%eax\n\t" + "cmpl %%eax, %[v]\n\t" + "jnz 5f\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f) + "movl %[expect], %%eax\n\t" + "cmpl %%eax, %[v]\n\t" + "jnz 7f\n\t" +#endif + /* try memcpy */ + "test %[len], %[len]\n\t" \ + "jz 333f\n\t" \ + "222:\n\t" \ + "movb (%[src]), %%al\n\t" \ + "movb %%al, (%[dst])\n\t" \ + "inc %[src]\n\t" \ + "inc %[dst]\n\t" \ + "dec %[len]\n\t" \ + "jnz 222b\n\t" \ + "333:\n\t" \ + RSEQ_INJECT_ASM(5) + "movl %[newv], %%eax\n\t" + /* final store */ + "movl %%eax, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + /* teardown */ + "movl %[rseq_scratch2], %[len]\n\t" + "movl %[rseq_scratch1], %[dst]\n\t" + "movl %[rseq_scratch0], %[src]\n\t" + RSEQ_ASM_DEFINE_ABORT(4, + "movl %[rseq_scratch2], %[len]\n\t" + "movl %[rseq_scratch1], %[dst]\n\t" + "movl %[rseq_scratch0], %[src]\n\t", + abort) + RSEQ_ASM_DEFINE_CMPFAIL(5, + "movl %[rseq_scratch2], %[len]\n\t" + "movl %[rseq_scratch1], %[dst]\n\t" + "movl %[rseq_scratch0], %[src]\n\t", + cmpfail) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_CMPFAIL(6, + "movl %[rseq_scratch2], %[len]\n\t" + "movl %[rseq_scratch1], %[dst]\n\t" + "movl %[rseq_scratch0], %[src]\n\t", + error1) + RSEQ_ASM_DEFINE_CMPFAIL(7, + "movl %[rseq_scratch2], %[len]\n\t" + "movl %[rseq_scratch1], %[dst]\n\t" + "movl %[rseq_scratch0], %[src]\n\t", + error2) +#endif + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* final store input */ + [v] "m" (*v), + [expect] "m" (expect), + [newv] "m" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len), + [rseq_scratch0] "m" (rseq_scratch[0]), + [rseq_scratch1] "m" (rseq_scratch[1]), + [rseq_scratch2] "m" (rseq_scratch[2]) + : "memory", "cc", "eax" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +/* TODO: implement a faster memcpy. */ +static inline __attribute__((always_inline)) +int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect, + void *dst, void *src, size_t len, + intptr_t newv, int cpu) +{ + uint32_t rseq_scratch[3]; + + RSEQ_INJECT_C(9) + + __asm__ __volatile__ goto ( + RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */ + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail]) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1]) + RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2]) +#endif + "movl %[src], %[rseq_scratch0]\n\t" + "movl %[dst], %[rseq_scratch1]\n\t" + "movl %[len], %[rseq_scratch2]\n\t" + /* Start rseq by storing table entry pointer into rseq_cs. 
*/ + RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset])) + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f) + RSEQ_INJECT_ASM(3) + "movl %[expect], %%eax\n\t" + "cmpl %%eax, %[v]\n\t" + "jnz 5f\n\t" + RSEQ_INJECT_ASM(4) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f) + "movl %[expect], %%eax\n\t" + "cmpl %%eax, %[v]\n\t" + "jnz 7f\n\t" +#endif + /* try memcpy */ + "test %[len], %[len]\n\t" \ + "jz 333f\n\t" \ + "222:\n\t" \ + "movb (%[src]), %%al\n\t" \ + "movb %%al, (%[dst])\n\t" \ + "inc %[src]\n\t" \ + "inc %[dst]\n\t" \ + "dec %[len]\n\t" \ + "jnz 222b\n\t" \ + "333:\n\t" \ + RSEQ_INJECT_ASM(5) + "lock; addl $0,-128(%%esp)\n\t" + "movl %[newv], %%eax\n\t" + /* final store */ + "movl %%eax, %[v]\n\t" + "2:\n\t" + RSEQ_INJECT_ASM(6) + /* teardown */ + "movl %[rseq_scratch2], %[len]\n\t" + "movl %[rseq_scratch1], %[dst]\n\t" + "movl %[rseq_scratch0], %[src]\n\t" + RSEQ_ASM_DEFINE_ABORT(4, + "movl %[rseq_scratch2], %[len]\n\t" + "movl %[rseq_scratch1], %[dst]\n\t" + "movl %[rseq_scratch0], %[src]\n\t", + abort) + RSEQ_ASM_DEFINE_CMPFAIL(5, + "movl %[rseq_scratch2], %[len]\n\t" + "movl %[rseq_scratch1], %[dst]\n\t" + "movl %[rseq_scratch0], %[src]\n\t", + cmpfail) +#ifdef RSEQ_COMPARE_TWICE + RSEQ_ASM_DEFINE_CMPFAIL(6, + "movl %[rseq_scratch2], %[len]\n\t" + "movl %[rseq_scratch1], %[dst]\n\t" + "movl %[rseq_scratch0], %[src]\n\t", + error1) + RSEQ_ASM_DEFINE_CMPFAIL(7, + "movl %[rseq_scratch2], %[len]\n\t" + "movl %[rseq_scratch1], %[dst]\n\t" + "movl %[rseq_scratch0], %[src]\n\t", + error2) +#endif + : /* gcc asm goto does not allow outputs */ + : [cpu_id] "r" (cpu), + [rseq_offset] "r" (rseq_offset), + /* final store input */ + [v] "m" (*v), + [expect] "m" (expect), + [newv] "m" (newv), + /* try memcpy input */ + [dst] "r" (dst), + [src] "r" (src), + [len] "r" (len), + [rseq_scratch0] "m" (rseq_scratch[0]), + [rseq_scratch1] "m" (rseq_scratch[1]), + [rseq_scratch2] "m" (rseq_scratch[2]) + : "memory", "cc", "eax" + RSEQ_INJECT_CLOBBER + : abort, cmpfail +#ifdef RSEQ_COMPARE_TWICE + , error1, error2 +#endif + ); + rseq_after_asm_goto(); + return 0; +abort: + rseq_after_asm_goto(); + RSEQ_INJECT_FAILED + return -1; +cmpfail: + rseq_after_asm_goto(); + return 1; +#ifdef RSEQ_COMPARE_TWICE +error1: + rseq_after_asm_goto(); + rseq_bug("cpu_id comparison failed"); +error2: + rseq_after_asm_goto(); + rseq_bug("expected value comparison failed"); +#endif +} + +#endif /* !RSEQ_SKIP_FASTPATH */ + +#endif diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c new file mode 100644 index 000000000..e20191fb4 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: LGPL-2.1 +/* + * rseq.c + * + * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ */ + +#define _GNU_SOURCE +#include <errno.h> +#include <sched.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <syscall.h> +#include <assert.h> +#include <signal.h> +#include <limits.h> +#include <dlfcn.h> +#include <stddef.h> + +#include <linux/compiler.h> + +#include "../kselftest.h" +#include "rseq.h" + +/* + * Define weak versions to play nice with binaries that are statically linked + * against a libc that doesn't support registering its own rseq. + */ +__weak ptrdiff_t __rseq_offset; +__weak unsigned int __rseq_size; +__weak unsigned int __rseq_flags; + +static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset; +static const unsigned int *libc_rseq_size_p = &__rseq_size; +static const unsigned int *libc_rseq_flags_p = &__rseq_flags; + +/* Offset from the thread pointer to the rseq area. */ +ptrdiff_t rseq_offset; + +/* Size of the registered rseq area. 0 if the registration was + unsuccessful. */ +unsigned int rseq_size = -1U; + +/* Flags used during rseq registration. */ +unsigned int rseq_flags; + +static int rseq_ownership; + +static +__thread struct rseq_abi __rseq_abi __attribute__((tls_model("initial-exec"))) = { + .cpu_id = RSEQ_ABI_CPU_ID_UNINITIALIZED, +}; + +static int sys_rseq(struct rseq_abi *rseq_abi, uint32_t rseq_len, + int flags, uint32_t sig) +{ + return syscall(__NR_rseq, rseq_abi, rseq_len, flags, sig); +} + +int rseq_available(void) +{ + int rc; + + rc = sys_rseq(NULL, 0, 0, 0); + if (rc != -1) + abort(); + switch (errno) { + case ENOSYS: + return 0; + case EINVAL: + return 1; + default: + abort(); + } +} + +int rseq_register_current_thread(void) +{ + int rc; + + if (!rseq_ownership) { + /* Treat libc's ownership as a successful registration. */ + return 0; + } + rc = sys_rseq(&__rseq_abi, sizeof(struct rseq_abi), 0, RSEQ_SIG); + if (rc) + return -1; + assert(rseq_current_cpu_raw() >= 0); + return 0; +} + +int rseq_unregister_current_thread(void) +{ + int rc; + + if (!rseq_ownership) { + /* Treat libc's ownership as a successful unregistration. */ + return 0; + } + rc = sys_rseq(&__rseq_abi, sizeof(struct rseq_abi), RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG); + if (rc) + return -1; + return 0; +} + +static __attribute__((constructor)) +void rseq_init(void) +{ + /* + * If the libc's registered rseq size isn't already valid, it may be + * because the binary is dynamically linked and not necessarily due to + * libc not having registered a restartable sequence. Try to find the + * symbols if that's the case. 
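+	 * (glibc 2.35 and later export __rseq_offset, __rseq_size and
+	 * __rseq_flags; on older or static libcs the dlsym() lookups fail
+	 * and we fall back to registering rseq ourselves below.)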
+ */ + if (!*libc_rseq_size_p) { + libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); + libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); + libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); + } + if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p && + *libc_rseq_size_p != 0) { + /* rseq registration owned by glibc */ + rseq_offset = *libc_rseq_offset_p; + rseq_size = *libc_rseq_size_p; + rseq_flags = *libc_rseq_flags_p; + return; + } + if (!rseq_available()) + return; + rseq_ownership = 1; + rseq_offset = (void *)&__rseq_abi - rseq_thread_pointer(); + rseq_size = sizeof(struct rseq_abi); + rseq_flags = 0; +} + +static __attribute__((destructor)) +void rseq_exit(void) +{ + if (!rseq_ownership) + return; + rseq_offset = 0; + rseq_size = -1U; + rseq_ownership = 0; +} + +int32_t rseq_fallback_current_cpu(void) +{ + int32_t cpu; + + cpu = sched_getcpu(); + if (cpu < 0) { + perror("sched_getcpu()"); + abort(); + } + return cpu; +} diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h new file mode 100644 index 000000000..9d850b290 --- /dev/null +++ b/tools/testing/selftests/rseq/rseq.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: LGPL-2.1 OR MIT */ +/* + * rseq.h + * + * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> + */ + +#ifndef RSEQ_H +#define RSEQ_H + +#include <stdint.h> +#include <stdbool.h> +#include <pthread.h> +#include <signal.h> +#include <sched.h> +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#include <stddef.h> +#include "rseq-abi.h" +#include "compiler.h" + +/* + * Empty code injection macros, override when testing. + * It is important to consider that the ASM injection macros need to be + * fully reentrant (e.g. do not modify the stack). + */ +#ifndef RSEQ_INJECT_ASM +#define RSEQ_INJECT_ASM(n) +#endif + +#ifndef RSEQ_INJECT_C +#define RSEQ_INJECT_C(n) +#endif + +#ifndef RSEQ_INJECT_INPUT +#define RSEQ_INJECT_INPUT +#endif + +#ifndef RSEQ_INJECT_CLOBBER +#define RSEQ_INJECT_CLOBBER +#endif + +#ifndef RSEQ_INJECT_FAILED +#define RSEQ_INJECT_FAILED +#endif + +#include "rseq-thread-pointer.h" + +/* Offset from the thread pointer to the rseq area. */ +extern ptrdiff_t rseq_offset; +/* Size of the registered rseq area. 0 if the registration was + unsuccessful. */ +extern unsigned int rseq_size; +/* Flags used during rseq registration. */ +extern unsigned int rseq_flags; + +static inline struct rseq_abi *rseq_get_abi(void) +{ + return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset); +} + +#define rseq_likely(x) __builtin_expect(!!(x), 1) +#define rseq_unlikely(x) __builtin_expect(!!(x), 0) +#define rseq_barrier() __asm__ __volatile__("" : : : "memory") + +#define RSEQ_ACCESS_ONCE(x) (*(__volatile__ __typeof__(x) *)&(x)) +#define RSEQ_WRITE_ONCE(x, v) __extension__ ({ RSEQ_ACCESS_ONCE(x) = (v); }) +#define RSEQ_READ_ONCE(x) RSEQ_ACCESS_ONCE(x) + +#define __rseq_str_1(x) #x +#define __rseq_str(x) __rseq_str_1(x) + +#define rseq_log(fmt, args...) \ + fprintf(stderr, fmt "(in %s() at " __FILE__ ":" __rseq_str(__LINE__)"\n", \ + ## args, __func__) + +#define rseq_bug(fmt, args...) 
\ + do { \ + rseq_log(fmt, ##args); \ + abort(); \ + } while (0) + +#if defined(__x86_64__) || defined(__i386__) +#include <rseq-x86.h> +#elif defined(__ARMEL__) +#include <rseq-arm.h> +#elif defined (__AARCH64EL__) +#include <rseq-arm64.h> +#elif defined(__PPC__) +#include <rseq-ppc.h> +#elif defined(__mips__) +#include <rseq-mips.h> +#elif defined(__s390__) +#include <rseq-s390.h> +#else +#error unsupported target +#endif + +/* + * Register rseq for the current thread. This needs to be called once + * by any thread which uses restartable sequences, before they start + * using restartable sequences, to ensure restartable sequences + * succeed. A restartable sequence executed from a non-registered + * thread will always fail. + */ +int rseq_register_current_thread(void); + +/* + * Unregister rseq for current thread. + */ +int rseq_unregister_current_thread(void); + +/* + * Restartable sequence fallback for reading the current CPU number. + */ +int32_t rseq_fallback_current_cpu(void); + +/* + * Values returned can be either the current CPU number, -1 (rseq is + * uninitialized), or -2 (rseq initialization has failed). + */ +static inline int32_t rseq_current_cpu_raw(void) +{ + return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id); +} + +/* + * Returns a possible CPU number, which is typically the current CPU. + * The returned CPU number can be used to prepare for an rseq critical + * section, which will confirm whether the cpu number is indeed the + * current one, and whether rseq is initialized. + * + * The CPU number returned by rseq_cpu_start should always be validated + * by passing it to a rseq asm sequence, or by comparing it to the + * return value of rseq_current_cpu_raw() if the rseq asm sequence + * does not need to be invoked. + */ +static inline uint32_t rseq_cpu_start(void) +{ + return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id_start); +} + +static inline uint32_t rseq_current_cpu(void) +{ + int32_t cpu; + + cpu = rseq_current_cpu_raw(); + if (rseq_unlikely(cpu < 0)) + cpu = rseq_fallback_current_cpu(); + return cpu; +} + +static inline void rseq_clear_rseq_cs(void) +{ + RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs.arch.ptr, 0); +} + +/* + * rseq_prepare_unload() should be invoked by each thread executing a rseq + * critical section at least once between their last critical section and + * library unload of the library defining the rseq critical section (struct + * rseq_cs) or the code referred to by the struct rseq_cs start_ip and + * post_commit_offset fields. This also applies to use of rseq in code + * generated by JIT: rseq_prepare_unload() should be invoked at least once by + * each thread executing a rseq critical section before reclaim of the memory + * holding the struct rseq_cs or reclaim of the code pointed to by struct + * rseq_cs start_ip and post_commit_offset fields. 
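+ */
+
+/*
+ * Editorial usage sketch, not part of the upstream file: a thread that
+ * executed critical sections located in a dynamically loaded plugin
+ * clears its rseq_cs pointer before the plugin is unloaded. The function
+ * name is hypothetical.
+ */
+static inline void example_unload_plugin(void *dl_handle)
+{
+	rseq_clear_rseq_cs();	/* what rseq_prepare_unload() does below */
+	/* ... now safe to dlclose(dl_handle) / munmap the plugin ... */
+	(void)dl_handle;
+}
+
+/* rseq_prepare_unload() simply clears this thread's rseq_cs pointer: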
+ */ +static inline void rseq_prepare_unload(void) +{ + rseq_clear_rseq_cs(); +} + +#endif /* RSEQ_H_ */ diff --git a/tools/testing/selftests/rseq/run_param_test.sh b/tools/testing/selftests/rseq/run_param_test.sh new file mode 100755 index 000000000..f51bc83c9 --- /dev/null +++ b/tools/testing/selftests/rseq/run_param_test.sh @@ -0,0 +1,126 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ or MIT + +NR_CPUS=`grep '^processor' /proc/cpuinfo | wc -l` + +EXTRA_ARGS=${@} + +OLDIFS="$IFS" +IFS=$'\n' +TEST_LIST=( + "-T s" + "-T l" + "-T b" + "-T b -M" + "-T m" + "-T m -M" + "-T i" + "-T r" +) + +TEST_NAME=( + "spinlock" + "list" + "buffer" + "buffer with barrier" + "memcpy" + "memcpy with barrier" + "increment" + "membarrier" +) +IFS="$OLDIFS" + +REPS=1000 +SLOW_REPS=100 +NR_THREADS=$((6*${NR_CPUS})) + +function do_tests() +{ + local i=0 + while [ "$i" -lt "${#TEST_LIST[@]}" ]; do + echo "Running test ${TEST_NAME[$i]}" + ./param_test ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1 + echo "Running compare-twice test ${TEST_NAME[$i]}" + ./param_test_compare_twice ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1 + let "i++" + done +} + +echo "Default parameters" +do_tests + +echo "Loop injection: 10000 loops" + +OLDIFS="$IFS" +IFS=$'\n' +INJECT_LIST=( + "1" + "2" + "3" + "4" + "5" + "6" + "7" + "8" + "9" +) +IFS="$OLDIFS" + +NR_LOOPS=10000 + +i=0 +while [ "$i" -lt "${#INJECT_LIST[@]}" ]; do + echo "Injecting at <${INJECT_LIST[$i]}>" + do_tests -${INJECT_LIST[i]} ${NR_LOOPS} + let "i++" +done +NR_LOOPS= + +function inject_blocking() +{ + OLDIFS="$IFS" + IFS=$'\n' + INJECT_LIST=( + "7" + "8" + "9" + ) + IFS="$OLDIFS" + + NR_LOOPS=-1 + + i=0 + while [ "$i" -lt "${#INJECT_LIST[@]}" ]; do + echo "Injecting at <${INJECT_LIST[$i]}>" + do_tests -${INJECT_LIST[i]} -1 ${@} + let "i++" + done + NR_LOOPS= +} + +echo "Yield injection (25%)" +inject_blocking -m 4 -y + +echo "Yield injection (50%)" +inject_blocking -m 2 -y + +echo "Yield injection (100%)" +inject_blocking -m 1 -y + +echo "Kill injection (25%)" +inject_blocking -m 4 -k + +echo "Kill injection (50%)" +inject_blocking -m 2 -k + +echo "Kill injection (100%)" +inject_blocking -m 1 -k + +echo "Sleep injection (1ms, 25%)" +inject_blocking -m 4 -s 1 + +echo "Sleep injection (1ms, 50%)" +inject_blocking -m 2 -s 1 + +echo "Sleep injection (1ms, 100%)" +inject_blocking -m 1 -s 1 diff --git a/tools/testing/selftests/rseq/settings b/tools/testing/selftests/rseq/settings new file mode 100644 index 000000000..e7b941753 --- /dev/null +++ b/tools/testing/selftests/rseq/settings @@ -0,0 +1 @@ +timeout=0 |