Diffstat (limited to 'tools/testing/selftests/rcutorture/formal/srcu-cbmc/src')
15 files changed, 856 insertions, 0 deletions
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/assume.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/assume.h
new file mode 100644
index 000000000..570a49d9d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/assume.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASSUME_H
+#define ASSUME_H
+
+/* Provide an assumption macro that can be disabled for gcc. */
+#ifdef RUN
+#define assume(x) \
+	do { \
+		/* Evaluate x to suppress warnings. */ \
+		(void) (x); \
+	} while (0)
+
+#else
+#define assume(x) __CPROVER_assume(x)
+#endif
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
new file mode 100644
index 000000000..3f95a768a
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BARRIERS_H
+#define BARRIERS_H
+
+#define barrier() __asm__ __volatile__("" : : : "memory")
+
+#ifdef RUN
+#define smp_mb() __sync_synchronize()
+#define smp_mb__after_unlock_lock() __sync_synchronize()
+#else
+/*
+ * Copied from CBMC's implementation of __sync_synchronize(), which
+ * seems to be disabled by default.
+ */
+#define smp_mb() __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence", \
+				 "WWcumul", "RRcumul", "RWcumul", "WRcumul")
+#define smp_mb__after_unlock_lock() __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence", \
+				    "WWcumul", "RRcumul", "RWcumul", "WRcumul")
+#endif
+
+/*
+ * Allow memory barriers to be disabled in either the read or write side
+ * of SRCU individually.
+ */
+
+#ifndef NO_SYNC_SMP_MB
+#define sync_smp_mb() smp_mb()
+#else
+#define sync_smp_mb() do {} while (0)
+#endif
+
+#ifndef NO_READ_SIDE_SMP_MB
+#define rs_smp_mb() smp_mb()
+#else
+#define rs_smp_mb() do {} while (0)
+#endif
+
+#define READ_ONCE(x) (*(volatile typeof(x) *) &(x))
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/bug_on.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/bug_on.h
new file mode 100644
index 000000000..5e7912c6a
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/bug_on.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BUG_ON_H
+#define BUG_ON_H
+
+#include <assert.h>
+
+#define BUG() assert(0)
+#define BUG_ON(x) assert(!(x))
+
+/* Does it make sense to treat warnings as errors? */
+#define WARN() BUG()
+#define WARN_ON(x) (BUG_ON(x), false)
+
+#endif
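The assume() and BUG_ON() macros above are the two basic verification primitives of this harness: assume() prunes executions that CBMC should not consider, while BUG_ON() asserts a property that must hold on every execution that survives. A minimal sketch of how they combine (the function below and its names are hypothetical, not part of this patch):

#include "assume.h"
#include "bug_on.h"

/* Deliberately left without a body: CBMC treats the return value of an
 * undefined nondet_*() function as a nondeterministic int. */
int nondet_int(void);

static void pick_valid_index(void)
{
	int i = nondet_int();

	/* Discard every execution in which i is out of range... */
	assume(i >= 0 && i < 4);

	/* ...so the assertion only has to hold on the remaining ones. */
	BUG_ON(i < 0 || i >= 4);
}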
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/combined_source.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/combined_source.c
new file mode 100644
index 000000000..e67ee5b3d
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/combined_source.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <config.h>
+
+/* Include all source files. */
+
+#include "include_srcu.c"
+
+#include "preempt.c"
+#include "misc.c"
+
+/* Used by test.c files */
+#include <pthread.h>
+#include <stdlib.h>
+#include <linux/srcu.h>
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h
new file mode 100644
index 000000000..283d71033
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* "Cheater" definitions based on restricted Kconfig choices. */
+
+#undef CONFIG_TINY_RCU
+#undef __CHECKER__
+#undef CONFIG_DEBUG_LOCK_ALLOC
+#undef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+#undef CONFIG_HOTPLUG_CPU
+#undef CONFIG_MODULES
+#undef CONFIG_NO_HZ_FULL_SYSIDLE
+#undef CONFIG_PREEMPT_COUNT
+#undef CONFIG_PREEMPT_RCU
+#undef CONFIG_PROVE_RCU
+#undef CONFIG_RCU_NOCB_CPU
+#undef CONFIG_RCU_NOCB_CPU_ALL
+#undef CONFIG_RCU_STALL_COMMON
+#undef CONFIG_RCU_TRACE
+#undef CONFIG_RCU_USER_QS
+#undef CONFIG_TASKS_RCU
+#define CONFIG_TREE_RCU
+
+#define CONFIG_GENERIC_ATOMIC64
+
+#if NR_CPUS > 1
+#define CONFIG_SMP
+#else
+#undef CONFIG_SMP
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/include_srcu.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/include_srcu.c
new file mode 100644
index 000000000..e5202d4cf
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/include_srcu.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <config.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include "int_typedefs.h"
+
+#include "barriers.h"
+#include "bug_on.h"
+#include "locks.h"
+#include "misc.h"
+#include "preempt.h"
+#include "percpu.h"
+#include "workqueues.h"
+
+#ifdef USE_SIMPLE_SYNC_SRCU
+#define synchronize_srcu(sp) synchronize_srcu_original(sp)
+#endif
+
+#include <srcu.c>
+
+#ifdef USE_SIMPLE_SYNC_SRCU
+#undef synchronize_srcu
+
+#include "simple_sync_srcu.c"
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/int_typedefs.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/int_typedefs.h
new file mode 100644
index 000000000..0dd27aa51
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/int_typedefs.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef INT_TYPEDEFS_H
+#define INT_TYPEDEFS_H
+
+#include <inttypes.h>
+
+typedef int8_t s8;
+typedef uint8_t u8;
+typedef int16_t s16;
+typedef uint16_t u16;
+typedef int32_t s32;
+typedef uint32_t u32;
+typedef int64_t s64;
+typedef uint64_t u64;
+
+typedef int8_t __s8;
+typedef uint8_t __u8;
+typedef int16_t __s16;
+typedef uint16_t __u16;
+typedef int32_t __s32;
+typedef uint32_t __u32;
+typedef int64_t __s64;
+typedef uint64_t __u64;
+
+#define S8_C(x) INT8_C(x)
+#define U8_C(x) UINT8_C(x)
+#define S16_C(x) INT16_C(x)
+#define U16_C(x) UINT16_C(x)
+#define S32_C(x) INT32_C(x)
+#define U32_C(x) UINT32_C(x)
+#define S64_C(x) INT64_C(x)
+#define U64_C(x) UINT64_C(x)
+
+#endif
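include_srcu.c above pulls the real SRCU implementation into a single translation unit and, when USE_SIMPLE_SYNC_SRCU is defined, renames its synchronize_srcu() on the way in so that a simplified replacement can be supplied afterwards. The same rename-then-include pattern, reduced to a hypothetical example (the file and function names below are illustrative only):

/* Rename the heavyweight implementation while it is being included... */
#define do_sync(sp) do_sync_original(sp)
#include "real_sync.c"	/* hypothetical: its do_sync() definition now defines do_sync_original() */
#undef do_sync

/* ...then supply the simplified do_sync() in its place. */
#include "simple_sync.c"	/* hypothetical replacement */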
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h
new file mode 100644
index 000000000..cf6938d67
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LOCKS_H
+#define LOCKS_H
+
+#include <limits.h>
+#include <pthread.h>
+#include <stdbool.h>
+
+#include "assume.h"
+#include "bug_on.h"
+#include "preempt.h"
+
+int nondet_int(void);
+
+#define __acquire(x)
+#define __acquires(x)
+#define __release(x)
+#define __releases(x)
+
+/* Only use one lock mechanism. Select which one. */
+#ifdef PTHREAD_LOCK
+struct lock_impl {
+	pthread_mutex_t mutex;
+};
+
+static inline void lock_impl_lock(struct lock_impl *lock)
+{
+	BUG_ON(pthread_mutex_lock(&lock->mutex));
+}
+
+static inline void lock_impl_unlock(struct lock_impl *lock)
+{
+	BUG_ON(pthread_mutex_unlock(&lock->mutex));
+}
+
+static inline bool lock_impl_trylock(struct lock_impl *lock)
+{
+	int err = pthread_mutex_trylock(&lock->mutex);
+
+	if (!err)
+		return true;
+	else if (err == EBUSY)
+		return false;
+	BUG();
+}
+
+static inline void lock_impl_init(struct lock_impl *lock)
+{
+	pthread_mutex_init(&lock->mutex, NULL);
+}
+
+#define LOCK_IMPL_INITIALIZER {.mutex = PTHREAD_MUTEX_INITIALIZER}
+
+#else /* !defined(PTHREAD_LOCK) */
+/* Spinlock that assumes that it always gets the lock immediately. */
+
+struct lock_impl {
+	bool locked;
+};
+
+static inline bool lock_impl_trylock(struct lock_impl *lock)
+{
+#ifdef RUN
+	/* TODO: Should this be a test and set? */
+	return __sync_bool_compare_and_swap(&lock->locked, false, true);
+#else
+	__CPROVER_atomic_begin();
+	bool old_locked = lock->locked;
+	lock->locked = true;
+	__CPROVER_atomic_end();
+
+	/* Minimal barrier to prevent accesses leaking out of lock. */
+	__CPROVER_fence("RRfence", "RWfence");
+
+	return !old_locked;
+#endif
+}
+
+static inline void lock_impl_lock(struct lock_impl *lock)
+{
+	/*
+	 * CBMC doesn't support busy waiting, so just assume that the
+	 * lock is available.
	 */
+	assume(lock_impl_trylock(lock));
+
+	/*
+	 * If the lock was already held by this thread then the assumption
+	 * is unsatisfiable (deadlock).
+	 */
+}
+
+static inline void lock_impl_unlock(struct lock_impl *lock)
+{
+#ifdef RUN
+	BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));
+#else
+	/* Minimal barrier to prevent accesses leaking out of lock. */
+	__CPROVER_fence("RWfence", "WWfence");
+
+	__CPROVER_atomic_begin();
+	bool old_locked = lock->locked;
+	lock->locked = false;
+	__CPROVER_atomic_end();
+
+	BUG_ON(!old_locked);
+#endif
+}
+
+static inline void lock_impl_init(struct lock_impl *lock)
+{
+	lock->locked = false;
+}
+
+#define LOCK_IMPL_INITIALIZER {.locked = false}
+
+#endif /* !defined(PTHREAD_LOCK) */
+
+/*
+ * Implement spinlocks using the lock mechanism. Wrap the lock to prevent mixing
+ * locks of different types.
+ */
+typedef struct {
+	struct lock_impl internal_lock;
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED {.internal_lock = LOCK_IMPL_INITIALIZER}
+#define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED
+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
+
+static inline void spin_lock_init(spinlock_t *lock)
+{
+	lock_impl_init(&lock->internal_lock);
+}
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	/*
+	 * Spin locks also need to be removed in order to eliminate all
+	 * memory barriers. They are only used by the write side anyway.
+	 */
+#ifndef NO_SYNC_SMP_MB
+	preempt_disable();
+	lock_impl_lock(&lock->internal_lock);
+#endif
+}
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+#ifndef NO_SYNC_SMP_MB
+	lock_impl_unlock(&lock->internal_lock);
+	preempt_enable();
+#endif
+}
+
+/* Don't bother with interrupts */
+#define spin_lock_irq(lock) spin_lock(lock)
+#define spin_unlock_irq(lock) spin_unlock(lock)
+#define spin_lock_irqsave(lock, flags) spin_lock(lock)
+#define spin_unlock_irqrestore(lock, flags) spin_unlock(lock)
+
+/*
+ * This is supposed to return an int, but I think that a bool should work as
+ * well.
+ */
+static inline bool spin_trylock(spinlock_t *lock)
+{
+#ifndef NO_SYNC_SMP_MB
+	preempt_disable();
+	return lock_impl_trylock(&lock->internal_lock);
+#else
+	return true;
+#endif
+}
+
+struct completion {
+	/* Hopefuly this won't overflow. */
+	unsigned int count;
+};
+
+#define COMPLETION_INITIALIZER(x) {.count = 0}
+#define DECLARE_COMPLETION(x) struct completion x = COMPLETION_INITIALIZER(x)
+#define DECLARE_COMPLETION_ONSTACK(x) DECLARE_COMPLETION(x)
+
+static inline void init_completion(struct completion *c)
+{
+	c->count = 0;
+}
+
+static inline void wait_for_completion(struct completion *c)
+{
+	unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1);
+
+	assume(prev_count);
+}
+
+static inline void complete(struct completion *c)
+{
+	unsigned int prev_count = __sync_fetch_and_add(&c->count, 1);
+
+	BUG_ON(prev_count == UINT_MAX);
+}
+
+/* This function probably isn't very useful for CBMC. */
+static inline bool try_wait_for_completion(struct completion *c)
+{
+	BUG();
+}
+
+static inline bool completion_done(struct completion *c)
+{
+	return c->count;
+}
+
+/* TODO: Implement complete_all */
+static inline void complete_all(struct completion *c)
+{
+	BUG();
+}
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c
new file mode 100644
index 000000000..9440cc39e
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <config.h>
+
+#include "misc.h"
+#include "bug_on.h"
+
+struct rcu_head;
+
+void wakeme_after_rcu(struct rcu_head *head)
+{
+	BUG();
+}
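Because lock_impl_lock() above models acquisition as assume(lock_impl_trylock(...)), CBMC never explores an execution in which two threads hold the same spinlock at once; contention prunes the path instead of blocking. A small illustrative harness fragment (hypothetical names; it assumes linking with preempt.c and a build-time NR_CPUS, as the accompanying test harnesses presumably arrange):

#include <pthread.h>

#include "bug_on.h"
#include "locks.h"

static DEFINE_SPINLOCK(demo_lock);
static int shared;

static void *incrementer(void *arg)
{
	(void)arg;

	spin_lock(&demo_lock);		/* assume()s the lock was free */
	shared++;			/* protected increment */
	spin_unlock(&demo_lock);

	return NULL;
}

/*
 * A main() would pthread_create() two incrementer threads, join them,
 * and then BUG_ON(shared != 2); CBMC checks that assertion over every
 * interleaving that survives the lock's assume().
 */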
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h
new file mode 100644
index 000000000..aca50030f
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h
@@ -0,0 +1,58 @@
+#ifndef MISC_H
+#define MISC_H
+
+#include "assume.h"
+#include "int_typedefs.h"
+#include "locks.h"
+
+#include <linux/types.h>
+
+/* Probably won't need to deal with bottom halves. */
+static inline void local_bh_disable(void) {}
+static inline void local_bh_enable(void) {}
+
+#define MODULE_ALIAS(X)
+#define module_param(...)
+#define EXPORT_SYMBOL_GPL(x)
+
+#define container_of(ptr, type, member) ({ \
+	const typeof(((type *)0)->member) *__mptr = (ptr); \
+	(type *)((char *)__mptr - offsetof(type, member)); \
+})
+
+#ifndef USE_SIMPLE_SYNC_SRCU
+/* Abuse udelay to make sure that busy loops terminate. */
+#define udelay(x) assume(0)
+
+#else
+
+/* The simple custom synchronize_srcu is ok with try_check_zero failing. */
+#define udelay(x) do { } while (0)
+#endif
+
+#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
+	do { } while (0)
+
+#define notrace
+
+/* Avoid including rcupdate.h */
+struct rcu_synchronize {
+	struct rcu_head head;
+	struct completion completion;
+};
+
+void wakeme_after_rcu(struct rcu_head *head);
+
+#define rcu_lock_acquire(a) do { } while (0)
+#define rcu_lock_release(a) do { } while (0)
+#define rcu_lockdep_assert(c, s) do { } while (0)
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
+
+/* Let CBMC non-deterministically choose switch between normal and expedited. */
+bool rcu_gp_is_normal(void);
+bool rcu_gp_is_expedited(void);
+
+/* Do the same for old versions of rcu. */
+#define rcu_expedited (rcu_gp_is_expedited())
+
+#endif
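The two udelay() definitions above encode the strategy for bounding retry loops: outside USE_SIMPLE_SYNC_SRCU, udelay() expands to assume(0), so any execution that reaches a retry path is discarded and every busy loop is forced to succeed on its first pass. An illustrative loop of that shape (readers_gone() and wait_for_readers() are hypothetical, not part of this patch):

#include <stdbool.h>

#include "misc.h"

static bool readers_gone(void);		/* hypothetical completion check */

static void wait_for_readers(void)
{
	for (;;) {
		if (readers_gone())
			break;
		/*
		 * With udelay(x) defined as assume(0), no execution
		 * survives this retry, so the loop is effectively
		 * bounded to a single successful iteration.
		 */
		udelay(1);
	}
}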
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h
new file mode 100644
index 000000000..27e67a3f2
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef PERCPU_H
+#define PERCPU_H
+
+#include <stddef.h>
+#include "bug_on.h"
+#include "preempt.h"
+
+#define __percpu
+
+/* Maximum size of any percpu data. */
+#define PERCPU_OFFSET (4 * sizeof(long))
+
+/* Ignore alignment, as CBMC doesn't care about false sharing. */
+#define alloc_percpu(type) __alloc_percpu(sizeof(type), 1)
+
+static inline void *__alloc_percpu(size_t size, size_t align)
+{
+	BUG();
+	return NULL;
+}
+
+static inline void free_percpu(void *ptr)
+{
+	BUG();
+}
+
+#define per_cpu_ptr(ptr, cpu) \
+	((typeof(ptr)) ((char *) (ptr) + PERCPU_OFFSET * cpu))
+
+#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1)
+#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1)
+#define __this_cpu_sub(pcp, n) __this_cpu_add(pcp, -(typeof(pcp)) (n))
+
+#define this_cpu_inc(pcp) this_cpu_add(pcp, 1)
+#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1)
+#define this_cpu_sub(pcp, n) this_cpu_add(pcp, -(typeof(pcp)) (n))
+
+/* Make CBMC use atomics to work around bug. */
+#ifdef RUN
+#define THIS_CPU_ADD_HELPER(ptr, x) (*(ptr) += (x))
+#else
+/*
+ * Split the atomic into a read and a write so that it has the least
+ * possible ordering.
+ */
+#define THIS_CPU_ADD_HELPER(ptr, x) \
+	do { \
+		typeof(ptr) this_cpu_add_helper_ptr = (ptr); \
+		typeof(ptr) this_cpu_add_helper_x = (x); \
+		typeof(*ptr) this_cpu_add_helper_temp; \
+		__CPROVER_atomic_begin(); \
+		this_cpu_add_helper_temp = *(this_cpu_add_helper_ptr); \
+		__CPROVER_atomic_end(); \
+		this_cpu_add_helper_temp += this_cpu_add_helper_x; \
+		__CPROVER_atomic_begin(); \
+		*(this_cpu_add_helper_ptr) = this_cpu_add_helper_temp; \
+		__CPROVER_atomic_end(); \
+	} while (0)
+#endif
+
+/*
+ * For some reason CBMC needs an atomic operation even though this is percpu
+ * data.
+ */
+#define __this_cpu_add(pcp, n) \
+	do { \
+		BUG_ON(preemptible()); \
+		THIS_CPU_ADD_HELPER(per_cpu_ptr(&(pcp), thread_cpu_id), \
+				    (typeof(pcp)) (n)); \
+	} while (0)
+
+#define this_cpu_add(pcp, n) \
+	do { \
+		int this_cpu_add_impl_cpu = get_cpu(); \
+		THIS_CPU_ADD_HELPER(per_cpu_ptr(&(pcp), this_cpu_add_impl_cpu), \
+				    (typeof(pcp)) (n)); \
+		put_cpu(); \
+	} while (0)
+
+/*
+ * This will cause a compiler warning because of the cast from char[][] to
+ * type*. This will cause a compile time error if type is too big.
+ */
+#define DEFINE_PER_CPU(type, name) \
+	char name[NR_CPUS][PERCPU_OFFSET]; \
+	typedef char percpu_too_big_##name \
+		[sizeof(type) > PERCPU_OFFSET ? -1 : 1]
+
+#define for_each_possible_cpu(cpu) \
+	for ((cpu) = 0; (cpu) < NR_CPUS; ++(cpu))
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c
new file mode 100644
index 000000000..b4083ae34
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <config.h>
+
+#include "preempt.h"
+
+#include "assume.h"
+#include "locks.h"
+
+/* Support NR_CPUS of at most 64 */
+#define CPU_PREEMPTION_LOCKS_INIT0 LOCK_IMPL_INITIALIZER
+#define CPU_PREEMPTION_LOCKS_INIT1 \
+	CPU_PREEMPTION_LOCKS_INIT0, CPU_PREEMPTION_LOCKS_INIT0
+#define CPU_PREEMPTION_LOCKS_INIT2 \
+	CPU_PREEMPTION_LOCKS_INIT1, CPU_PREEMPTION_LOCKS_INIT1
+#define CPU_PREEMPTION_LOCKS_INIT3 \
+	CPU_PREEMPTION_LOCKS_INIT2, CPU_PREEMPTION_LOCKS_INIT2
+#define CPU_PREEMPTION_LOCKS_INIT4 \
+	CPU_PREEMPTION_LOCKS_INIT3, CPU_PREEMPTION_LOCKS_INIT3
+#define CPU_PREEMPTION_LOCKS_INIT5 \
+	CPU_PREEMPTION_LOCKS_INIT4, CPU_PREEMPTION_LOCKS_INIT4
+
+/*
+ * Simulate disabling preemption by locking a particular cpu. NR_CPUS
+ * should be the actual number of cpus, not just the maximum.
+ */
+struct lock_impl cpu_preemption_locks[NR_CPUS] = {
+	CPU_PREEMPTION_LOCKS_INIT0
+#if (NR_CPUS - 1) & 1
+	, CPU_PREEMPTION_LOCKS_INIT0
+#endif
+#if (NR_CPUS - 1) & 2
+	, CPU_PREEMPTION_LOCKS_INIT1
+#endif
+#if (NR_CPUS - 1) & 4
+	, CPU_PREEMPTION_LOCKS_INIT2
+#endif
+#if (NR_CPUS - 1) & 8
+	, CPU_PREEMPTION_LOCKS_INIT3
+#endif
+#if (NR_CPUS - 1) & 16
+	, CPU_PREEMPTION_LOCKS_INIT4
+#endif
+#if (NR_CPUS - 1) & 32
+	, CPU_PREEMPTION_LOCKS_INIT5
+#endif
+};
+
+#undef CPU_PREEMPTION_LOCKS_INIT0
+#undef CPU_PREEMPTION_LOCKS_INIT1
+#undef CPU_PREEMPTION_LOCKS_INIT2
+#undef CPU_PREEMPTION_LOCKS_INIT3
+#undef CPU_PREEMPTION_LOCKS_INIT4
+#undef CPU_PREEMPTION_LOCKS_INIT5
+
+__thread int thread_cpu_id;
+__thread int preempt_disable_count;
+
+void preempt_disable(void)
+{
+	BUG_ON(preempt_disable_count < 0 || preempt_disable_count == INT_MAX);
+
+	if (preempt_disable_count++)
+		return;
+
+	thread_cpu_id = nondet_int();
+	assume(thread_cpu_id >= 0);
+	assume(thread_cpu_id < NR_CPUS);
+	lock_impl_lock(&cpu_preemption_locks[thread_cpu_id]);
+}
+
+void preempt_enable(void)
+{
+	BUG_ON(preempt_disable_count < 1);
+
+	if (--preempt_disable_count)
+		return;
+
+	lock_impl_unlock(&cpu_preemption_locks[thread_cpu_id]);
+}
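preempt_disable() above turns "this thread is running on some CPU with preemption off" into "this thread holds that CPU's lock", with the CPU chosen nondeterministically on the outermost disable. A sketch of the usage pattern this enables (demo_count and count_event() are hypothetical; NR_CPUS must be supplied at build time):

#include "preempt.h"

static long demo_count[NR_CPUS];	/* hypothetical per-CPU counter */

static void count_event(void)
{
	int cpu = get_cpu();	/* nondeterministically picks and locks a CPU */

	/*
	 * While this thread holds cpu_preemption_locks[cpu], no other
	 * thread can run with preemption disabled on the same CPU, so
	 * this plain increment cannot race with another count_event()
	 * on that CPU.
	 */
	demo_count[cpu]++;

	put_cpu();		/* drops the CPU lock */
}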
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.h
new file mode 100644
index 000000000..f8b762cd2
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef PREEMPT_H
+#define PREEMPT_H
+
+#include <stdbool.h>
+
+#include "bug_on.h"
+
+/* This flag contains garbage if preempt_disable_count is 0. */
+extern __thread int thread_cpu_id;
+
+/* Support recursive preemption disabling. */
+extern __thread int preempt_disable_count;
+
+void preempt_disable(void);
+void preempt_enable(void);
+
+static inline void preempt_disable_notrace(void)
+{
+	preempt_disable();
+}
+
+static inline void preempt_enable_no_resched(void)
+{
+	preempt_enable();
+}
+
+static inline void preempt_enable_notrace(void)
+{
+	preempt_enable();
+}
+
+static inline int preempt_count(void)
+{
+	return preempt_disable_count;
+}
+
+static inline bool preemptible(void)
+{
+	return !preempt_count();
+}
+
+static inline int get_cpu(void)
+{
+	preempt_disable();
+	return thread_cpu_id;
+}
+
+static inline void put_cpu(void)
+{
+	preempt_enable();
+}
+
+static inline void might_sleep(void)
+{
+	BUG_ON(preempt_disable_count);
+}
+
+#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/simple_sync_srcu.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/simple_sync_srcu.c
new file mode 100644
index 000000000..97f592048
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/simple_sync_srcu.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <config.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include "int_typedefs.h"
+
+#include "barriers.h"
+#include "bug_on.h"
+#include "locks.h"
+#include "misc.h"
+#include "preempt.h"
+#include "percpu.h"
+#include "workqueues.h"
+
+#include <linux/srcu.h>
+
+/* Functions needed from modify_srcu.c */
+bool try_check_zero(struct srcu_struct *sp, int idx, int trycount);
+void srcu_flip(struct srcu_struct *sp);
+
+/* Simpler implementation of synchronize_srcu that ignores batching. */
+void synchronize_srcu(struct srcu_struct *sp)
+{
+	int idx;
+	/*
+	 * This code assumes that try_check_zero will succeed anyway,
+	 * so there is no point in multiple tries.
+	 */
+	const int trycount = 1;
+
+	might_sleep();
+
+	/* Ignore the lock, as multiple writers aren't working yet anyway. */
+
+	idx = 1 ^ (sp->completed & 1);
+
+	/* For comments see srcu_advance_batches. */
+
+	assume(try_check_zero(sp, idx, trycount));
+
+	srcu_flip(sp);
+
+	assume(try_check_zero(sp, idx^1, trycount));
+}
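The simplified synchronize_srcu() above still has to provide the SRCU grace-period guarantee that the harness checks. A sketch of the reader/updater litmus pattern that exercises it (names are illustrative; it assumes the harness's <linux/srcu.h> supplies DEFINE_SRCU() as the kernel header does, and a main() would run one thread of each and join them before CBMC checks the assertion):

#include <pthread.h>
#include <linux/srcu.h>

#include "barriers.h"
#include "bug_on.h"

DEFINE_SRCU(demo_srcu);		/* assumed to be provided by the harness srcu.h */

static int x, y;

static void *updater(void *arg)
{
	(void)arg;

	WRITE_ONCE(x, 1);
	synchronize_srcu(&demo_srcu);	/* waits for pre-existing readers */
	WRITE_ONCE(y, 1);

	return NULL;
}

static void *reader(void *arg)
{
	int r_y, r_x;
	int idx = srcu_read_lock(&demo_srcu);

	(void)arg;
	r_y = READ_ONCE(y);
	r_x = READ_ONCE(x);
	srcu_read_unlock(&demo_srcu, idx);

	/* Seeing the post-grace-period write implies seeing the earlier one. */
	BUG_ON(r_y == 1 && r_x == 0);

	return NULL;
}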
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h
new file mode 100644
index 000000000..28b960300
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef WORKQUEUES_H
+#define WORKQUEUES_H
+
+#include <stdbool.h>
+
+#include "barriers.h"
+#include "bug_on.h"
+#include "int_typedefs.h"
+
+#include <linux/types.h>
+
+/* Stub workqueue implementation. */
+
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(unsigned long __data);
+
+struct work_struct {
+/*	atomic_long_t data; */
+	unsigned long data;
+
+	struct list_head entry;
+	work_func_t func;
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map lockdep_map;
+#endif
+};
+
+struct timer_list {
+	struct hlist_node entry;
+	unsigned long expires;
+	void (*function)(unsigned long);
+	unsigned long data;
+	u32 flags;
+	int slack;
+};
+
+struct delayed_work {
+	struct work_struct work;
+	struct timer_list timer;
+
+	/* target workqueue and CPU ->timer uses to queue ->work */
+	struct workqueue_struct *wq;
+	int cpu;
+};
+
+
+static inline bool schedule_work(struct work_struct *work)
+{
+	BUG();
+	return true;
+}
+
+static inline bool schedule_work_on(int cpu, struct work_struct *work)
+{
+	BUG();
+	return true;
+}
+
+static inline bool queue_work(struct workqueue_struct *wq,
+			      struct work_struct *work)
+{
+	BUG();
+	return true;
+}
+
+static inline bool queue_delayed_work(struct workqueue_struct *wq,
+				      struct delayed_work *dwork,
+				      unsigned long delay)
+{
+	BUG();
+	return true;
+}
+
+#define INIT_WORK(w, f) \
+	do { \
+		(w)->data = 0; \
+		(w)->func = (f); \
+	} while (0)
+
+#define INIT_DELAYED_WORK(w, f) INIT_WORK(&(w)->work, (f))
+
+#define __WORK_INITIALIZER(n, f) { \
+		.data = 0, \
+		.entry = { &(n).entry, &(n).entry }, \
+		.func = f \
+	}
+
+/* Don't bother initializing timer. */
+#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
+		.work = __WORK_INITIALIZER((n).work, (f)), \
+	}
+
+#define DECLARE_WORK(n, f) \
+	struct workqueue_struct n = __WORK_INITIALIZER
+
+#define DECLARE_DELAYED_WORK(n, f) \
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
+
+#define system_power_efficient_wq ((struct workqueue_struct *) NULL)
+
+#endif