9 files changed, 362 insertions, 0 deletions
diff --git a/Documentation/litmus-tests/README b/Documentation/litmus-tests/README
new file mode 100644
index 0000000000..658d37860d
--- /dev/null
+++ b/Documentation/litmus-tests/README
@@ -0,0 +1,35 @@
+============
+LITMUS TESTS
+============
+
+Each subdirectory contains litmus tests typical of those used to describe
+the semantics of the respective kernel APIs.
+For more information about how to "run" a litmus test or how to generate
+a kernel test module based on a litmus test, please see
+tools/memory-model/README.
+
+
+atomic (/atomic directory)
+--------------------------
+
+Atomic-RMW+mb__after_atomic-is-stronger-than-acquire.litmus
+	Test that an atomic RMW followed by a smp_mb__after_atomic() is
+	stronger than a normal acquire: both the read and write parts of
+	the RMW are ordered before the subsequent memory accesses.
+
+Atomic-RMW-ops-are-atomic-WRT-atomic_set.litmus
+	Test that atomic_set() cannot break the atomicity of atomic RMWs.
+	NOTE: Requires herd7 7.56 or later, which supports "(void)expr".
+
+
+RCU (/rcu directory)
+--------------------
+
+MP+onceassign+derefonce.litmus (under tools/memory-model/litmus-tests/)
+	Demonstrates the use of rcu_assign_pointer() and rcu_dereference()
+	to ensure that an RCU reader will not see pre-initialization garbage.
+
+RCU+sync+read.litmus
+RCU+sync+free.litmus
+	Both the above litmus tests demonstrate the RCU grace period guarantee
+	that an RCU read-side critical section can never span a grace period.
diff --git a/Documentation/litmus-tests/atomic/Atomic-RMW+mb__after_atomic-is-stronger-than-acquire.litmus b/Documentation/litmus-tests/atomic/Atomic-RMW+mb__after_atomic-is-stronger-than-acquire.litmus
new file mode 100644
index 0000000000..9a8e31a44b
--- /dev/null
+++ b/Documentation/litmus-tests/atomic/Atomic-RMW+mb__after_atomic-is-stronger-than-acquire.litmus
@@ -0,0 +1,32 @@
+C Atomic-RMW+mb__after_atomic-is-stronger-than-acquire
+
+(*
+ * Result: Never
+ *
+ * Test that an atomic RMW followed by a smp_mb__after_atomic() is
+ * stronger than a normal acquire: both the read and write parts of
+ * the RMW are ordered before the subsequent memory accesses.
+ *)
+
+{
+}
+
+P0(int *x, atomic_t *y)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*x);
+	smp_rmb();
+	r1 = atomic_read(y);
+}
+
+P1(int *x, atomic_t *y)
+{
+	atomic_inc(y);
+	smp_mb__after_atomic();
+	WRITE_ONCE(*x, 1);
+}
+
+exists
+(0:r0=1 /\ 0:r1=0)
diff --git a/Documentation/litmus-tests/atomic/Atomic-RMW-ops-are-atomic-WRT-atomic_set.litmus b/Documentation/litmus-tests/atomic/Atomic-RMW-ops-are-atomic-WRT-atomic_set.litmus
new file mode 100644
index 0000000000..ffd4d3e79c
--- /dev/null
+++ b/Documentation/litmus-tests/atomic/Atomic-RMW-ops-are-atomic-WRT-atomic_set.litmus
@@ -0,0 +1,25 @@
+C Atomic-RMW-ops-are-atomic-WRT-atomic_set
+
+(*
+ * Result: Never
+ *
+ * Test that atomic_set() cannot break the atomicity of atomic RMWs.
+ * NOTE: This requires herd7 7.56 or later, which supports "(void)expr".
+ *)
+
+{
+	atomic_t v = ATOMIC_INIT(1);
+}
+
+P0(atomic_t *v)
+{
+	(void)atomic_add_unless(v, 1, 0);
+}
+
+P1(atomic_t *v)
+{
+	atomic_set(v, 0);
+}
+
+exists
+(v=2)
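For reference, the ordering property checked by the first atomic test
corresponds to the following kernel-style pattern. This is only a minimal
sketch: the publish()/consume() functions and the refcount/published
variables are illustrative names, not part of the patch.

	static atomic_t refcount = ATOMIC_INIT(0);
	static int published;

	/* Writer, as in P1: smp_mb__after_atomic() orders both the read
	 * and the write part of the atomic_inc() before the store to
	 * "published", which a plain acquire would not guarantee. */
	static void publish(void)
	{
		atomic_inc(&refcount);
		smp_mb__after_atomic();
		WRITE_ONCE(published, 1);
	}

	/* Reader, as in P0: if "published" is seen as 1, the smp_rmb()
	 * ensures the incremented refcount is seen as well. */
	static void consume(void)
	{
		int p = READ_ONCE(published);

		smp_rmb();
		if (p)
			WARN_ON(atomic_read(&refcount) == 0);
	}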
diff --git a/Documentation/litmus-tests/locking/DCL-broken.litmus b/Documentation/litmus-tests/locking/DCL-broken.litmus
new file mode 100644
index 0000000000..bfb7ba4316
--- /dev/null
+++ b/Documentation/litmus-tests/locking/DCL-broken.litmus
@@ -0,0 +1,54 @@
+C DCL-broken
+
+(*
+ * Result: Sometimes
+ *
+ * This litmus test demonstrates that more than just locking is required to
+ * correctly implement double-checked locking.
+ *)
+
+{
+	int flag;
+	int data;
+}
+
+P0(int *flag, int *data, spinlock_t *lck)
+{
+	int r0;
+	int r1;
+	int r2;
+
+	r0 = READ_ONCE(*flag);
+	if (r0 == 0) {
+		spin_lock(lck);
+		r1 = READ_ONCE(*flag);
+		if (r1 == 0) {
+			WRITE_ONCE(*data, 1);
+			WRITE_ONCE(*flag, 1);
+		}
+		spin_unlock(lck);
+	}
+	r2 = READ_ONCE(*data);
+}
+
+P1(int *flag, int *data, spinlock_t *lck)
+{
+	int r0;
+	int r1;
+	int r2;
+
+	r0 = READ_ONCE(*flag);
+	if (r0 == 0) {
+		spin_lock(lck);
+		r1 = READ_ONCE(*flag);
+		if (r1 == 0) {
+			WRITE_ONCE(*data, 1);
+			WRITE_ONCE(*flag, 1);
+		}
+		spin_unlock(lck);
+	}
+	r2 = READ_ONCE(*data);
+}
+
+locations [flag;data;0:r0;0:r1;1:r0;1:r1]
+exists (0:r2=0 \/ 1:r2=0)
diff --git a/Documentation/litmus-tests/locking/DCL-fixed.litmus b/Documentation/litmus-tests/locking/DCL-fixed.litmus
new file mode 100644
index 0000000000..d1b60bcb0c
--- /dev/null
+++ b/Documentation/litmus-tests/locking/DCL-fixed.litmus
@@ -0,0 +1,55 @@
+C DCL-fixed
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that double-checked locking can be
+ * made reliable given proper use of smp_load_acquire() and smp_store_release()
+ * in addition to the locking.
+ *)
+
+{
+	int flag;
+	int data;
+}
+
+P0(int *flag, int *data, spinlock_t *lck)
+{
+	int r0;
+	int r1;
+	int r2;
+
+	r0 = smp_load_acquire(flag);
+	if (r0 == 0) {
+		spin_lock(lck);
+		r1 = READ_ONCE(*flag);
+		if (r1 == 0) {
+			WRITE_ONCE(*data, 1);
+			smp_store_release(flag, 1);
+		}
+		spin_unlock(lck);
+	}
+	r2 = READ_ONCE(*data);
+}
+
+P1(int *flag, int *data, spinlock_t *lck)
+{
+	int r0;
+	int r1;
+	int r2;
+
+	r0 = smp_load_acquire(flag);
+	if (r0 == 0) {
+		spin_lock(lck);
+		r1 = READ_ONCE(*flag);
+		if (r1 == 0) {
+			WRITE_ONCE(*data, 1);
+			smp_store_release(flag, 1);
+		}
+		spin_unlock(lck);
+	}
+	r2 = READ_ONCE(*data);
+}
+
+locations [flag;data;0:r0;0:r1;1:r0;1:r1]
+exists (0:r2=0 \/ 1:r2=0)
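In ordinary kernel C, the corrected pattern verified by DCL-fixed.litmus
would look roughly like the sketch below; the get_singleton() name, the
lock, and the variables are hypothetical stand-ins for the litmus test's
flag and data.

	static DEFINE_SPINLOCK(init_lock);
	static int initialized;		/* "flag" in the litmus test */
	static int singleton_data;	/* "data" in the litmus test */

	/* The fast-path smp_load_acquire() pairs with the
	 * smp_store_release() under the lock, so any caller that sees
	 * initialized != 0 is guaranteed to also see singleton_data. */
	static int get_singleton(void)
	{
		if (!smp_load_acquire(&initialized)) {
			spin_lock(&init_lock);
			if (!READ_ONCE(initialized)) {
				WRITE_ONCE(singleton_data, 1);
				smp_store_release(&initialized, 1);
			}
			spin_unlock(&init_lock);
		}
		return READ_ONCE(singleton_data);
	}

With plain READ_ONCE()/WRITE_ONCE() in place of the acquire/release pair,
as in DCL-broken.litmus, a caller can observe initialized == 1 and still
read stale singleton_data.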
diff --git a/Documentation/litmus-tests/locking/RM-broken.litmus b/Documentation/litmus-tests/locking/RM-broken.litmus
new file mode 100644
index 0000000000..b7ef30cedf
--- /dev/null
+++ b/Documentation/litmus-tests/locking/RM-broken.litmus
@@ -0,0 +1,41 @@
+C RM-broken
+
+(*
+ * Result: DEADLOCK
+ *
+ * This litmus test demonstrates that the old "roach motel" approach
+ * to locking, where code can be freely moved into critical sections,
+ * cannot be used in the Linux kernel.
+ *)
+
+{
+	int x;
+	atomic_t y;
+}
+
+P0(int *x, atomic_t *y, spinlock_t *lck)
+{
+	int r2;
+
+	spin_lock(lck);
+	r2 = atomic_inc_return(y);
+	WRITE_ONCE(*x, 1);
+	spin_unlock(lck);
+}
+
+P1(int *x, atomic_t *y, spinlock_t *lck)
+{
+	int r0;
+	int r1;
+	int r2;
+
+	spin_lock(lck);
+	r0 = READ_ONCE(*x);
+	r1 = READ_ONCE(*x);
+	r2 = atomic_inc_return(y);
+	spin_unlock(lck);
+}
+
+locations [x;0:r2;1:r0;1:r1;1:r2]
+filter (1:r0=0 /\ 1:r1=1)
+exists (1:r2=1)
diff --git a/Documentation/litmus-tests/locking/RM-fixed.litmus b/Documentation/litmus-tests/locking/RM-fixed.litmus
new file mode 100644
index 0000000000..b628175596
--- /dev/null
+++ b/Documentation/litmus-tests/locking/RM-fixed.litmus
@@ -0,0 +1,41 @@
+C RM-fixed
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that the old "roach motel" approach
+ * to locking, where code can be freely moved into critical sections,
+ * cannot be used in the Linux kernel.
+ *)
+
+{
+	int x;
+	atomic_t y;
+}
+
+P0(int *x, atomic_t *y, spinlock_t *lck)
+{
+	int r2;
+
+	spin_lock(lck);
+	r2 = atomic_inc_return(y);
+	WRITE_ONCE(*x, 1);
+	spin_unlock(lck);
+}
+
+P1(int *x, atomic_t *y, spinlock_t *lck)
+{
+	int r0;
+	int r1;
+	int r2;
+
+	r0 = READ_ONCE(*x);
+	r1 = READ_ONCE(*x);
+	spin_lock(lck);
+	r2 = atomic_inc_return(y);
+	spin_unlock(lck);
+}
+
+locations [x;0:r2;1:r0;1:r1;1:r2]
+filter (1:r0=0 /\ 1:r1=1)
+exists (1:r2=1)
diff --git a/Documentation/litmus-tests/rcu/RCU+sync+free.litmus b/Documentation/litmus-tests/rcu/RCU+sync+free.litmus
new file mode 100644
index 0000000000..4ee67e12f5
--- /dev/null
+++ b/Documentation/litmus-tests/rcu/RCU+sync+free.litmus
@@ -0,0 +1,42 @@
+C RCU+sync+free
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that an RCU reader can never see a write
+ * that follows a grace period if it did not also see the writes that
+ * precede that grace period.
+ *
+ * This is a typical pattern of RCU usage, where the write before the grace
+ * period assigns a pointer, and the writes following the grace period destroy
+ * the object that the pointer used to point to.
+ *
+ * This is one implication of the RCU grace-period guarantee, which says (among
+ * other things) that an RCU read-side critical section cannot span a grace period.
+ *)
+
+{
+int x = 1;
+int *y = &x;
+int z = 1;
+}
+
+P0(int *x, int *z, int **y)
+{
+	int *r0;
+	int r1;
+
+	rcu_read_lock();
+	r0 = rcu_dereference(*y);
+	r1 = READ_ONCE(*r0);
+	rcu_read_unlock();
+}
+
+P1(int *x, int *z, int **y)
+{
+	rcu_assign_pointer(*y, z);
+	synchronize_rcu();
+	WRITE_ONCE(*x, 0);
+}
+
+exists (0:r0=x /\ 0:r1=0)
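The "destroy the object" usage described in the RCU+sync+free comment is
the classic updater pattern, with kfree() standing in for the test's
post-grace-period WRITE_ONCE(). A minimal sketch, with a hypothetical
struct foo and no update-side locking shown (hence the constant-1 lockdep
condition):

	struct foo {
		int a;
	};

	static struct foo __rcu *gp;	/* hypothetical RCU-protected pointer */

	/* Reader, as in P0: pointer fetch and dereference both sit
	 * inside a single read-side critical section. */
	static int reader(void)
	{
		struct foo *p;
		int val = -1;

		rcu_read_lock();
		p = rcu_dereference(gp);
		if (p)
			val = READ_ONCE(p->a);
		rcu_read_unlock();
		return val;
	}

	/* Updater, as in P1: once synchronize_rcu() returns, no reader
	 * can still be using the old object, so freeing it is the safe
	 * equivalent of the post-grace-period store in the test. */
	static void replace_foo(struct foo *newp)
	{
		struct foo *oldp = rcu_dereference_protected(gp, 1);

		rcu_assign_pointer(gp, newp);
		synchronize_rcu();
		kfree(oldp);
	}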
diff --git a/Documentation/litmus-tests/rcu/RCU+sync+read.litmus b/Documentation/litmus-tests/rcu/RCU+sync+read.litmus
new file mode 100644
index 0000000000..f341767202
--- /dev/null
+++ b/Documentation/litmus-tests/rcu/RCU+sync+read.litmus
@@ -0,0 +1,37 @@
+C RCU+sync+read
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that after a grace period, an RCU updater
+ * always sees all stores done in prior RCU read-side critical sections. Such
+ * read-side critical sections would have ended before the grace period ended.
+ *
+ * This is one implication of the RCU grace-period guarantee, which says (among
+ * other things) that an RCU read-side critical section cannot span a grace period.
+ *)
+
+{
+int x = 0;
+int y = 0;
+}
+
+P0(int *x, int *y)
+{
+	rcu_read_lock();
+	WRITE_ONCE(*x, 1);
+	WRITE_ONCE(*y, 1);
+	rcu_read_unlock();
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*x);
+	synchronize_rcu();
+	r1 = READ_ONCE(*y);
+}
+
+exists (1:r0=1 /\ 1:r1=0)
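As the new README says, tools/memory-model/README explains how to run these
tests. Assuming the herdtools7 suite is installed, a single test can be
checked from the tools/memory-model directory along these lines:

	$ herd7 -conf linux-kernel.cfg \
		../../Documentation/litmus-tests/rcu/RCU+sync+read.litmus

The "Observation" line of the output reports Never, Sometimes, or Always,
which should match the "Result:" comment at the top of the litmus test.
Alternatively, klitmus7 (also covered in tools/memory-model/README) converts
a litmus test into a kernel module that runs the test on real hardware.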