Diffstat (limited to 'tools/memory-model/litmus-tests')
36 files changed, 1377 insertions, 0 deletions
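Each litmus test added below begins with a "Result:" comment giving the outcome that the Linux-kernel memory model is expected to produce for the test's "exists" clause. A typical way to check a single test (assuming the herd7 tool from the herdtools7 suite is installed, and running from the tools/memory-model directory) is:

	$ herd7 -conf linux-kernel.cfg litmus-tests/MP+pooncerelease+poacquireonce.litmus

herd7 then lists the reachable final states and reports whether the condition in the test's "exists" clause can ever be satisfied.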
diff --git a/tools/memory-model/litmus-tests/.gitignore b/tools/memory-model/litmus-tests/.gitignore new file mode 100644 index 000000000..c492a1dda --- /dev/null +++ b/tools/memory-model/litmus-tests/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +*.litmus.out diff --git a/tools/memory-model/litmus-tests/CoRR+poonceonce+Once.litmus b/tools/memory-model/litmus-tests/CoRR+poonceonce+Once.litmus new file mode 100644 index 000000000..967f9f2a6 --- /dev/null +++ b/tools/memory-model/litmus-tests/CoRR+poonceonce+Once.litmus @@ -0,0 +1,26 @@ +C CoRR+poonceonce+Once + +(* + * Result: Never + * + * Test of read-read coherence, that is, whether or not two successive + * reads from the same variable are ordered. + *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); +} + +P1(int *x) +{ + int r0; + int r1; + + r0 = READ_ONCE(*x); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 1:r1=0) diff --git a/tools/memory-model/litmus-tests/CoRW+poonceonce+Once.litmus b/tools/memory-model/litmus-tests/CoRW+poonceonce+Once.litmus new file mode 100644 index 000000000..4635739f3 --- /dev/null +++ b/tools/memory-model/litmus-tests/CoRW+poonceonce+Once.litmus @@ -0,0 +1,25 @@ +C CoRW+poonceonce+Once + +(* + * Result: Never + * + * Test of read-write coherence, that is, whether or not a read from + * a given variable and a later write to that same variable are ordered. + *) + +{} + +P0(int *x) +{ + int r0; + + r0 = READ_ONCE(*x); + WRITE_ONCE(*x, 1); +} + +P1(int *x) +{ + WRITE_ONCE(*x, 2); +} + +exists (x=2 /\ 0:r0=2) diff --git a/tools/memory-model/litmus-tests/CoWR+poonceonce+Once.litmus b/tools/memory-model/litmus-tests/CoWR+poonceonce+Once.litmus new file mode 100644 index 000000000..bb068c92d --- /dev/null +++ b/tools/memory-model/litmus-tests/CoWR+poonceonce+Once.litmus @@ -0,0 +1,25 @@ +C CoWR+poonceonce+Once + +(* + * Result: Never + * + * Test of write-read coherence, that is, whether or not a write to a + * given variable and a later read from that same variable are ordered. + *) + +{} + +P0(int *x) +{ + int r0; + + WRITE_ONCE(*x, 1); + r0 = READ_ONCE(*x); +} + +P1(int *x) +{ + WRITE_ONCE(*x, 2); +} + +exists (x=1 /\ 0:r0=2) diff --git a/tools/memory-model/litmus-tests/CoWW+poonceonce.litmus b/tools/memory-model/litmus-tests/CoWW+poonceonce.litmus new file mode 100644 index 000000000..0d9f0a958 --- /dev/null +++ b/tools/memory-model/litmus-tests/CoWW+poonceonce.litmus @@ -0,0 +1,18 @@ +C CoWW+poonceonce + +(* + * Result: Never + * + * Test of write-write coherence, that is, whether or not two successive + * writes to the same variable are ordered. + *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); + WRITE_ONCE(*x, 2); +} + +exists (x=1) diff --git a/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus b/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus new file mode 100644 index 000000000..e729d2776 --- /dev/null +++ b/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus @@ -0,0 +1,45 @@ +C IRIW+fencembonceonces+OnceOnce + +(* + * Result: Never + * + * Test of independent reads from independent writes with smp_mb() + * between each pairs of reads. In other words, is smp_mb() sufficient to + * cause two different reading processes to agree on the order of a pair + * of writes, where each write is to a different variable by a different + * process? This litmus test exercises LKMM's "propagation" rule. 
+ *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); +} + +P1(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*x); + smp_mb(); + r1 = READ_ONCE(*y); +} + +P2(int *y) +{ + WRITE_ONCE(*y, 1); +} + +P3(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*y); + smp_mb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 1:r1=0 /\ 3:r0=1 /\ 3:r1=0) diff --git a/tools/memory-model/litmus-tests/IRIW+poonceonces+OnceOnce.litmus b/tools/memory-model/litmus-tests/IRIW+poonceonces+OnceOnce.litmus new file mode 100644 index 000000000..4b54dd6a6 --- /dev/null +++ b/tools/memory-model/litmus-tests/IRIW+poonceonces+OnceOnce.litmus @@ -0,0 +1,43 @@ +C IRIW+poonceonces+OnceOnce + +(* + * Result: Sometimes + * + * Test of independent reads from independent writes with nothing + * between each pairs of reads. In other words, is anything at all + * needed to cause two different reading processes to agree on the order + * of a pair of writes, where each write is to a different variable by a + * different process? + *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); +} + +P1(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*x); + r1 = READ_ONCE(*y); +} + +P2(int *y) +{ + WRITE_ONCE(*y, 1); +} + +P3(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*y); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 1:r1=0 /\ 3:r0=1 /\ 3:r1=0) diff --git a/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus b/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus new file mode 100644 index 000000000..094d58df7 --- /dev/null +++ b/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus @@ -0,0 +1,40 @@ +C ISA2+pooncelock+pooncelock+pombonce + +(* + * Result: Never + * + * This test shows that write-write ordering provided by locks + * (in P0() and P1()) is visible to external process P2(). + *) + +{} + +P0(int *x, int *y, spinlock_t *mylock) +{ + spin_lock(mylock); + WRITE_ONCE(*x, 1); + WRITE_ONCE(*y, 1); + spin_unlock(mylock); +} + +P1(int *y, int *z, spinlock_t *mylock) +{ + int r0; + + spin_lock(mylock); + r0 = READ_ONCE(*y); + WRITE_ONCE(*z, 1); + spin_unlock(mylock); +} + +P2(int *x, int *z) +{ + int r1; + int r2; + + r2 = READ_ONCE(*z); + smp_mb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 2:r2=1 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/ISA2+poonceonces.litmus b/tools/memory-model/litmus-tests/ISA2+poonceonces.litmus new file mode 100644 index 000000000..b321aa6f4 --- /dev/null +++ b/tools/memory-model/litmus-tests/ISA2+poonceonces.litmus @@ -0,0 +1,37 @@ +C ISA2+poonceonces + +(* + * Result: Sometimes + * + * Given a release-acquire chain ordering the first process's store + * against the last process's load, is ordering preserved if all of the + * smp_store_release() invocations are replaced by WRITE_ONCE() and all + * of the smp_load_acquire() invocations are replaced by READ_ONCE()? 
+ *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + WRITE_ONCE(*y, 1); +} + +P1(int *y, int *z) +{ + int r0; + + r0 = READ_ONCE(*y); + WRITE_ONCE(*z, 1); +} + +P2(int *x, int *z) +{ + int r0; + int r1; + + r0 = READ_ONCE(*z); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus b/tools/memory-model/litmus-tests/ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus new file mode 100644 index 000000000..025b0462e --- /dev/null +++ b/tools/memory-model/litmus-tests/ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus @@ -0,0 +1,39 @@ +C ISA2+pooncerelease+poacquirerelease+poacquireonce + +(* + * Result: Never + * + * This litmus test demonstrates that a release-acquire chain suffices + * to order P0()'s initial write against P2()'s final read. The reason + * that the release-acquire chain suffices is because in all but one + * case (P2() to P0()), each process reads from the preceding process's + * write. In memory-model-speak, there is only one non-reads-from + * (AKA non-rf) link, so release-acquire is all that is needed. + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + smp_store_release(y, 1); +} + +P1(int *y, int *z) +{ + int r0; + + r0 = smp_load_acquire(y); + smp_store_release(z, 1); +} + +P2(int *x, int *z) +{ + int r0; + int r1; + + r0 = smp_load_acquire(z); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus b/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus new file mode 100644 index 000000000..4727f5aaf --- /dev/null +++ b/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus @@ -0,0 +1,34 @@ +C LB+fencembonceonce+ctrlonceonce + +(* + * Result: Never + * + * This litmus test demonstrates that lightweight ordering suffices for + * the load-buffering pattern, in other words, preventing all processes + * reading from the preceding process's write. In this example, the + * combination of a control dependency and a full memory barrier are enough + * to do the trick. (But the full memory barrier could be replaced with + * another control dependency and order would still be maintained.) + *) + +{} + +P0(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*x); + if (r0) + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*y); + smp_mb(); + WRITE_ONCE(*x, 1); +} + +exists (0:r0=1 /\ 1:r0=1) diff --git a/tools/memory-model/litmus-tests/LB+poacquireonce+pooncerelease.litmus b/tools/memory-model/litmus-tests/LB+poacquireonce+pooncerelease.litmus new file mode 100644 index 000000000..07b9904b0 --- /dev/null +++ b/tools/memory-model/litmus-tests/LB+poacquireonce+pooncerelease.litmus @@ -0,0 +1,29 @@ +C LB+poacquireonce+pooncerelease + +(* + * Result: Never + * + * Does a release-acquire pair suffice for the load-buffering litmus + * test, where each process reads from one of two variables then writes + * to the other? 
+ *) + +{} + +P0(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*x); + smp_store_release(y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = smp_load_acquire(y); + WRITE_ONCE(*x, 1); +} + +exists (0:r0=1 /\ 1:r0=1) diff --git a/tools/memory-model/litmus-tests/LB+poonceonces.litmus b/tools/memory-model/litmus-tests/LB+poonceonces.litmus new file mode 100644 index 000000000..74c49cb3c --- /dev/null +++ b/tools/memory-model/litmus-tests/LB+poonceonces.litmus @@ -0,0 +1,28 @@ +C LB+poonceonces + +(* + * Result: Sometimes + * + * Can the counter-intuitive outcome for the load-buffering pattern + * be prevented even with no explicit ordering? + *) + +{} + +P0(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*x); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*y); + WRITE_ONCE(*x, 1); +} + +exists (0:r0=1 /\ 1:r0=1) diff --git a/tools/memory-model/litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus b/tools/memory-model/litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus new file mode 100644 index 000000000..eb34123a6 --- /dev/null +++ b/tools/memory-model/litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus @@ -0,0 +1,35 @@ +C LB+unlocklockonceonce+poacquireonce + +(* + * Result: Never + * + * If two locked critical sections execute on the same CPU, all accesses + * in the first must execute before any accesses in the second, even if the + * critical sections are protected by different locks. Note: Even when a + * write executes before a read, their memory effects can be reordered from + * the viewpoint of another CPU (the kind of reordering allowed by TSO). + *) + +{} + +P0(spinlock_t *s, spinlock_t *t, int *x, int *y) +{ + int r1; + + spin_lock(s); + r1 = READ_ONCE(*x); + spin_unlock(s); + spin_lock(t); + WRITE_ONCE(*y, 1); + spin_unlock(t); +} + +P1(int *x, int *y) +{ + int r2; + + r2 = smp_load_acquire(y); + WRITE_ONCE(*x, 1); +} + +exists (0:r1=1 /\ 1:r2=1) diff --git a/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus b/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus new file mode 100644 index 000000000..f8ca12298 --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus @@ -0,0 +1,30 @@ +C MP+fencewmbonceonce+fencermbonceonce + +(* + * Result: Never + * + * This litmus test demonstrates that smp_wmb() and smp_rmb() provide + * sufficient ordering for the message-passing pattern. However, it + * is usually better to use smp_store_release() and smp_load_acquire(). + *) + +{} + +P0(int *buf, int *flag) // Producer +{ + WRITE_ONCE(*buf, 1); + smp_wmb(); + WRITE_ONCE(*flag, 1); +} + +P1(int *buf, int *flag) // Consumer +{ + int r0; + int r1; + + r0 = READ_ONCE(*flag); + smp_rmb(); + r1 = READ_ONCE(*buf); +} + +exists (1:r0=1 /\ 1:r1=0) (* Bad outcome. *) diff --git a/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus b/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus new file mode 100644 index 000000000..d84160b9c --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus @@ -0,0 +1,33 @@ +C MP+onceassign+derefonce + +(* + * Result: Never + * + * This litmus test demonstrates that rcu_assign_pointer() and + * rcu_dereference() suffice to ensure that an RCU reader will not see + * pre-initialization garbage when it traverses an RCU-protected data + * structure containing a newly inserted element. 
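+ *
+ * The kernel idiom that this test abstracts looks roughly like the
+ * sketch below; the structure, global pointer, and function names are
+ * purely illustrative and are not part of this patch:
+ *
+ *	struct foo {
+ *		int a;
+ *	};
+ *	struct foo *gp;
+ *
+ *	void publish(struct foo *p)
+ *	{
+ *		p->a = 1;			/* Initialize first... */
+ *		rcu_assign_pointer(gp, p);	/* ...then publish. */
+ *	}
+ *
+ *	int reader(void)
+ *	{
+ *		struct foo *p;
+ *		int ret = -1;
+ *
+ *		rcu_read_lock();
+ *		p = rcu_dereference(gp);
+ *		if (p)
+ *			ret = READ_ONCE(p->a);	/* Never sees pre-init garbage. */
+ *		rcu_read_unlock();
+ *		return ret;
+ *	}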
+ *) + +{ +p=y; +} + +P0(int *x, int **p) // Producer +{ + WRITE_ONCE(*x, 1); + rcu_assign_pointer(*p, x); +} + +P1(int *x, int **p) // Consumer +{ + int *r0; + int r1; + + rcu_read_lock(); + r0 = rcu_dereference(*p); + r1 = READ_ONCE(*r0); + rcu_read_unlock(); +} + +exists (1:r0=x /\ 1:r1=0) (* Bad outcome. *) diff --git a/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus b/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus new file mode 100644 index 000000000..ba91cc63e --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus @@ -0,0 +1,34 @@ +C MP+polockmbonce+poacquiresilsil + +(* + * Result: Never + * + * Do spinlocks combined with smp_mb__after_spinlock() provide order + * to outside observers using spin_is_locked() to sense the lock-held + * state, ordered by acquire? Note that when the first spin_is_locked() + * returns false and the second true, we know that the smp_load_acquire() + * executed before the lock was acquired (loosely speaking). + *) + +{} + +P0(spinlock_t *lo, int *x) // Producer +{ + spin_lock(lo); + smp_mb__after_spinlock(); + WRITE_ONCE(*x, 1); + spin_unlock(lo); +} + +P1(spinlock_t *lo, int *x) // Consumer +{ + int r1; + int r2; + int r3; + + r1 = smp_load_acquire(x); + r2 = spin_is_locked(lo); + r3 = spin_is_locked(lo); +} + +exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1) (* Bad outcome. *) diff --git a/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus b/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus new file mode 100644 index 000000000..a5ea3ed8f --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus @@ -0,0 +1,33 @@ +C MP+polockonce+poacquiresilsil + +(* + * Result: Sometimes + * + * Do spinlocks provide order to outside observers using spin_is_locked() + * to sense the lock-held state, ordered by acquire? Note that when the + * first spin_is_locked() returns false and the second true, we know that + * the smp_load_acquire() executed before the lock was acquired (loosely + * speaking). + *) + +{} + +P0(spinlock_t *lo, int *x) // Producer +{ + spin_lock(lo); + WRITE_ONCE(*x, 1); + spin_unlock(lo); +} + +P1(spinlock_t *lo, int *x) // Consumer +{ + int r1; + int r2; + int r3; + + r1 = smp_load_acquire(x); + r2 = spin_is_locked(lo); + r3 = spin_is_locked(lo); +} + +exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1) (* Bad outcome. *) diff --git a/tools/memory-model/litmus-tests/MP+polocks.litmus b/tools/memory-model/litmus-tests/MP+polocks.litmus new file mode 100644 index 000000000..e6af05f70 --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+polocks.litmus @@ -0,0 +1,35 @@ +C MP+polocks + +(* + * Result: Never + * + * This litmus test demonstrates how lock acquisitions and releases can + * stand in for smp_load_acquire() and smp_store_release(), respectively. + * In other words, when holding a given lock (or indeed after releasing a + * given lock), a CPU is not only guaranteed to see the accesses that other + * CPUs made while previously holding that lock, it is also guaranteed + * to see all prior accesses by those other CPUs. + *) + +{} + +P0(int *buf, int *flag, spinlock_t *mylock) // Producer +{ + WRITE_ONCE(*buf, 1); + spin_lock(mylock); + WRITE_ONCE(*flag, 1); + spin_unlock(mylock); +} + +P1(int *buf, int *flag, spinlock_t *mylock) // Consumer +{ + int r0; + int r1; + + spin_lock(mylock); + r0 = READ_ONCE(*flag); + spin_unlock(mylock); + r1 = READ_ONCE(*buf); +} + +exists (1:r0=1 /\ 1:r1=0) (* Bad outcome. 
*) diff --git a/tools/memory-model/litmus-tests/MP+poonceonces.litmus b/tools/memory-model/litmus-tests/MP+poonceonces.litmus new file mode 100644 index 000000000..ba9c99c6c --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+poonceonces.litmus @@ -0,0 +1,27 @@ +C MP+poonceonces + +(* + * Result: Sometimes + * + * Can the counter-intuitive message-passing outcome be prevented with + * no ordering at all? + *) + +{} + +P0(int *buf, int *flag) // Producer +{ + WRITE_ONCE(*buf, 1); + WRITE_ONCE(*flag, 1); +} + +P1(int *buf, int *flag) // Consumer +{ + int r0; + int r1; + + r0 = READ_ONCE(*flag); + r1 = READ_ONCE(*buf); +} + +exists (1:r0=1 /\ 1:r1=0) (* Bad outcome. *) diff --git a/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus b/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus new file mode 100644 index 000000000..f174bfe61 --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus @@ -0,0 +1,28 @@ +C MP+pooncerelease+poacquireonce + +(* + * Result: Never + * + * This litmus test demonstrates that smp_store_release() and + * smp_load_acquire() provide sufficient ordering for the message-passing + * pattern. + *) + +{} + +P0(int *buf, int *flag) // Producer +{ + WRITE_ONCE(*buf, 1); + smp_store_release(flag, 1); +} + +P1(int *buf, int *flag) // Consumer +{ + int r0; + int r1; + + r0 = smp_load_acquire(flag); + r1 = READ_ONCE(*buf); +} + +exists (1:r0=1 /\ 1:r1=0) (* Bad outcome. *) diff --git a/tools/memory-model/litmus-tests/MP+porevlocks.litmus b/tools/memory-model/litmus-tests/MP+porevlocks.litmus new file mode 100644 index 000000000..b95991411 --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+porevlocks.litmus @@ -0,0 +1,35 @@ +C MP+porevlocks + +(* + * Result: Never + * + * This litmus test demonstrates how lock acquisitions and releases can + * stand in for smp_load_acquire() and smp_store_release(), respectively. + * In other words, when holding a given lock (or indeed after releasing a + * given lock), a CPU is not only guaranteed to see the accesses that other + * CPUs made while previously holding that lock, it is also guaranteed to + * see all prior accesses by those other CPUs. + *) + +{} + +P0(int *buf, int *flag, spinlock_t *mylock) // Consumer +{ + int r0; + int r1; + + r0 = READ_ONCE(*flag); + spin_lock(mylock); + r1 = READ_ONCE(*buf); + spin_unlock(mylock); +} + +P1(int *buf, int *flag, spinlock_t *mylock) // Producer +{ + spin_lock(mylock); + WRITE_ONCE(*buf, 1); + spin_unlock(mylock); + WRITE_ONCE(*flag, 1); +} + +exists (0:r0=1 /\ 0:r1=0) (* Bad outcome. *) diff --git a/tools/memory-model/litmus-tests/MP+unlocklockonceonce+fencermbonceonce.litmus b/tools/memory-model/litmus-tests/MP+unlocklockonceonce+fencermbonceonce.litmus new file mode 100644 index 000000000..2feb1398b --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+unlocklockonceonce+fencermbonceonce.litmus @@ -0,0 +1,33 @@ +C MP+unlocklockonceonce+fencermbonceonce + +(* + * Result: Never + * + * If two locked critical sections execute on the same CPU, stores in the + * first must propagate to each CPU before stores in the second do, even if + * the critical sections are protected by different locks. 
+ *) + +{} + +P0(spinlock_t *s, spinlock_t *t, int *x, int *y) +{ + spin_lock(s); + WRITE_ONCE(*x, 1); + spin_unlock(s); + spin_lock(t); + WRITE_ONCE(*y, 1); + spin_unlock(t); +} + +P1(int *x, int *y) +{ + int r1; + int r2; + + r1 = READ_ONCE(*y); + smp_rmb(); + r2 = READ_ONCE(*x); +} + +exists (1:r1=1 /\ 1:r2=0) diff --git a/tools/memory-model/litmus-tests/R+fencembonceonces.litmus b/tools/memory-model/litmus-tests/R+fencembonceonces.litmus new file mode 100644 index 000000000..222a0b850 --- /dev/null +++ b/tools/memory-model/litmus-tests/R+fencembonceonces.litmus @@ -0,0 +1,30 @@ +C R+fencembonceonces + +(* + * Result: Never + * + * This is the fully ordered (via smp_mb()) version of one of the classic + * counterintuitive litmus tests that illustrates the effects of store + * propagation delays. Note that weakening either of the barriers would + * cause the resulting test to be allowed. + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + smp_mb(); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*y, 2); + smp_mb(); + r0 = READ_ONCE(*x); +} + +exists (y=2 /\ 1:r0=0) diff --git a/tools/memory-model/litmus-tests/R+poonceonces.litmus b/tools/memory-model/litmus-tests/R+poonceonces.litmus new file mode 100644 index 000000000..5386f128a --- /dev/null +++ b/tools/memory-model/litmus-tests/R+poonceonces.litmus @@ -0,0 +1,27 @@ +C R+poonceonces + +(* + * Result: Sometimes + * + * This is the unordered (thus lacking smp_mb()) version of one of the + * classic counterintuitive litmus tests that illustrates the effects of + * store propagation delays. + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*y, 2); + r0 = READ_ONCE(*x); +} + +exists (y=2 /\ 1:r0=0) diff --git a/tools/memory-model/litmus-tests/README b/tools/memory-model/litmus-tests/README new file mode 100644 index 000000000..d311a0ff1 --- /dev/null +++ b/tools/memory-model/litmus-tests/README @@ -0,0 +1,261 @@ +============ +LITMUS TESTS +============ + +CoRR+poonceonce+Once.litmus + Test of read-read coherence, that is, whether or not two + successive reads from the same variable are ordered. + +CoRW+poonceonce+Once.litmus + Test of read-write coherence, that is, whether or not a read + from a given variable followed by a write to that same variable + are ordered. + +CoWR+poonceonce+Once.litmus + Test of write-read coherence, that is, whether or not a write + to a given variable followed by a read from that same variable + are ordered. + +CoWW+poonceonce.litmus + Test of write-write coherence, that is, whether or not two + successive writes to the same variable are ordered. + +IRIW+fencembonceonces+OnceOnce.litmus + Test of independent reads from independent writes with smp_mb() + between each pairs of reads. In other words, is smp_mb() + sufficient to cause two different reading processes to agree on + the order of a pair of writes, where each write is to a different + variable by a different process? This litmus test is forbidden + by LKMM's propagation rule. + +IRIW+poonceonces+OnceOnce.litmus + Test of independent reads from independent writes with nothing + between each pairs of reads. In other words, is anything at all + needed to cause two different reading processes to agree on the + order of a pair of writes, where each write is to a different + variable by a different process? 
+ +ISA2+pooncelock+pooncelock+pombonce.litmus + Tests whether the ordering provided by a lock-protected S + litmus test is visible to an external process whose accesses are + separated by smp_mb(). This addition of an external process to + S is otherwise known as ISA2. + +ISA2+poonceonces.litmus + As below, but with store-release replaced with WRITE_ONCE() + and load-acquire replaced with READ_ONCE(). + +ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus + Can a release-acquire chain order a prior store against + a later load? + +LB+fencembonceonce+ctrlonceonce.litmus + Does a control dependency and an smp_mb() suffice for the + load-buffering litmus test, where each process reads from one + of two variables then writes to the other? + +LB+poacquireonce+pooncerelease.litmus + Does a release-acquire pair suffice for the load-buffering + litmus test, where each process reads from one of two variables then + writes to the other? + +LB+poonceonces.litmus + As above, but with store-release replaced with WRITE_ONCE() + and load-acquire replaced with READ_ONCE(). + +LB+unlocklockonceonce+poacquireonce.litmus + Does a unlock+lock pair provides ordering guarantee between a + load and a store? + +MP+onceassign+derefonce.litmus + As below, but with rcu_assign_pointer() and an rcu_dereference(). + +MP+polockmbonce+poacquiresilsil.litmus + Protect the access with a lock and an smp_mb__after_spinlock() + in one process, and use an acquire load followed by a pair of + spin_is_locked() calls in the other process. + +MP+polockonce+poacquiresilsil.litmus + Protect the access with a lock in one process, and use an + acquire load followed by a pair of spin_is_locked() calls + in the other process. + +MP+polocks.litmus + As below, but with the second access of the writer process + and the first access of reader process protected by a lock. + +MP+poonceonces.litmus + As below, but without the smp_rmb() and smp_wmb(). + +MP+pooncerelease+poacquireonce.litmus + As below, but with a release-acquire chain. + +MP+porevlocks.litmus + As below, but with the first access of the writer process + and the second access of reader process protected by a lock. + +MP+unlocklockonceonce+fencermbonceonce.litmus + Does a unlock+lock pair provides ordering guarantee between a + store and another store? + +MP+fencewmbonceonce+fencermbonceonce.litmus + Does a smp_wmb() (between the stores) and an smp_rmb() (between + the loads) suffice for the message-passing litmus test, where one + process writes data and then a flag, and the other process reads + the flag and then the data. (This is similar to the ISA2 tests, + but with two processes instead of three.) + +R+fencembonceonces.litmus + This is the fully ordered (via smp_mb()) version of one of + the classic counterintuitive litmus tests that illustrates the + effects of store propagation delays. + +R+poonceonces.litmus + As above, but without the smp_mb() invocations. + +SB+fencembonceonces.litmus + This is the fully ordered (again, via smp_mb() version of store + buffering, which forms the core of Dekker's mutual-exclusion + algorithm. + +SB+poonceonces.litmus + As above, but without the smp_mb() invocations. + +SB+rfionceonce-poonceonces.litmus + This litmus test demonstrates that LKMM is not fully multicopy + atomic. (Neither is it other multicopy atomic.) This litmus test + also demonstrates the "locations" debugging aid, which designates + additional registers and locations to be printed out in the dump + of final states in the herd7 output. 
Without the "locations" + statement, only those registers and locations mentioned in the + "exists" clause will be printed. + +S+poonceonces.litmus + As below, but without the smp_wmb() and acquire load. + +S+fencewmbonceonce+poacquireonce.litmus + Can a smp_wmb(), instead of a release, and an acquire order + a prior store against a subsequent store? + +WRC+poonceonces+Once.litmus +WRC+pooncerelease+fencermbonceonce+Once.litmus + These two are members of an extension of the MP litmus-test + class in which the first write is moved to a separate process. + The second is forbidden because smp_store_release() is + A-cumulative in LKMM. + +Z6.0+pooncelock+pooncelock+pombonce.litmus + Is the ordering provided by a spin_unlock() and a subsequent + spin_lock() sufficient to make ordering apparent to accesses + by a process not holding the lock? + +Z6.0+pooncelock+poonceLock+pombonce.litmus + As above, but with smp_mb__after_spinlock() immediately + following the spin_lock(). + +Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus + Is the ordering provided by a release-acquire chain sufficient + to make ordering apparent to accesses by a process that does + not participate in that release-acquire chain? + +A great many more litmus tests are available here: + + https://github.com/paulmckrcu/litmus + +================== +LITMUS TEST NAMING +================== + +Litmus tests are usually named based on their contents, which means that +looking at the name tells you what the litmus test does. The naming +scheme covers litmus tests having a single cycle that passes through +each process exactly once, so litmus tests not fitting this description +are named on an ad-hoc basis. + +The structure of a litmus-test name is the litmus-test class, a plus +sign ("+"), and one string for each process, separated by plus signs. +The end of the name is ".litmus". + +The litmus-test classes may be found in the infamous test6.pdf: +https://www.cl.cam.ac.uk/~pes20/ppc-supplemental/test6.pdf +Each class defines the pattern of accesses and of the variables accessed. +For example, if the one process writes to a pair of variables, and +the other process reads from these same variables, the corresponding +litmus-test class is "MP" (message passing), which may be found on the +left-hand end of the second row of tests on page one of test6.pdf. + +The strings used to identify the actions carried out by each process are +complex due to a desire to have short(er) names. Thus, there is a tool to +generate these strings from a given litmus test's actions. For example, +consider the processes from SB+rfionceonce-poonceonces.litmus: + + P0(int *x, int *y) + { + int r1; + int r2; + + WRITE_ONCE(*x, 1); + r1 = READ_ONCE(*x); + r2 = READ_ONCE(*y); + } + + P1(int *x, int *y) + { + int r3; + int r4; + + WRITE_ONCE(*y, 1); + r3 = READ_ONCE(*y); + r4 = READ_ONCE(*x); + } + +The next step is to construct a space-separated list of descriptors, +interleaving descriptions of the relation between a pair of consecutive +accesses with descriptions of the second access in the pair. + +P0()'s WRITE_ONCE() is read by its first READ_ONCE(), which is a +reads-from link (rf) and internal to the P0() process. This is +"rfi", which is an abbreviation for "reads-from internal". Because +some of the tools string these abbreviations together with space +characters separating processes, the first character is capitalized, +resulting in "Rfi". + +P0()'s second access is a READ_ONCE(), as opposed to (for example) +smp_load_acquire(), so next is "Once". 
Thus far, we have "Rfi Once". + +P0()'s third access is also a READ_ONCE(), but to y rather than x. +This is related to P0()'s second access by program order ("po"), +to a different variable ("d"), and both accesses are reads ("RR"). +The resulting descriptor is "PodRR". Because P0()'s third access is +READ_ONCE(), we add another "Once" descriptor. + +A from-read ("fre") relation links P0()'s third to P1()'s first +access, and the resulting descriptor is "Fre". P1()'s first access is +WRITE_ONCE(), which as before gives the descriptor "Once". The string +thus far is thus "Rfi Once PodRR Once Fre Once". + +The remainder of P1() is similar to P0(), which means we add +"Rfi Once PodRR Once". Another fre links P1()'s last access to +P0()'s first access, which is WRITE_ONCE(), so we add "Fre Once". +The full string is thus: + + Rfi Once PodRR Once Fre Once Rfi Once PodRR Once Fre Once + +This string can be given to the "norm7" and "classify7" tools to +produce the name: + + $ norm7 -bell linux-kernel.bell \ + Rfi Once PodRR Once Fre Once Rfi Once PodRR Once Fre Once | \ + sed -e 's/:.*//g' + SB+rfionceonce-poonceonces + +Adding the ".litmus" suffix: SB+rfionceonce-poonceonces.litmus + +The descriptors that describe connections between consecutive accesses +within the cycle through a given litmus test can be provided by the herd7 +tool (Rfi, Po, Fre, and so on) or by the linux-kernel.bell file (Once, +Release, Acquire, and so on). + +To see the full list of descriptors, execute the following command: + + $ diyone7 -bell linux-kernel.bell -show edges diff --git a/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus b/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus new file mode 100644 index 000000000..18479823c --- /dev/null +++ b/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus @@ -0,0 +1,27 @@ +C S+fencewmbonceonce+poacquireonce + +(* + * Result: Never + * + * Can a smp_wmb(), instead of a release, and an acquire order a prior + * store against a subsequent store? + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 2); + smp_wmb(); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = smp_load_acquire(y); + WRITE_ONCE(*x, 1); +} + +exists (x=2 /\ 1:r0=1) diff --git a/tools/memory-model/litmus-tests/S+poonceonces.litmus b/tools/memory-model/litmus-tests/S+poonceonces.litmus new file mode 100644 index 000000000..8c9c2f81a --- /dev/null +++ b/tools/memory-model/litmus-tests/S+poonceonces.litmus @@ -0,0 +1,28 @@ +C S+poonceonces + +(* + * Result: Sometimes + * + * Starting with a two-process release-acquire chain ordering P0()'s + * first store against P1()'s final load, if the smp_store_release() + * is replaced by WRITE_ONCE() and the smp_load_acquire() replaced by + * READ_ONCE(), is ordering preserved? + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 2); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*y); + WRITE_ONCE(*x, 1); +} + +exists (x=2 /\ 1:r0=1) diff --git a/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus b/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus new file mode 100644 index 000000000..ed5fff18d --- /dev/null +++ b/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus @@ -0,0 +1,32 @@ +C SB+fencembonceonces + +(* + * Result: Never + * + * This litmus test demonstrates that full memory barriers suffice to + * order the store-buffering pattern, where each process writes to the + * variable that the preceding process reads. 
(Locking and RCU can also + * suffice, but not much else.) + *) + +{} + +P0(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*x, 1); + smp_mb(); + r0 = READ_ONCE(*y); +} + +P1(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*y, 1); + smp_mb(); + r0 = READ_ONCE(*x); +} + +exists (0:r0=0 /\ 1:r0=0) diff --git a/tools/memory-model/litmus-tests/SB+poonceonces.litmus b/tools/memory-model/litmus-tests/SB+poonceonces.litmus new file mode 100644 index 000000000..10d550730 --- /dev/null +++ b/tools/memory-model/litmus-tests/SB+poonceonces.litmus @@ -0,0 +1,29 @@ +C SB+poonceonces + +(* + * Result: Sometimes + * + * This litmus test demonstrates that at least some ordering is required + * to order the store-buffering pattern, where each process writes to the + * variable that the preceding process reads. + *) + +{} + +P0(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*x, 1); + r0 = READ_ONCE(*y); +} + +P1(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*y, 1); + r0 = READ_ONCE(*x); +} + +exists (0:r0=0 /\ 1:r0=0) diff --git a/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus b/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus new file mode 100644 index 000000000..04a166036 --- /dev/null +++ b/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus @@ -0,0 +1,32 @@ +C SB+rfionceonce-poonceonces + +(* + * Result: Sometimes + * + * This litmus test demonstrates that LKMM is not fully multicopy atomic. + *) + +{} + +P0(int *x, int *y) +{ + int r1; + int r2; + + WRITE_ONCE(*x, 1); + r1 = READ_ONCE(*x); + r2 = READ_ONCE(*y); +} + +P1(int *x, int *y) +{ + int r3; + int r4; + + WRITE_ONCE(*y, 1); + r3 = READ_ONCE(*y); + r4 = READ_ONCE(*x); +} + +locations [0:r1; 1:r3; x; y] (* Debug aid: Print things not in "exists". *) +exists (0:r2=0 /\ 1:r4=0) diff --git a/tools/memory-model/litmus-tests/WRC+poonceonces+Once.litmus b/tools/memory-model/litmus-tests/WRC+poonceonces+Once.litmus new file mode 100644 index 000000000..6a2bc12a1 --- /dev/null +++ b/tools/memory-model/litmus-tests/WRC+poonceonces+Once.litmus @@ -0,0 +1,35 @@ +C WRC+poonceonces+Once + +(* + * Result: Sometimes + * + * This litmus test is an extension of the message-passing pattern, + * where the first write is moved to a separate process. Note that this + * test has no ordering at all. + *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*x); + WRITE_ONCE(*y, 1); +} + +P2(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*y); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus b/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus new file mode 100644 index 000000000..e9947250d --- /dev/null +++ b/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus @@ -0,0 +1,38 @@ +C WRC+pooncerelease+fencermbonceonce+Once + +(* + * Result: Never + * + * This litmus test is an extension of the message-passing pattern, where + * the first write is moved to a separate process. Because it features + * a release and a read memory barrier, it should be forbidden. More + * specifically, this litmus test is forbidden because smp_store_release() + * is A-cumulative in LKMM. 
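+ *
+ * (Informally, A-cumulativity means that P1()'s smp_store_release()
+ * orders not only P1()'s own earlier accesses, but also P0()'s store
+ * to x, which P1() read before the release. That store must therefore
+ * propagate to P2() before the store to y does, so once P2() reads
+ * y==1, its smp_rmb()-ordered read of x cannot return 0.)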
+ *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*x); + smp_store_release(y, 1); +} + +P2(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*y); + smp_rmb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/Z6.0+pooncelock+poonceLock+pombonce.litmus b/tools/memory-model/litmus-tests/Z6.0+pooncelock+poonceLock+pombonce.litmus new file mode 100644 index 000000000..415248fb6 --- /dev/null +++ b/tools/memory-model/litmus-tests/Z6.0+pooncelock+poonceLock+pombonce.litmus @@ -0,0 +1,42 @@ +C Z6.0+pooncelock+poonceLock+pombonce + +(* + * Result: Never + * + * This litmus test demonstrates how smp_mb__after_spinlock() may be + * used to ensure that accesses in different critical sections for a + * given lock running on different CPUs are nevertheless seen in order + * by CPUs not holding that lock. + *) + +{} + +P0(int *x, int *y, spinlock_t *mylock) +{ + spin_lock(mylock); + WRITE_ONCE(*x, 1); + WRITE_ONCE(*y, 1); + spin_unlock(mylock); +} + +P1(int *y, int *z, spinlock_t *mylock) +{ + int r0; + + spin_lock(mylock); + smp_mb__after_spinlock(); + r0 = READ_ONCE(*y); + WRITE_ONCE(*z, 1); + spin_unlock(mylock); +} + +P2(int *x, int *z) +{ + int r1; + + WRITE_ONCE(*z, 2); + smp_mb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ z=2 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/Z6.0+pooncelock+pooncelock+pombonce.litmus b/tools/memory-model/litmus-tests/Z6.0+pooncelock+pooncelock+pombonce.litmus new file mode 100644 index 000000000..10a2aa04c --- /dev/null +++ b/tools/memory-model/litmus-tests/Z6.0+pooncelock+pooncelock+pombonce.litmus @@ -0,0 +1,40 @@ +C Z6.0+pooncelock+pooncelock+pombonce + +(* + * Result: Sometimes + * + * This example demonstrates that a pair of accesses made by different + * processes each while holding a given lock will not necessarily be + * seen as ordered by a third process not holding that lock. + *) + +{} + +P0(int *x, int *y, spinlock_t *mylock) +{ + spin_lock(mylock); + WRITE_ONCE(*x, 1); + WRITE_ONCE(*y, 1); + spin_unlock(mylock); +} + +P1(int *y, int *z, spinlock_t *mylock) +{ + int r0; + + spin_lock(mylock); + r0 = READ_ONCE(*y); + WRITE_ONCE(*z, 1); + spin_unlock(mylock); +} + +P2(int *x, int *z) +{ + int r1; + + WRITE_ONCE(*z, 2); + smp_mb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ z=2 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus b/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus new file mode 100644 index 000000000..88e70b87a --- /dev/null +++ b/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus @@ -0,0 +1,42 @@ +C Z6.0+pooncerelease+poacquirerelease+fencembonceonce + +(* + * Result: Sometimes + * + * This litmus test shows that a release-acquire chain, while sufficient + * when there is but one non-reads-from (AKA non-rf) link, does not suffice + * if there is more than one. Of the three processes, only P1() reads from + * P0's write, which means that there are two non-rf links: P1() to P2() + * is a write-to-write link (AKA a "coherence" or just "co" link) and P2() + * to P0() is a read-to-write link (AKA a "from-reads" or just "fr" link). + * When there are two or more non-rf links, you typically will need one + * full barrier for each non-rf link. (Exceptions include some cases + * involving locking.) 
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	WRITE_ONCE(*x, 1);
+	smp_store_release(y, 1);
+}
+
+P1(int *y, int *z)
+{
+	int r0;
+
+	r0 = smp_load_acquire(y);
+	smp_store_release(z, 1);
+}
+
+P2(int *x, int *z)
+{
+	int r1;
+
+	WRITE_ONCE(*z, 2);
+	smp_mb();
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ z=2 /\ 2:r1=0)
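The *.litmus.out pattern ignored by the .gitignore added at the top of this patch is the suffix of the per-test output files produced when the tests are checked in bulk. Assuming the companion scripts under tools/memory-model/scripts are available (they are not part of this patch), a whole-directory check run from tools/memory-model would look something like:

	$ scripts/checkalllitmus.sh

The script name and invocation above are an assumption inferred from the ignored suffix; a single test can always be checked directly with herd7 as shown near the top of this page.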