Diffstat (limited to 'tools/memory-model')
58 files changed, 7553 insertions, 0 deletions
diff --git a/tools/memory-model/.gitignore b/tools/memory-model/.gitignore
new file mode 100644
index 000000000..cf4cd66d8
--- /dev/null
+++ b/tools/memory-model/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+litmus
diff --git a/tools/memory-model/Documentation/cheatsheet.txt b/tools/memory-model/Documentation/cheatsheet.txt
new file mode 100644
index 000000000..99d00870b
--- /dev/null
+++ b/tools/memory-model/Documentation/cheatsheet.txt
@@ -0,0 +1,35 @@
+                                  Prior Operation     Subsequent Operation
+                                  ---------------  ---------------------------
+                               C  Self  R  W  RMW  Self  R  W  DR  DW  RMW  SV
+                              --  ----  -  -  ---  ----  -  -  --  --  ---  --
+
+Relaxed store                           Y                                    Y
+Relaxed load                            Y                      Y   Y        Y
+Relaxed RMW operation                   Y                      Y   Y        Y
+rcu_dereference()                       Y                      Y   Y        Y
+Successful *_acquire()                  R                Y  Y  Y   Y    Y   Y
+Successful *_release()         C        Y  Y    Y    W                      Y
+smp_rmb()                               Y       R       Y      Y        R
+smp_wmb()                                  Y    W          Y       Y    W
+smp_mb() & synchronize_rcu()  CP        Y  Y    Y        Y  Y  Y   Y    Y
+Successful full non-void RMW  CP    Y   Y  Y    Y    Y   Y  Y  Y   Y    Y   Y
+smp_mb__before_atomic()       CP        Y  Y    Y        a  a  a   a    Y
+smp_mb__after_atomic()        CP        a  a    Y        Y  Y  Y   Y    Y
+
+
+Key:	Relaxed:  A relaxed operation is either READ_ONCE(), WRITE_ONCE(),
+		  a *_relaxed() RMW operation, an unsuccessful RMW
+		  operation, a non-value-returning RMW operation such
+		  as atomic_inc(), or one of the atomic*_read() and
+		  atomic*_set() family of operations.
+	C:	  Ordering is cumulative
+	P:	  Ordering propagates
+	R:	  Read, for example, READ_ONCE(), or read portion of RMW
+	W:	  Write, for example, WRITE_ONCE(), or write portion of RMW
+	Y:	  Provides ordering
+	a:	  Provides ordering given intervening RMW atomic operation
+	DR:	  Dependent read (address dependency)
+	DW:	  Dependent write (address, data, or control dependency)
+	RMW:	  Atomic read-modify-write operation
+	SELF:	  Orders self, as opposed to accesses before and/or after
+	SV:	  Orders later accesses to the same variable
diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt
new file mode 100644
index 000000000..f9d610d5a
--- /dev/null
+++ b/tools/memory-model/Documentation/explanation.txt
@@ -0,0 +1,2559 @@
+Explanation of the Linux-Kernel Memory Consistency Model
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+:Author: Alan Stern <stern@rowland.harvard.edu>
+:Created: October 2017
+
+.. Contents
+
+  1. INTRODUCTION
+  2. BACKGROUND
+  3. A SIMPLE EXAMPLE
+  4. A SELECTION OF MEMORY MODELS
+  5. ORDERING AND CYCLES
+  6. EVENTS
+  7. THE PROGRAM ORDER RELATION: po AND po-loc
+  8. A WARNING
+  9. DEPENDENCY RELATIONS: data, addr, and ctrl
+  10. THE READS-FROM RELATION: rf, rfi, and rfe
+  11. CACHE COHERENCE AND THE COHERENCE ORDER RELATION: co, coi, and coe
+  12. THE FROM-READS RELATION: fr, fri, and fre
+  13. AN OPERATIONAL MODEL
+  14. PROPAGATION ORDER RELATION: cumul-fence
+  15. DERIVATION OF THE LKMM FROM THE OPERATIONAL MODEL
+  16. SEQUENTIAL CONSISTENCY PER VARIABLE
+  17. ATOMIC UPDATES: rmw
+  18. THE PRESERVED PROGRAM ORDER RELATION: ppo
+  19. AND THEN THERE WAS ALPHA
+  20. THE HAPPENS-BEFORE RELATION: hb
+  21. THE PROPAGATES-BEFORE RELATION: pb
+  22. RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-order, rcu-fence, and rb
+  23. LOCKING
+  24. PLAIN ACCESSES AND DATA RACES
+  25. ODDS AND ENDS
+
+
+INTRODUCTION
+------------
+
+The Linux-kernel memory consistency model (LKMM) is rather complex and
+obscure. This is particularly evident if you read through the
+linux-kernel.bell and linux-kernel.cat files that make up the formal
+version of the model; they are extremely terse and their meanings are
+far from clear.
+ +This document describes the ideas underlying the LKMM. It is meant +for people who want to understand how the model was designed. It does +not go into the details of the code in the .bell and .cat files; +rather, it explains in English what the code expresses symbolically. + +Sections 2 (BACKGROUND) through 5 (ORDERING AND CYCLES) are aimed +toward beginners; they explain what memory consistency models are and +the basic notions shared by all such models. People already familiar +with these concepts can skim or skip over them. Sections 6 (EVENTS) +through 12 (THE FROM_READS RELATION) describe the fundamental +relations used in many models. Starting in Section 13 (AN OPERATIONAL +MODEL), the workings of the LKMM itself are covered. + +Warning: The code examples in this document are not written in the +proper format for litmus tests. They don't include a header line, the +initializations are not enclosed in braces, the global variables are +not passed by pointers, and they don't have an "exists" clause at the +end. Converting them to the right format is left as an exercise for +the reader. + + +BACKGROUND +---------- + +A memory consistency model (or just memory model, for short) is +something which predicts, given a piece of computer code running on a +particular kind of system, what values may be obtained by the code's +load instructions. The LKMM makes these predictions for code running +as part of the Linux kernel. + +In practice, people tend to use memory models the other way around. +That is, given a piece of code and a collection of values specified +for the loads, the model will predict whether it is possible for the +code to run in such a way that the loads will indeed obtain the +specified values. Of course, this is just another way of expressing +the same idea. + +For code running on a uniprocessor system, the predictions are easy: +Each load instruction must obtain the value written by the most recent +store instruction accessing the same location (we ignore complicating +factors such as DMA and mixed-size accesses.) But on multiprocessor +systems, with multiple CPUs making concurrent accesses to shared +memory locations, things aren't so simple. + +Different architectures have differing memory models, and the Linux +kernel supports a variety of architectures. The LKMM has to be fairly +permissive, in the sense that any behavior allowed by one of these +architectures also has to be allowed by the LKMM. + + +A SIMPLE EXAMPLE +---------------- + +Here is a simple example to illustrate the basic concepts. Consider +some code running as part of a device driver for an input device. The +driver might contain an interrupt handler which collects data from the +device, stores it in a buffer, and sets a flag to indicate the buffer +is full. Running concurrently on a different CPU might be a part of +the driver code being executed by a process in the midst of a read(2) +system call. This code tests the flag to see whether the buffer is +ready, and if it is, copies the data back to userspace. The buffer +and the flag are memory locations shared between the two CPUs. 
+ +We can abstract out the important pieces of the driver code as follows +(the reason for using WRITE_ONCE() and READ_ONCE() instead of simple +assignment statements is discussed later): + + int buf = 0, flag = 0; + + P0() + { + WRITE_ONCE(buf, 1); + WRITE_ONCE(flag, 1); + } + + P1() + { + int r1; + int r2 = 0; + + r1 = READ_ONCE(flag); + if (r1) + r2 = READ_ONCE(buf); + } + +Here the P0() function represents the interrupt handler running on one +CPU and P1() represents the read() routine running on another. The +value 1 stored in buf represents input data collected from the device. +Thus, P0 stores the data in buf and then sets flag. Meanwhile, P1 +reads flag into the private variable r1, and if it is set, reads the +data from buf into a second private variable r2 for copying to +userspace. (Presumably if flag is not set then the driver will wait a +while and try again.) + +This pattern of memory accesses, where one CPU stores values to two +shared memory locations and another CPU loads from those locations in +the opposite order, is widely known as the "Message Passing" or MP +pattern. It is typical of memory access patterns in the kernel. + +Please note that this example code is a simplified abstraction. Real +buffers are usually larger than a single integer, real device drivers +usually use sleep and wakeup mechanisms rather than polling for I/O +completion, and real code generally doesn't bother to copy values into +private variables before using them. All that is beside the point; +the idea here is simply to illustrate the overall pattern of memory +accesses by the CPUs. + +A memory model will predict what values P1 might obtain for its loads +from flag and buf, or equivalently, what values r1 and r2 might end up +with after the code has finished running. + +Some predictions are trivial. For instance, no sane memory model would +predict that r1 = 42 or r2 = -7, because neither of those values ever +gets stored in flag or buf. + +Some nontrivial predictions are nonetheless quite simple. For +instance, P1 might run entirely before P0 begins, in which case r1 and +r2 will both be 0 at the end. Or P0 might run entirely before P1 +begins, in which case r1 and r2 will both be 1. + +The interesting predictions concern what might happen when the two +routines run concurrently. One possibility is that P1 runs after P0's +store to buf but before the store to flag. In this case, r1 and r2 +will again both be 0. (If P1 had been designed to read buf +unconditionally then we would instead have r1 = 0 and r2 = 1.) + +However, the most interesting possibility is where r1 = 1 and r2 = 0. +If this were to occur it would mean the driver contains a bug, because +incorrect data would get sent to the user: 0 instead of 1. As it +happens, the LKMM does predict this outcome can occur, and the example +driver code shown above is indeed buggy. + + +A SELECTION OF MEMORY MODELS +---------------------------- + +The first widely cited memory model, and the simplest to understand, +is Sequential Consistency. According to this model, systems behave as +if each CPU executed its instructions in order but with unspecified +timing. In other words, the instructions from the various CPUs get +interleaved in a nondeterministic way, always according to some single +global order that agrees with the order of the instructions in the +program source for each CPU. The model says that the value obtained +by each load is simply the value written by the most recently executed +store to the same memory location, from any CPU. 
+ +For the MP example code shown above, Sequential Consistency predicts +that the undesired result r1 = 1, r2 = 0 cannot occur. The reasoning +goes like this: + + Since r1 = 1, P0 must store 1 to flag before P1 loads 1 from + it, as loads can obtain values only from earlier stores. + + P1 loads from flag before loading from buf, since CPUs execute + their instructions in order. + + P1 must load 0 from buf before P0 stores 1 to it; otherwise r2 + would be 1 since a load obtains its value from the most recent + store to the same address. + + P0 stores 1 to buf before storing 1 to flag, since it executes + its instructions in order. + + Since an instruction (in this case, P0's store to flag) cannot + execute before itself, the specified outcome is impossible. + +However, real computer hardware almost never follows the Sequential +Consistency memory model; doing so would rule out too many valuable +performance optimizations. On ARM and PowerPC architectures, for +instance, the MP example code really does sometimes yield r1 = 1 and +r2 = 0. + +x86 and SPARC follow yet a different memory model: TSO (Total Store +Ordering). This model predicts that the undesired outcome for the MP +pattern cannot occur, but in other respects it differs from Sequential +Consistency. One example is the Store Buffer (SB) pattern, in which +each CPU stores to its own shared location and then loads from the +other CPU's location: + + int x = 0, y = 0; + + P0() + { + int r0; + + WRITE_ONCE(x, 1); + r0 = READ_ONCE(y); + } + + P1() + { + int r1; + + WRITE_ONCE(y, 1); + r1 = READ_ONCE(x); + } + +Sequential Consistency predicts that the outcome r0 = 0, r1 = 0 is +impossible. (Exercise: Figure out the reasoning.) But TSO allows +this outcome to occur, and in fact it does sometimes occur on x86 and +SPARC systems. + +The LKMM was inspired by the memory models followed by PowerPC, ARM, +x86, Alpha, and other architectures. However, it is different in +detail from each of them. + + +ORDERING AND CYCLES +------------------- + +Memory models are all about ordering. Often this is temporal ordering +(i.e., the order in which certain events occur) but it doesn't have to +be; consider for example the order of instructions in a program's +source code. We saw above that Sequential Consistency makes an +important assumption that CPUs execute instructions in the same order +as those instructions occur in the code, and there are many other +instances of ordering playing central roles in memory models. + +The counterpart to ordering is a cycle. Ordering rules out cycles: +It's not possible to have X ordered before Y, Y ordered before Z, and +Z ordered before X, because this would mean that X is ordered before +itself. The analysis of the MP example under Sequential Consistency +involved just such an impossible cycle: + + W: P0 stores 1 to flag executes before + X: P1 loads 1 from flag executes before + Y: P1 loads 0 from buf executes before + Z: P0 stores 1 to buf executes before + W: P0 stores 1 to flag. + +In short, if a memory model requires certain accesses to be ordered, +and a certain outcome for the loads in a piece of code can happen only +if those accesses would form a cycle, then the memory model predicts +that outcome cannot occur. + +The LKMM is defined largely in terms of cycles, as we will see. + + +EVENTS +------ + +The LKMM does not work directly with the C statements that make up +kernel source code. Instead it considers the effects of those +statements in a more abstract form, namely, events. 
The model +includes three types of events: + + Read events correspond to loads from shared memory, such as + calls to READ_ONCE(), smp_load_acquire(), or + rcu_dereference(). + + Write events correspond to stores to shared memory, such as + calls to WRITE_ONCE(), smp_store_release(), or atomic_set(). + + Fence events correspond to memory barriers (also known as + fences), such as calls to smp_rmb() or rcu_read_lock(). + +These categories are not exclusive; a read or write event can also be +a fence. This happens with functions like smp_load_acquire() or +spin_lock(). However, no single event can be both a read and a write. +Atomic read-modify-write accesses, such as atomic_inc() or xchg(), +correspond to a pair of events: a read followed by a write. (The +write event is omitted for executions where it doesn't occur, such as +a cmpxchg() where the comparison fails.) + +Other parts of the code, those which do not involve interaction with +shared memory, do not give rise to events. Thus, arithmetic and +logical computations, control-flow instructions, or accesses to +private memory or CPU registers are not of central interest to the +memory model. They only affect the model's predictions indirectly. +For example, an arithmetic computation might determine the value that +gets stored to a shared memory location (or in the case of an array +index, the address where the value gets stored), but the memory model +is concerned only with the store itself -- its value and its address +-- not the computation leading up to it. + +Events in the LKMM can be linked by various relations, which we will +describe in the following sections. The memory model requires certain +of these relations to be orderings, that is, it requires them not to +have any cycles. + + +THE PROGRAM ORDER RELATION: po AND po-loc +----------------------------------------- + +The most important relation between events is program order (po). You +can think of it as the order in which statements occur in the source +code after branches are taken into account and loops have been +unrolled. A better description might be the order in which +instructions are presented to a CPU's execution unit. Thus, we say +that X is po-before Y (written as "X ->po Y" in formulas) if X occurs +before Y in the instruction stream. + +This is inherently a single-CPU relation; two instructions executing +on different CPUs are never linked by po. Also, it is by definition +an ordering so it cannot have any cycles. + +po-loc is a sub-relation of po. It links two memory accesses when the +first comes before the second in program order and they access the +same memory location (the "-loc" suffix). + +Although this may seem straightforward, there is one subtle aspect to +program order we need to explain. The LKMM was inspired by low-level +architectural memory models which describe the behavior of machine +code, and it retains their outlook to a considerable extent. The +read, write, and fence events used by the model are close in spirit to +individual machine instructions. Nevertheless, the LKMM describes +kernel code written in C, and the mapping from C to machine code can +be extremely complex. + +Optimizing compilers have great freedom in the way they translate +source code to object code. They are allowed to apply transformations +that add memory accesses, eliminate accesses, combine them, split them +into pieces, or move them around. 
The use of READ_ONCE(), WRITE_ONCE(), +or one of the other atomic or synchronization primitives prevents a +large number of compiler optimizations. In particular, it is guaranteed +that the compiler will not remove such accesses from the generated code +(unless it can prove the accesses will never be executed), it will not +change the order in which they occur in the code (within limits imposed +by the C standard), and it will not introduce extraneous accesses. + +The MP and SB examples above used READ_ONCE() and WRITE_ONCE() rather +than ordinary memory accesses. Thanks to this usage, we can be certain +that in the MP example, the compiler won't reorder P0's write event to +buf and P0's write event to flag, and similarly for the other shared +memory accesses in the examples. + +Since private variables are not shared between CPUs, they can be +accessed normally without READ_ONCE() or WRITE_ONCE(). In fact, they +need not even be stored in normal memory at all -- in principle a +private variable could be stored in a CPU register (hence the convention +that these variables have names starting with the letter 'r'). + + +A WARNING +--------- + +The protections provided by READ_ONCE(), WRITE_ONCE(), and others are +not perfect; and under some circumstances it is possible for the +compiler to undermine the memory model. Here is an example. Suppose +both branches of an "if" statement store the same value to the same +location: + + r1 = READ_ONCE(x); + if (r1) { + WRITE_ONCE(y, 2); + ... /* do something */ + } else { + WRITE_ONCE(y, 2); + ... /* do something else */ + } + +For this code, the LKMM predicts that the load from x will always be +executed before either of the stores to y. However, a compiler could +lift the stores out of the conditional, transforming the code into +something resembling: + + r1 = READ_ONCE(x); + WRITE_ONCE(y, 2); + if (r1) { + ... /* do something */ + } else { + ... /* do something else */ + } + +Given this version of the code, the LKMM would predict that the load +from x could be executed after the store to y. Thus, the memory +model's original prediction could be invalidated by the compiler. + +Another issue arises from the fact that in C, arguments to many +operators and function calls can be evaluated in any order. For +example: + + r1 = f(5) + g(6); + +The object code might call f(5) either before or after g(6); the +memory model cannot assume there is a fixed program order relation +between them. (In fact, if the function calls are inlined then the +compiler might even interleave their object code.) + + +DEPENDENCY RELATIONS: data, addr, and ctrl +------------------------------------------ + +We say that two events are linked by a dependency relation when the +execution of the second event depends in some way on a value obtained +from memory by the first. The first event must be a read, and the +value it obtains must somehow affect what the second event does. +There are three kinds of dependencies: data, address (addr), and +control (ctrl). + +A read and a write event are linked by a data dependency if the value +obtained by the read affects the value stored by the write. As a very +simple example: + + int x, y; + + r1 = READ_ONCE(x); + WRITE_ONCE(y, r1 + 5); + +The value stored by the WRITE_ONCE obviously depends on the value +loaded by the READ_ONCE. Such dependencies can wind through +arbitrarily complicated computations, and a write can depend on the +values of multiple reads. 
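+
+For instance (an illustrative snippet in the same informal style as the
+other examples, not a proper litmus test), the following store carries a
+data dependency on both of the preceding loads, by way of an intermediate
+computation:
+
+	int x, y, z;
+
+	r1 = READ_ONCE(x);
+	r2 = READ_ONCE(y);
+	WRITE_ONCE(z, (r1 * 41) + r2);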
+ +A read event and another memory access event are linked by an address +dependency if the value obtained by the read affects the location +accessed by the other event. The second event can be either a read or +a write. Here's another simple example: + + int a[20]; + int i; + + r1 = READ_ONCE(i); + r2 = READ_ONCE(a[r1]); + +Here the location accessed by the second READ_ONCE() depends on the +index value loaded by the first. Pointer indirection also gives rise +to address dependencies, since the address of a location accessed +through a pointer will depend on the value read earlier from that +pointer. + +Finally, a read event and another memory access event are linked by a +control dependency if the value obtained by the read affects whether +the second event is executed at all. Simple example: + + int x, y; + + r1 = READ_ONCE(x); + if (r1) + WRITE_ONCE(y, 1984); + +Execution of the WRITE_ONCE() is controlled by a conditional expression +which depends on the value obtained by the READ_ONCE(); hence there is +a control dependency from the load to the store. + +It should be pretty obvious that events can only depend on reads that +come earlier in program order. Symbolically, if we have R ->data X, +R ->addr X, or R ->ctrl X (where R is a read event), then we must also +have R ->po X. It wouldn't make sense for a computation to depend +somehow on a value that doesn't get loaded from shared memory until +later in the code! + + +THE READS-FROM RELATION: rf, rfi, and rfe +----------------------------------------- + +The reads-from relation (rf) links a write event to a read event when +the value loaded by the read is the value that was stored by the +write. In colloquial terms, the load "reads from" the store. We +write W ->rf R to indicate that the load R reads from the store W. We +further distinguish the cases where the load and the store occur on +the same CPU (internal reads-from, or rfi) and where they occur on +different CPUs (external reads-from, or rfe). + +For our purposes, a memory location's initial value is treated as +though it had been written there by an imaginary initial store that +executes on a separate CPU before the main program runs. + +Usage of the rf relation implicitly assumes that loads will always +read from a single store. It doesn't apply properly in the presence +of load-tearing, where a load obtains some of its bits from one store +and some of them from another store. Fortunately, use of READ_ONCE() +and WRITE_ONCE() will prevent load-tearing; it's not possible to have: + + int x = 0; + + P0() + { + WRITE_ONCE(x, 0x1234); + } + + P1() + { + int r1; + + r1 = READ_ONCE(x); + } + +and end up with r1 = 0x1200 (partly from x's initial value and partly +from the value stored by P0). + +On the other hand, load-tearing is unavoidable when mixed-size +accesses are used. Consider this example: + + union { + u32 w; + u16 h[2]; + } x; + + P0() + { + WRITE_ONCE(x.h[0], 0x1234); + WRITE_ONCE(x.h[1], 0x5678); + } + + P1() + { + int r1; + + r1 = READ_ONCE(x.w); + } + +If r1 = 0x56781234 (little-endian!) at the end, then P1 must have read +from both of P0's stores. It is possible to handle mixed-size and +unaligned accesses in a memory model, but the LKMM currently does not +attempt to do so. It requires all accesses to be properly aligned and +of the location's actual size. 
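+
+To make the rfi/rfe distinction concrete, here is a small sketch in the
+same informal style as the other examples (again, not a proper litmus
+test):
+
+	int x = 0;
+
+	P0()
+	{
+		int r1;
+
+		WRITE_ONCE(x, 1);
+		r1 = READ_ONCE(x);
+	}
+
+	P1()
+	{
+		int r2;
+
+		r2 = READ_ONCE(x);
+	}
+
+If r1 = 1 and r2 = 1 at the end, then P0's store ->rfi P0's load (they
+are on the same CPU) and P0's store ->rfe P1's load (they are on
+different CPUs). If instead r2 = 0, then P1's load reads from the
+imaginary store that established x's initial value.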
+ + +CACHE COHERENCE AND THE COHERENCE ORDER RELATION: co, coi, and coe +------------------------------------------------------------------ + +Cache coherence is a general principle requiring that in a +multi-processor system, the CPUs must share a consistent view of the +memory contents. Specifically, it requires that for each location in +shared memory, the stores to that location must form a single global +ordering which all the CPUs agree on (the coherence order), and this +ordering must be consistent with the program order for accesses to +that location. + +To put it another way, for any variable x, the coherence order (co) of +the stores to x is simply the order in which the stores overwrite one +another. The imaginary store which establishes x's initial value +comes first in the coherence order; the store which directly +overwrites the initial value comes second; the store which overwrites +that value comes third, and so on. + +You can think of the coherence order as being the order in which the +stores reach x's location in memory (or if you prefer a more +hardware-centric view, the order in which the stores get written to +x's cache line). We write W ->co W' if W comes before W' in the +coherence order, that is, if the value stored by W gets overwritten, +directly or indirectly, by the value stored by W'. + +Coherence order is required to be consistent with program order. This +requirement takes the form of four coherency rules: + + Write-write coherence: If W ->po-loc W' (i.e., W comes before + W' in program order and they access the same location), where W + and W' are two stores, then W ->co W'. + + Write-read coherence: If W ->po-loc R, where W is a store and R + is a load, then R must read from W or from some other store + which comes after W in the coherence order. + + Read-write coherence: If R ->po-loc W, where R is a load and W + is a store, then the store which R reads from must come before + W in the coherence order. + + Read-read coherence: If R ->po-loc R', where R and R' are two + loads, then either they read from the same store or else the + store read by R comes before the store read by R' in the + coherence order. + +This is sometimes referred to as sequential consistency per variable, +because it means that the accesses to any single memory location obey +the rules of the Sequential Consistency memory model. (According to +Wikipedia, sequential consistency per variable and cache coherence +mean the same thing except that cache coherence includes an extra +requirement that every store eventually becomes visible to every CPU.) + +Any reasonable memory model will include cache coherence. Indeed, our +expectation of cache coherence is so deeply ingrained that violations +of its requirements look more like hardware bugs than programming +errors: + + int x; + + P0() + { + WRITE_ONCE(x, 17); + WRITE_ONCE(x, 23); + } + +If the final value stored in x after this code ran was 17, you would +think your computer was broken. It would be a violation of the +write-write coherence rule: Since the store of 23 comes later in +program order, it must also come later in x's coherence order and +thus must overwrite the store of 17. 
+ + int x = 0; + + P0() + { + int r1; + + r1 = READ_ONCE(x); + WRITE_ONCE(x, 666); + } + +If r1 = 666 at the end, this would violate the read-write coherence +rule: The READ_ONCE() load comes before the WRITE_ONCE() store in +program order, so it must not read from that store but rather from one +coming earlier in the coherence order (in this case, x's initial +value). + + int x = 0; + + P0() + { + WRITE_ONCE(x, 5); + } + + P1() + { + int r1, r2; + + r1 = READ_ONCE(x); + r2 = READ_ONCE(x); + } + +If r1 = 5 (reading from P0's store) and r2 = 0 (reading from the +imaginary store which establishes x's initial value) at the end, this +would violate the read-read coherence rule: The r1 load comes before +the r2 load in program order, so it must not read from a store that +comes later in the coherence order. + +(As a minor curiosity, if this code had used normal loads instead of +READ_ONCE() in P1, on Itanium it sometimes could end up with r1 = 5 +and r2 = 0! This results from parallel execution of the operations +encoded in Itanium's Very-Long-Instruction-Word format, and it is yet +another motivation for using READ_ONCE() when accessing shared memory +locations.) + +Just like the po relation, co is inherently an ordering -- it is not +possible for a store to directly or indirectly overwrite itself! And +just like with the rf relation, we distinguish between stores that +occur on the same CPU (internal coherence order, or coi) and stores +that occur on different CPUs (external coherence order, or coe). + +On the other hand, stores to different memory locations are never +related by co, just as instructions on different CPUs are never +related by po. Coherence order is strictly per-location, or if you +prefer, each location has its own independent coherence order. + + +THE FROM-READS RELATION: fr, fri, and fre +----------------------------------------- + +The from-reads relation (fr) can be a little difficult for people to +grok. It describes the situation where a load reads a value that gets +overwritten by a store. In other words, we have R ->fr W when the +value that R reads is overwritten (directly or indirectly) by W, or +equivalently, when R reads from a store which comes earlier than W in +the coherence order. + +For example: + + int x = 0; + + P0() + { + int r1; + + r1 = READ_ONCE(x); + WRITE_ONCE(x, 2); + } + +The value loaded from x will be 0 (assuming cache coherence!), and it +gets overwritten by the value 2. Thus there is an fr link from the +READ_ONCE() to the WRITE_ONCE(). If the code contained any later +stores to x, there would also be fr links from the READ_ONCE() to +them. + +As with rf, rfi, and rfe, we subdivide the fr relation into fri (when +the load and the store are on the same CPU) and fre (when they are on +different CPUs). + +Note that the fr relation is determined entirely by the rf and co +relations; it is not independent. Given a read event R and a write +event W for the same location, we will have R ->fr W if and only if +the write which R reads from is co-before W. In symbols, + + (R ->fr W) := (there exists W' with W' ->rf R and W' ->co W). + + +AN OPERATIONAL MODEL +-------------------- + +The LKMM is based on various operational memory models, meaning that +the models arise from an abstract view of how a computer system +operates. Here are the main ideas, as incorporated into the LKMM. + +The system as a whole is divided into the CPUs and a memory subsystem. 
+The CPUs are responsible for executing instructions (not necessarily +in program order), and they communicate with the memory subsystem. +For the most part, executing an instruction requires a CPU to perform +only internal operations. However, loads, stores, and fences involve +more. + +When CPU C executes a store instruction, it tells the memory subsystem +to store a certain value at a certain location. The memory subsystem +propagates the store to all the other CPUs as well as to RAM. (As a +special case, we say that the store propagates to its own CPU at the +time it is executed.) The memory subsystem also determines where the +store falls in the location's coherence order. In particular, it must +arrange for the store to be co-later than (i.e., to overwrite) any +other store to the same location which has already propagated to CPU C. + +When a CPU executes a load instruction R, it first checks to see +whether there are any as-yet unexecuted store instructions, for the +same location, that come before R in program order. If there are, it +uses the value of the po-latest such store as the value obtained by R, +and we say that the store's value is forwarded to R. Otherwise, the +CPU asks the memory subsystem for the value to load and we say that R +is satisfied from memory. The memory subsystem hands back the value +of the co-latest store to the location in question which has already +propagated to that CPU. + +(In fact, the picture needs to be a little more complicated than this. +CPUs have local caches, and propagating a store to a CPU really means +propagating it to the CPU's local cache. A local cache can take some +time to process the stores that it receives, and a store can't be used +to satisfy one of the CPU's loads until it has been processed. On +most architectures, the local caches process stores in +First-In-First-Out order, and consequently the processing delay +doesn't matter for the memory model. But on Alpha, the local caches +have a partitioned design that results in non-FIFO behavior. We will +discuss this in more detail later.) + +Note that load instructions may be executed speculatively and may be +restarted under certain circumstances. The memory model ignores these +premature executions; we simply say that the load executes at the +final time it is forwarded or satisfied. + +Executing a fence (or memory barrier) instruction doesn't require a +CPU to do anything special other than informing the memory subsystem +about the fence. However, fences do constrain the way CPUs and the +memory subsystem handle other instructions, in two respects. + +First, a fence forces the CPU to execute various instructions in +program order. Exactly which instructions are ordered depends on the +type of fence: + + Strong fences, including smp_mb() and synchronize_rcu(), force + the CPU to execute all po-earlier instructions before any + po-later instructions; + + smp_rmb() forces the CPU to execute all po-earlier loads + before any po-later loads; + + smp_wmb() forces the CPU to execute all po-earlier stores + before any po-later stores; + + Acquire fences, such as smp_load_acquire(), force the CPU to + execute the load associated with the fence (e.g., the load + part of an smp_load_acquire()) before any po-later + instructions; + + Release fences, such as smp_store_release(), force the CPU to + execute all po-earlier instructions before the store + associated with the fence (e.g., the store part of an + smp_store_release()). 
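+
+As an illustration (a sketch in the same informal style as the earlier
+examples), the MP pattern shown earlier can be repaired by using a
+release store and an acquire load:
+
+	int buf = 0, flag = 0;
+
+	P0()
+	{
+		WRITE_ONCE(buf, 1);
+		smp_store_release(&flag, 1);
+	}
+
+	P1()
+	{
+		int r1;
+		int r2;
+
+		r1 = smp_load_acquire(&flag);
+		r2 = READ_ONCE(buf);
+	}
+
+The release fence keeps P0's store to buf from executing after the store
+to flag, and the acquire fence keeps P1's load from buf from executing
+before the load from flag; together with the propagation rules described
+next, this is enough to forbid the undesired r1 = 1, r2 = 0 outcome.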
+ +Second, some types of fence affect the way the memory subsystem +propagates stores. When a fence instruction is executed on CPU C: + + For each other CPU C', smp_wmb() forces all po-earlier stores + on C to propagate to C' before any po-later stores do. + + For each other CPU C', any store which propagates to C before + a release fence is executed (including all po-earlier + stores executed on C) is forced to propagate to C' before the + store associated with the release fence does. + + Any store which propagates to C before a strong fence is + executed (including all po-earlier stores on C) is forced to + propagate to all other CPUs before any instructions po-after + the strong fence are executed on C. + +The propagation ordering enforced by release fences and strong fences +affects stores from other CPUs that propagate to CPU C before the +fence is executed, as well as stores that are executed on C before the +fence. We describe this property by saying that release fences and +strong fences are A-cumulative. By contrast, smp_wmb() fences are not +A-cumulative; they only affect the propagation of stores that are +executed on C before the fence (i.e., those which precede the fence in +program order). + +rcu_read_lock(), rcu_read_unlock(), and synchronize_rcu() fences have +other properties which we discuss later. + + +PROPAGATION ORDER RELATION: cumul-fence +--------------------------------------- + +The fences which affect propagation order (i.e., strong, release, and +smp_wmb() fences) are collectively referred to as cumul-fences, even +though smp_wmb() isn't A-cumulative. The cumul-fence relation is +defined to link memory access events E and F whenever: + + E and F are both stores on the same CPU and an smp_wmb() fence + event occurs between them in program order; or + + F is a release fence and some X comes before F in program order, + where either X = E or else E ->rf X; or + + A strong fence event occurs between some X and F in program + order, where either X = E or else E ->rf X. + +The operational model requires that whenever W and W' are both stores +and W ->cumul-fence W', then W must propagate to any given CPU +before W' does. However, for different CPUs C and C', it does not +require W to propagate to C before W' propagates to C'. + + +DERIVATION OF THE LKMM FROM THE OPERATIONAL MODEL +------------------------------------------------- + +The LKMM is derived from the restrictions imposed by the design +outlined above. These restrictions involve the necessity of +maintaining cache coherence and the fact that a CPU can't operate on a +value before it knows what that value is, among other things. + +The formal version of the LKMM is defined by six requirements, or +axioms: + + Sequential consistency per variable: This requires that the + system obey the four coherency rules. + + Atomicity: This requires that atomic read-modify-write + operations really are atomic, that is, no other stores can + sneak into the middle of such an update. + + Happens-before: This requires that certain instructions are + executed in a specific order. + + Propagation: This requires that certain stores propagate to + CPUs and to RAM in a specific order. + + Rcu: This requires that RCU read-side critical sections and + grace periods obey the rules of RCU, in particular, the + Grace-Period Guarantee. + + Plain-coherence: This requires that plain memory accesses + (those not using READ_ONCE(), WRITE_ONCE(), etc.) must obey + the operational model's rules regarding cache coherence. 
+ +The first and second are quite common; they can be found in many +memory models (such as those for C11/C++11). The "happens-before" and +"propagation" axioms have analogs in other memory models as well. The +"rcu" and "plain-coherence" axioms are specific to the LKMM. + +Each of these axioms is discussed below. + + +SEQUENTIAL CONSISTENCY PER VARIABLE +----------------------------------- + +According to the principle of cache coherence, the stores to any fixed +shared location in memory form a global ordering. We can imagine +inserting the loads from that location into this ordering, by placing +each load between the store that it reads from and the following +store. This leaves the relative positions of loads that read from the +same store unspecified; let's say they are inserted in program order, +first for CPU 0, then CPU 1, etc. + +You can check that the four coherency rules imply that the rf, co, fr, +and po-loc relations agree with this global ordering; in other words, +whenever we have X ->rf Y or X ->co Y or X ->fr Y or X ->po-loc Y, the +X event comes before the Y event in the global ordering. The LKMM's +"coherence" axiom expresses this by requiring the union of these +relations not to have any cycles. This means it must not be possible +to find events + + X0 -> X1 -> X2 -> ... -> Xn -> X0, + +where each of the links is either rf, co, fr, or po-loc. This has to +hold if the accesses to the fixed memory location can be ordered as +cache coherence demands. + +Although it is not obvious, it can be shown that the converse is also +true: This LKMM axiom implies that the four coherency rules are +obeyed. + + +ATOMIC UPDATES: rmw +------------------- + +What does it mean to say that a read-modify-write (rmw) update, such +as atomic_inc(&x), is atomic? It means that the memory location (x in +this case) does not get altered between the read and the write events +making up the atomic operation. In particular, if two CPUs perform +atomic_inc(&x) concurrently, it must be guaranteed that the final +value of x will be the initial value plus two. We should never have +the following sequence of events: + + CPU 0 loads x obtaining 13; + CPU 1 loads x obtaining 13; + CPU 0 stores 14 to x; + CPU 1 stores 14 to x; + +where the final value of x is wrong (14 rather than 15). + +In this example, CPU 0's increment effectively gets lost because it +occurs in between CPU 1's load and store. To put it another way, the +problem is that the position of CPU 0's store in x's coherence order +is between the store that CPU 1 reads from and the store that CPU 1 +performs. + +The same analysis applies to all atomic update operations. Therefore, +to enforce atomicity the LKMM requires that atomic updates follow this +rule: Whenever R and W are the read and write events composing an +atomic read-modify-write and W' is the write event which R reads from, +there must not be any stores coming between W' and W in the coherence +order. Equivalently, + + (R ->rmw W) implies (there is no X with R ->fr X and X ->co W), + +where the rmw relation links the read and write events making up each +atomic update. This is what the LKMM's "atomic" axiom says. + + +THE PRESERVED PROGRAM ORDER RELATION: ppo +----------------------------------------- + +There are many situations where a CPU is obliged to execute two +instructions in program order. We amalgamate them into the ppo (for +"preserved program order") relation, which links the po-earlier +instruction to the po-later instruction and is thus a sub-relation of +po. 
+ +The operational model already includes a description of one such +situation: Fences are a source of ppo links. Suppose X and Y are +memory accesses with X ->po Y; then the CPU must execute X before Y if +any of the following hold: + + A strong (smp_mb() or synchronize_rcu()) fence occurs between + X and Y; + + X and Y are both stores and an smp_wmb() fence occurs between + them; + + X and Y are both loads and an smp_rmb() fence occurs between + them; + + X is also an acquire fence, such as smp_load_acquire(); + + Y is also a release fence, such as smp_store_release(). + +Another possibility, not mentioned earlier but discussed in the next +section, is: + + X and Y are both loads, X ->addr Y (i.e., there is an address + dependency from X to Y), and X is a READ_ONCE() or an atomic + access. + +Dependencies can also cause instructions to be executed in program +order. This is uncontroversial when the second instruction is a +store; either a data, address, or control dependency from a load R to +a store W will force the CPU to execute R before W. This is very +simply because the CPU cannot tell the memory subsystem about W's +store before it knows what value should be stored (in the case of a +data dependency), what location it should be stored into (in the case +of an address dependency), or whether the store should actually take +place (in the case of a control dependency). + +Dependencies to load instructions are more problematic. To begin with, +there is no such thing as a data dependency to a load. Next, a CPU +has no reason to respect a control dependency to a load, because it +can always satisfy the second load speculatively before the first, and +then ignore the result if it turns out that the second load shouldn't +be executed after all. And lastly, the real difficulties begin when +we consider address dependencies to loads. + +To be fair about it, all Linux-supported architectures do execute +loads in program order if there is an address dependency between them. +After all, a CPU cannot ask the memory subsystem to load a value from +a particular location before it knows what that location is. However, +the split-cache design used by Alpha can cause it to behave in a way +that looks as if the loads were executed out of order (see the next +section for more details). The kernel includes a workaround for this +problem when the loads come from READ_ONCE(), and therefore the LKMM +includes address dependencies to loads in the ppo relation. + +On the other hand, dependencies can indirectly affect the ordering of +two loads. This happens when there is a dependency from a load to a +store and a second, po-later load reads from that store: + + R ->dep W ->rfi R', + +where the dep link can be either an address or a data dependency. In +this situation we know it is possible for the CPU to execute R' before +W, because it can forward the value that W will store to R'. But it +cannot execute R' before R, because it cannot forward the value before +it knows what that value is, or that W and R' do access the same +location. However, if there is merely a control dependency between R +and W then the CPU can speculatively forward W to R' before executing +R; if the speculation turns out to be wrong then the CPU merely has to +restart or abandon R'. 
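+
+Concretely (an illustrative snippet, not a proper litmus test), the
+R ->dep W ->rfi R' situation looks like this:
+
+	int x, y;
+
+	r1 = READ_ONCE(x);	/* R */
+	WRITE_ONCE(y, r1);	/* W: data dependency from R */
+	r2 = READ_ONCE(y);	/* R': reads from W, an rfi link */
+
+The CPU may forward the value of the store to y directly to r2, and so
+may execute R' before W; but it cannot execute R' before R, since the
+value to be forwarded is not known until R has executed.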
+ +(In theory, a CPU might forward a store to a load when it runs across +an address dependency like this: + + r1 = READ_ONCE(ptr); + WRITE_ONCE(*r1, 17); + r2 = READ_ONCE(*r1); + +because it could tell that the store and the second load access the +same location even before it knows what the location's address is. +However, none of the architectures supported by the Linux kernel do +this.) + +Two memory accesses of the same location must always be executed in +program order if the second access is a store. Thus, if we have + + R ->po-loc W + +(the po-loc link says that R comes before W in program order and they +access the same location), the CPU is obliged to execute W after R. +If it executed W first then the memory subsystem would respond to R's +read request with the value stored by W (or an even later store), in +violation of the read-write coherence rule. Similarly, if we had + + W ->po-loc W' + +and the CPU executed W' before W, then the memory subsystem would put +W' before W in the coherence order. It would effectively cause W to +overwrite W', in violation of the write-write coherence rule. +(Interestingly, an early ARMv8 memory model, now obsolete, proposed +allowing out-of-order writes like this to occur. The model avoided +violating the write-write coherence rule by requiring the CPU not to +send the W write to the memory subsystem at all!) + + +AND THEN THERE WAS ALPHA +------------------------ + +As mentioned above, the Alpha architecture is unique in that it does +not appear to respect address dependencies to loads. This means that +code such as the following: + + int x = 0; + int y = -1; + int *ptr = &y; + + P0() + { + WRITE_ONCE(x, 1); + smp_wmb(); + WRITE_ONCE(ptr, &x); + } + + P1() + { + int *r1; + int r2; + + r1 = ptr; + r2 = READ_ONCE(*r1); + } + +can malfunction on Alpha systems (notice that P1 uses an ordinary load +to read ptr instead of READ_ONCE()). It is quite possible that r1 = &x +and r2 = 0 at the end, in spite of the address dependency. + +At first glance this doesn't seem to make sense. We know that the +smp_wmb() forces P0's store to x to propagate to P1 before the store +to ptr does. And since P1 can't execute its second load +until it knows what location to load from, i.e., after executing its +first load, the value x = 1 must have propagated to P1 before the +second load executed. So why doesn't r2 end up equal to 1? + +The answer lies in the Alpha's split local caches. Although the two +stores do reach P1's local cache in the proper order, it can happen +that the first store is processed by a busy part of the cache while +the second store is processed by an idle part. As a result, the x = 1 +value may not become available for P1's CPU to read until after the +ptr = &x value does, leading to the undesirable result above. The +final effect is that even though the two loads really are executed in +program order, it appears that they aren't. + +This could not have happened if the local cache had processed the +incoming stores in FIFO order. By contrast, other architectures +maintain at least the appearance of FIFO order. + +In practice, this difficulty is solved by inserting a special fence +between P1's two loads when the kernel is compiled for the Alpha +architecture. In fact, as of version 4.15, the kernel automatically +adds this fence after every READ_ONCE() and atomic load on Alpha. 
The +effect of the fence is to cause the CPU not to execute any po-later +instructions until after the local cache has finished processing all +the stores it has already received. Thus, if the code was changed to: + + P1() + { + int *r1; + int r2; + + r1 = READ_ONCE(ptr); + r2 = READ_ONCE(*r1); + } + +then we would never get r1 = &x and r2 = 0. By the time P1 executed +its second load, the x = 1 store would already be fully processed by +the local cache and available for satisfying the read request. Thus +we have yet another reason why shared data should always be read with +READ_ONCE() or another synchronization primitive rather than accessed +directly. + +The LKMM requires that smp_rmb(), acquire fences, and strong fences +share this property: They do not allow the CPU to execute any po-later +instructions (or po-later loads in the case of smp_rmb()) until all +outstanding stores have been processed by the local cache. In the +case of a strong fence, the CPU first has to wait for all of its +po-earlier stores to propagate to every other CPU in the system; then +it has to wait for the local cache to process all the stores received +as of that time -- not just the stores received when the strong fence +began. + +And of course, none of this matters for any architecture other than +Alpha. + + +THE HAPPENS-BEFORE RELATION: hb +------------------------------- + +The happens-before relation (hb) links memory accesses that have to +execute in a certain order. hb includes the ppo relation and two +others, one of which is rfe. + +W ->rfe R implies that W and R are on different CPUs. It also means +that W's store must have propagated to R's CPU before R executed; +otherwise R could not have read the value stored by W. Therefore W +must have executed before R, and so we have W ->hb R. + +The equivalent fact need not hold if W ->rfi R (i.e., W and R are on +the same CPU). As we have already seen, the operational model allows +W's value to be forwarded to R in such cases, meaning that R may well +execute before W does. + +It's important to understand that neither coe nor fre is included in +hb, despite their similarities to rfe. For example, suppose we have +W ->coe W'. This means that W and W' are stores to the same location, +they execute on different CPUs, and W comes before W' in the coherence +order (i.e., W' overwrites W). Nevertheless, it is possible for W' to +execute before W, because the decision as to which store overwrites +the other is made later by the memory subsystem. When the stores are +nearly simultaneous, either one can come out on top. Similarly, +R ->fre W means that W overwrites the value which R reads, but it +doesn't mean that W has to execute after R. All that's necessary is +for the memory subsystem not to propagate W to R's CPU until after R +has executed, which is possible if W executes shortly before R. + +The third relation included in hb is like ppo, in that it only links +events that are on the same CPU. However it is more difficult to +explain, because it arises only indirectly from the requirement of +cache coherence. The relation is called prop, and it links two events +on CPU C in situations where a store from some other CPU comes after +the first event in the coherence order and propagates to C before the +second event executes. + +This is best explained with some examples. 
The simplest case looks +like this: + + int x; + + P0() + { + int r1; + + WRITE_ONCE(x, 1); + r1 = READ_ONCE(x); + } + + P1() + { + WRITE_ONCE(x, 8); + } + +If r1 = 8 at the end then P0's accesses must have executed in program +order. We can deduce this from the operational model; if P0's load +had executed before its store then the value of the store would have +been forwarded to the load, so r1 would have ended up equal to 1, not +8. In this case there is a prop link from P0's write event to its read +event, because P1's store came after P0's store in x's coherence +order, and P1's store propagated to P0 before P0's load executed. + +An equally simple case involves two loads of the same location that +read from different stores: + + int x = 0; + + P0() + { + int r1, r2; + + r1 = READ_ONCE(x); + r2 = READ_ONCE(x); + } + + P1() + { + WRITE_ONCE(x, 9); + } + +If r1 = 0 and r2 = 9 at the end then P0's accesses must have executed +in program order. If the second load had executed before the first +then the x = 9 store must have been propagated to P0 before the first +load executed, and so r1 would have been 9 rather than 0. In this +case there is a prop link from P0's first read event to its second, +because P1's store overwrote the value read by P0's first load, and +P1's store propagated to P0 before P0's second load executed. + +Less trivial examples of prop all involve fences. Unlike the simple +examples above, they can require that some instructions are executed +out of program order. This next one should look familiar: + + int buf = 0, flag = 0; + + P0() + { + WRITE_ONCE(buf, 1); + smp_wmb(); + WRITE_ONCE(flag, 1); + } + + P1() + { + int r1; + int r2; + + r1 = READ_ONCE(flag); + r2 = READ_ONCE(buf); + } + +This is the MP pattern again, with an smp_wmb() fence between the two +stores. If r1 = 1 and r2 = 0 at the end then there is a prop link +from P1's second load to its first (backwards!). The reason is +similar to the previous examples: The value P1 loads from buf gets +overwritten by P0's store to buf, the fence guarantees that the store +to buf will propagate to P1 before the store to flag does, and the +store to flag propagates to P1 before P1 reads flag. + +The prop link says that in order to obtain the r1 = 1, r2 = 0 result, +P1 must execute its second load before the first. Indeed, if the load +from flag were executed first, then the buf = 1 store would already +have propagated to P1 by the time P1's load from buf executed, so r2 +would have been 1 at the end, not 0. (The reasoning holds even for +Alpha, although the details are more complicated and we will not go +into them.) + +But what if we put an smp_rmb() fence between P1's loads? The fence +would force the two loads to be executed in program order, and it +would generate a cycle in the hb relation: The fence would create a ppo +link (hence an hb link) from the first load to the second, and the +prop relation would give an hb link from the second load to the first. +Since an instruction can't execute before itself, we are forced to +conclude that if an smp_rmb() fence is added, the r1 = 1, r2 = 0 +outcome is impossible -- as it should be. + +The formal definition of the prop relation involves a coe or fre link, +followed by an arbitrary number of cumul-fence links, ending with an +rfe link. You can concoct more exotic examples, containing more than +one fence, although this quickly leads to diminishing returns in terms +of complexity. 
For instance, here's an example containing a coe link +followed by two cumul-fences and an rfe link, utilizing the fact that +release fences are A-cumulative: + + int x, y, z; + + P0() + { + int r0; + + WRITE_ONCE(x, 1); + r0 = READ_ONCE(z); + } + + P1() + { + WRITE_ONCE(x, 2); + smp_wmb(); + WRITE_ONCE(y, 1); + } + + P2() + { + int r2; + + r2 = READ_ONCE(y); + smp_store_release(&z, 1); + } + +If x = 2, r0 = 1, and r2 = 1 after this code runs then there is a prop +link from P0's store to its load. This is because P0's store gets +overwritten by P1's store since x = 2 at the end (a coe link), the +smp_wmb() ensures that P1's store to x propagates to P2 before the +store to y does (the first cumul-fence), the store to y propagates to P2 +before P2's load and store execute, P2's smp_store_release() +guarantees that the stores to x and y both propagate to P0 before the +store to z does (the second cumul-fence), and P0's load executes after the +store to z has propagated to P0 (an rfe link). + +In summary, the fact that the hb relation links memory access events +in the order they execute means that it must not have cycles. This +requirement is the content of the LKMM's "happens-before" axiom. + +The LKMM defines yet another relation connected to times of +instruction execution, but it is not included in hb. It relies on the +particular properties of strong fences, which we cover in the next +section. + + +THE PROPAGATES-BEFORE RELATION: pb +---------------------------------- + +The propagates-before (pb) relation capitalizes on the special +features of strong fences. It links two events E and F whenever some +store is coherence-later than E and propagates to every CPU and to RAM +before F executes. The formal definition requires that E be linked to +F via a coe or fre link, an arbitrary number of cumul-fences, an +optional rfe link, a strong fence, and an arbitrary number of hb +links. Let's see how this definition works out. + +Consider first the case where E is a store (implying that the sequence +of links begins with coe). Then there are events W, X, Y, and Z such +that: + + E ->coe W ->cumul-fence* X ->rfe? Y ->strong-fence Z ->hb* F, + +where the * suffix indicates an arbitrary number of links of the +specified type, and the ? suffix indicates the link is optional (Y may +be equal to X). Because of the cumul-fence links, we know that W will +propagate to Y's CPU before X does, hence before Y executes and hence +before the strong fence executes. Because this fence is strong, we +know that W will propagate to every CPU and to RAM before Z executes. +And because of the hb links, we know that Z will execute before F. +Thus W, which comes later than E in the coherence order, will +propagate to every CPU and to RAM before F executes. + +The case where E is a load is exactly the same, except that the first +link in the sequence is fre instead of coe. + +The existence of a pb link from E to F implies that E must execute +before F. To see why, suppose that F executed first. Then W would +have propagated to E's CPU before E executed. If E was a store, the +memory subsystem would then be forced to make E come after W in the +coherence order, contradicting the fact that E ->coe W. If E was a +load, the memory subsystem would then be forced to satisfy E's read +request with the value stored by W or an even later store, +contradicting the fact that E ->fre W. 
+ +A good example illustrating how pb works is the SB pattern with strong +fences: + + int x = 0, y = 0; + + P0() + { + int r0; + + WRITE_ONCE(x, 1); + smp_mb(); + r0 = READ_ONCE(y); + } + + P1() + { + int r1; + + WRITE_ONCE(y, 1); + smp_mb(); + r1 = READ_ONCE(x); + } + +If r0 = 0 at the end then there is a pb link from P0's load to P1's +load: an fre link from P0's load to P1's store (which overwrites the +value read by P0), and a strong fence between P1's store and its load. +In this example, the sequences of cumul-fence and hb links are empty. +Note that this pb link is not included in hb as an instance of prop, +because it does not start and end on the same CPU. + +Similarly, if r1 = 0 at the end then there is a pb link from P1's load +to P0's. This means that if both r1 and r2 were 0 there would be a +cycle in pb, which is not possible since an instruction cannot execute +before itself. Thus, adding smp_mb() fences to the SB pattern +prevents the r0 = 0, r1 = 0 outcome. + +In summary, the fact that the pb relation links events in the order +they execute means that it cannot have cycles. This requirement is +the content of the LKMM's "propagation" axiom. + + +RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-order, rcu-fence, and rb +------------------------------------------------------------------------ + +RCU (Read-Copy-Update) is a powerful synchronization mechanism. It +rests on two concepts: grace periods and read-side critical sections. + +A grace period is the span of time occupied by a call to +synchronize_rcu(). A read-side critical section (or just critical +section, for short) is a region of code delimited by rcu_read_lock() +at the start and rcu_read_unlock() at the end. Critical sections can +be nested, although we won't make use of this fact. + +As far as memory models are concerned, RCU's main feature is its +Grace-Period Guarantee, which states that a critical section can never +span a full grace period. In more detail, the Guarantee says: + + For any critical section C and any grace period G, at least + one of the following statements must hold: + +(1) C ends before G does, and in addition, every store that + propagates to C's CPU before the end of C must propagate to + every CPU before G ends. + +(2) G starts before C does, and in addition, every store that + propagates to G's CPU before the start of G must propagate + to every CPU before C starts. + +In particular, it is not possible for a critical section to both start +before and end after a grace period. + +Here is a simple example of RCU in action: + + int x, y; + + P0() + { + rcu_read_lock(); + WRITE_ONCE(x, 1); + WRITE_ONCE(y, 1); + rcu_read_unlock(); + } + + P1() + { + int r1, r2; + + r1 = READ_ONCE(x); + synchronize_rcu(); + r2 = READ_ONCE(y); + } + +The Grace Period Guarantee tells us that when this code runs, it will +never end with r1 = 1 and r2 = 0. The reasoning is as follows. r1 = 1 +means that P0's store to x propagated to P1 before P1 called +synchronize_rcu(), so P0's critical section must have started before +P1's grace period, contrary to part (2) of the Guarantee. On the +other hand, r2 = 0 means that P0's store to y, which occurs before the +end of the critical section, did not propagate to P1 before the end of +the grace period, contrary to part (1). Together the results violate +the Guarantee. + +In the kernel's implementations of RCU, the requirements for stores +to propagate to every CPU are fulfilled by placing strong fences at +suitable places in the RCU-related code. 
Thus, if a critical section +starts before a grace period does then the critical section's CPU will +execute an smp_mb() fence after the end of the critical section and +some time before the grace period's synchronize_rcu() call returns. +And if a critical section ends after a grace period does then the +synchronize_rcu() routine will execute an smp_mb() fence at its start +and some time before the critical section's opening rcu_read_lock() +executes. + +What exactly do we mean by saying that a critical section "starts +before" or "ends after" a grace period? Some aspects of the meaning +are pretty obvious, as in the example above, but the details aren't +entirely clear. The LKMM formalizes this notion by means of the +rcu-link relation. rcu-link encompasses a very general notion of +"before": If E and F are RCU fence events (i.e., rcu_read_lock(), +rcu_read_unlock(), or synchronize_rcu()) then among other things, +E ->rcu-link F includes cases where E is po-before some memory-access +event X, F is po-after some memory-access event Y, and we have any of +X ->rfe Y, X ->co Y, or X ->fr Y. + +The formal definition of the rcu-link relation is more than a little +obscure, and we won't give it here. It is closely related to the pb +relation, and the details don't matter unless you want to comb through +a somewhat lengthy formal proof. Pretty much all you need to know +about rcu-link is the information in the preceding paragraph. + +The LKMM also defines the rcu-gp and rcu-rscsi relations. They bring +grace periods and read-side critical sections into the picture, in the +following way: + + E ->rcu-gp F means that E and F are in fact the same event, + and that event is a synchronize_rcu() fence (i.e., a grace + period). + + E ->rcu-rscsi F means that E and F are the rcu_read_unlock() + and rcu_read_lock() fence events delimiting some read-side + critical section. (The 'i' at the end of the name emphasizes + that this relation is "inverted": It links the end of the + critical section to the start.) + +If we think of the rcu-link relation as standing for an extended +"before", then X ->rcu-gp Y ->rcu-link Z roughly says that X is a +grace period which ends before Z begins. (In fact it covers more than +this, because it also includes cases where some store propagates to +Z's CPU before Z begins but doesn't propagate to some other CPU until +after X ends.) Similarly, X ->rcu-rscsi Y ->rcu-link Z says that X is +the end of a critical section which starts before Z begins. + +The LKMM goes on to define the rcu-order relation as a sequence of +rcu-gp and rcu-rscsi links separated by rcu-link links, in which the +number of rcu-gp links is >= the number of rcu-rscsi links. For +example: + + X ->rcu-gp Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V + +would imply that X ->rcu-order V, because this sequence contains two +rcu-gp links and one rcu-rscsi link. (It also implies that +X ->rcu-order T and Z ->rcu-order V.) On the other hand: + + X ->rcu-rscsi Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V + +does not imply X ->rcu-order V, because the sequence contains only +one rcu-gp link but two rcu-rscsi links. + +The rcu-order relation is important because the Grace Period Guarantee +means that rcu-order links act kind of like strong fences. In +particular, E ->rcu-order F implies not only that E begins before F +ends, but also that any write po-before E will propagate to every CPU +before any instruction po-after F can execute. 
(However, it does not +imply that E must execute before F; in fact, each synchronize_rcu() +fence event is linked to itself by rcu-order as a degenerate case.) + +To prove this in full generality requires some intellectual effort. +We'll consider just a very simple case: + + G ->rcu-gp W ->rcu-link Z ->rcu-rscsi F. + +This formula means that G and W are the same event (a grace period), +and there are events X, Y and a read-side critical section C such that: + + 1. G = W is po-before or equal to X; + + 2. X comes "before" Y in some sense (including rfe, co and fr); + + 3. Y is po-before Z; + + 4. Z is the rcu_read_unlock() event marking the end of C; + + 5. F is the rcu_read_lock() event marking the start of C. + +From 1 - 4 we deduce that the grace period G ends before the critical +section C. Then part (2) of the Grace Period Guarantee says not only +that G starts before C does, but also that any write which executes on +G's CPU before G starts must propagate to every CPU before C starts. +In particular, the write propagates to every CPU before F finishes +executing and hence before any instruction po-after F can execute. +This sort of reasoning can be extended to handle all the situations +covered by rcu-order. + +The rcu-fence relation is a simple extension of rcu-order. While +rcu-order only links certain fence events (calls to synchronize_rcu(), +rcu_read_lock(), or rcu_read_unlock()), rcu-fence links any events +that are separated by an rcu-order link. This is analogous to the way +the strong-fence relation links events that are separated by an +smp_mb() fence event (as mentioned above, rcu-order links act kind of +like strong fences). Written symbolically, X ->rcu-fence Y means +there are fence events E and F such that: + + X ->po E ->rcu-order F ->po Y. + +From the discussion above, we see this implies not only that X +executes before Y, but also (if X is a store) that X propagates to +every CPU before Y executes. Thus rcu-fence is sort of a +"super-strong" fence: Unlike the original strong fences (smp_mb() and +synchronize_rcu()), rcu-fence is able to link events on different +CPUs. (Perhaps this fact should lead us to say that rcu-fence isn't +really a fence at all!) + +Finally, the LKMM defines the RCU-before (rb) relation in terms of +rcu-fence. This is done in essentially the same way as the pb +relation was defined in terms of strong-fence. We will omit the +details; the end result is that E ->rb F implies E must execute +before F, just as E ->pb F does (and for much the same reasons). + +Putting this all together, the LKMM expresses the Grace Period +Guarantee by requiring that the rb relation does not contain a cycle. +Equivalently, this "rcu" axiom requires that there are no events E +and F with E ->rcu-link F ->rcu-order E. Or to put it a third way, +the axiom requires that there are no cycles consisting of rcu-gp and +rcu-rscsi alternating with rcu-link, where the number of rcu-gp links +is >= the number of rcu-rscsi links. + +Justifying the axiom isn't easy, but it is in fact a valid +formalization of the Grace Period Guarantee. We won't attempt to go +through the detailed argument, but the following analysis gives a +taste of what is involved. Suppose both parts of the Guarantee are +violated: A critical section starts before a grace period, and some +store propagates to the critical section's CPU before the end of the +critical section but doesn't propagate to some other CPU until after +the end of the grace period. 
+ +Putting symbols to these ideas, let L and U be the rcu_read_lock() and +rcu_read_unlock() fence events delimiting the critical section in +question, and let S be the synchronize_rcu() fence event for the grace +period. Saying that the critical section starts before S means there +are events Q and R where Q is po-after L (which marks the start of the +critical section), Q is "before" R in the sense used by the rcu-link +relation, and R is po-before the grace period S. Thus we have: + + L ->rcu-link S. + +Let W be the store mentioned above, let Y come before the end of the +critical section and witness that W propagates to the critical +section's CPU by reading from W, and let Z on some arbitrary CPU be a +witness that W has not propagated to that CPU, where Z happens after +some event X which is po-after S. Symbolically, this amounts to: + + S ->po X ->hb* Z ->fr W ->rf Y ->po U. + +The fr link from Z to W indicates that W has not propagated to Z's CPU +at the time that Z executes. From this, it can be shown (see the +discussion of the rcu-link relation earlier) that S and U are related +by rcu-link: + + S ->rcu-link U. + +Since S is a grace period we have S ->rcu-gp S, and since L and U are +the start and end of the critical section C we have U ->rcu-rscsi L. +From this we obtain: + + S ->rcu-gp S ->rcu-link U ->rcu-rscsi L ->rcu-link S, + +a forbidden cycle. Thus the "rcu" axiom rules out this violation of +the Grace Period Guarantee. + +For something a little more down-to-earth, let's see how the axiom +works out in practice. Consider the RCU code example from above, this +time with statement labels added: + + int x, y; + + P0() + { + L: rcu_read_lock(); + X: WRITE_ONCE(x, 1); + Y: WRITE_ONCE(y, 1); + U: rcu_read_unlock(); + } + + P1() + { + int r1, r2; + + Z: r1 = READ_ONCE(x); + S: synchronize_rcu(); + W: r2 = READ_ONCE(y); + } + + +If r2 = 0 at the end then P0's store at Y overwrites the value that +P1's load at W reads from, so we have W ->fre Y. Since S ->po W and +also Y ->po U, we get S ->rcu-link U. In addition, S ->rcu-gp S +because S is a grace period. + +If r1 = 1 at the end then P1's load at Z reads from P0's store at X, +so we have X ->rfe Z. Together with L ->po X and Z ->po S, this +yields L ->rcu-link S. And since L and U are the start and end of a +critical section, we have U ->rcu-rscsi L. + +Then U ->rcu-rscsi L ->rcu-link S ->rcu-gp S ->rcu-link U is a +forbidden cycle, violating the "rcu" axiom. Hence the outcome is not +allowed by the LKMM, as we would expect. + +For contrast, let's see what can happen in a more complicated example: + + int x, y, z; + + P0() + { + int r0; + + L0: rcu_read_lock(); + r0 = READ_ONCE(x); + WRITE_ONCE(y, 1); + U0: rcu_read_unlock(); + } + + P1() + { + int r1; + + r1 = READ_ONCE(y); + S1: synchronize_rcu(); + WRITE_ONCE(z, 1); + } + + P2() + { + int r2; + + L2: rcu_read_lock(); + r2 = READ_ONCE(z); + WRITE_ONCE(x, 1); + U2: rcu_read_unlock(); + } + +If r0 = r1 = r2 = 1 at the end, then similar reasoning to before shows +that U0 ->rcu-rscsi L0 ->rcu-link S1 ->rcu-gp S1 ->rcu-link U2 ->rcu-rscsi +L2 ->rcu-link U0. However this cycle is not forbidden, because the +sequence of relations contains fewer instances of rcu-gp (one) than of +rcu-rscsi (two). Consequently the outcome is allowed by the LKMM. +The following instruction timing diagram shows how it might actually +occur: + +P0 P1 P2 +-------------------- -------------------- -------------------- +rcu_read_lock() +WRITE_ONCE(y, 1) + r1 = READ_ONCE(y) + synchronize_rcu() starts + . 
rcu_read_lock() + . WRITE_ONCE(x, 1) +r0 = READ_ONCE(x) . +rcu_read_unlock() . + synchronize_rcu() ends + WRITE_ONCE(z, 1) + r2 = READ_ONCE(z) + rcu_read_unlock() + +This requires P0 and P2 to execute their loads and stores out of +program order, but of course they are allowed to do so. And as you +can see, the Grace Period Guarantee is not violated: The critical +section in P0 both starts before P1's grace period does and ends +before it does, and the critical section in P2 both starts after P1's +grace period does and ends after it does. + +Addendum: The LKMM now supports SRCU (Sleepable Read-Copy-Update) in +addition to normal RCU. The ideas involved are much the same as +above, with new relations srcu-gp and srcu-rscsi added to represent +SRCU grace periods and read-side critical sections. There is a +restriction on the srcu-gp and srcu-rscsi links that can appear in an +rcu-order sequence (the srcu-rscsi links must be paired with srcu-gp +links having the same SRCU domain with proper nesting); the details +are relatively unimportant. + + +LOCKING +------- + +The LKMM includes locking. In fact, there is special code for locking +in the formal model, added in order to make tools run faster. +However, this special code is intended to be more or less equivalent +to concepts we have already covered. A spinlock_t variable is treated +the same as an int, and spin_lock(&s) is treated almost the same as: + + while (cmpxchg_acquire(&s, 0, 1) != 0) + cpu_relax(); + +This waits until s is equal to 0 and then atomically sets it to 1, +and the read part of the cmpxchg operation acts as an acquire fence. +An alternate way to express the same thing would be: + + r = xchg_acquire(&s, 1); + +along with a requirement that at the end, r = 0. Similarly, +spin_trylock(&s) is treated almost the same as: + + return !cmpxchg_acquire(&s, 0, 1); + +which atomically sets s to 1 if it is currently equal to 0 and returns +true if it succeeds (the read part of the cmpxchg operation acts as an +acquire fence only if the operation is successful). spin_unlock(&s) +is treated almost the same as: + + smp_store_release(&s, 0); + +The "almost" qualifiers above need some explanation. In the LKMM, the +store-release in a spin_unlock() and the load-acquire which forms the +first half of the atomic rmw update in a spin_lock() or a successful +spin_trylock() -- we can call these things lock-releases and +lock-acquires -- have two properties beyond those of ordinary releases +and acquires. + +First, when a lock-acquire reads from a lock-release, the LKMM +requires that every instruction po-before the lock-release must +execute before any instruction po-after the lock-acquire. This would +naturally hold if the release and acquire operations were on different +CPUs, but the LKMM says it holds even when they are on the same CPU. +For example: + + int x, y; + spinlock_t s; + + P0() + { + int r1, r2; + + spin_lock(&s); + r1 = READ_ONCE(x); + spin_unlock(&s); + spin_lock(&s); + r2 = READ_ONCE(y); + spin_unlock(&s); + } + + P1() + { + WRITE_ONCE(y, 1); + smp_wmb(); + WRITE_ONCE(x, 1); + } + +Here the second spin_lock() reads from the first spin_unlock(), and +therefore the load of x must execute before the load of y. Thus we +cannot have r1 = 1 and r2 = 0 at the end (this is an instance of the +MP pattern). + +This requirement does not apply to ordinary release and acquire +fences, only to lock-related operations. 
For instance, suppose P0()
+in the example had been written as:
+
+	P0()
+	{
+		int r1, r2, r3;
+
+		r1 = READ_ONCE(x);
+		smp_store_release(&s, 1);
+		r3 = smp_load_acquire(&s);
+		r2 = READ_ONCE(y);
+	}
+
+Then the CPU would be allowed to forward the s = 1 value from the
+smp_store_release() to the smp_load_acquire(), executing the
+instructions in the following order:
+
+		r3 = smp_load_acquire(&s);	// Obtains r3 = 1
+		r2 = READ_ONCE(y);
+		r1 = READ_ONCE(x);
+		smp_store_release(&s, 1);	// Value is forwarded
+
+and thus it could load y before x, obtaining r2 = 0 and r1 = 1.
+
+Second, when a lock-acquire reads from a lock-release, and some other
+stores W and W' occur po-before the lock-release and po-after the
+lock-acquire respectively, the LKMM requires that W must propagate to
+each CPU before W' does.  For example, consider:
+
+	int x, y;
+	spinlock_t s;
+
+	P0()
+	{
+		spin_lock(&s);
+		WRITE_ONCE(x, 1);
+		spin_unlock(&s);
+	}
+
+	P1()
+	{
+		int r1;
+
+		spin_lock(&s);
+		r1 = READ_ONCE(x);
+		WRITE_ONCE(y, 1);
+		spin_unlock(&s);
+	}
+
+	P2()
+	{
+		int r2, r3;
+
+		r2 = READ_ONCE(y);
+		smp_rmb();
+		r3 = READ_ONCE(x);
+	}
+
+If r1 = 1 at the end then the spin_lock() in P1 must have read from
+the spin_unlock() in P0.  Hence the store to x must propagate to P2
+before the store to y does, so we cannot have r2 = 1 and r3 = 0.
+
+These two special requirements for lock-release and lock-acquire do
+not arise from the operational model.  Nevertheless, kernel developers
+have come to expect and rely on them because they do hold on all
+architectures supported by the Linux kernel, albeit for various
+differing reasons.
+
+
+PLAIN ACCESSES AND DATA RACES
+-----------------------------
+
+In the LKMM, memory accesses such as READ_ONCE(x), atomic_inc(&y),
+smp_load_acquire(&z), and so on are collectively referred to as
+"marked" accesses, because they are all annotated with special
+operations of one kind or another.  Ordinary C-language memory
+accesses such as x or y = 0 are simply called "plain" accesses.
+
+Early versions of the LKMM had nothing to say about plain accesses.
+The C standard allows compilers to assume that the variables affected
+by plain accesses are not concurrently read or written by any other
+threads or CPUs.  This leaves compilers free to implement all manner
+of transformations or optimizations of code containing plain accesses,
+making such code very difficult for a memory model to handle.
+
+Here is just one example of a possible pitfall:
+
+	int a = 6;
+	int *x = &a;
+
+	P0()
+	{
+		int *r1;
+		int r2 = 0;
+
+		r1 = x;
+		if (r1 != NULL)
+			r2 = READ_ONCE(*r1);
+	}
+
+	P1()
+	{
+		WRITE_ONCE(x, NULL);
+	}
+
+On the face of it, one would expect that when this code runs, the only
+possible final values for r2 are 6 and 0, depending on whether or not
+P1's store to x propagates to P0 before P0's load from x executes.
+But since P0's load from x is a plain access, the compiler may decide
+to carry out the load twice (for the comparison against NULL, then again
+for the READ_ONCE()) and eliminate the temporary variable r1.  The
+object code generated for P0 could therefore end up looking rather
+like this:
+
+	P0()
+	{
+		int r2 = 0;
+
+		if (x != NULL)
+			r2 = READ_ONCE(*x);
+	}
+
+And now it is obvious that this code runs the risk of dereferencing a
+NULL pointer, because P1's store to x might propagate to P0 after the
+test against NULL has been made but before the READ_ONCE() executes.
+If the original code had said "r1 = READ_ONCE(x)" instead of "r1 = x", +the compiler would not have performed this optimization and there +would be no possibility of a NULL-pointer dereference. + +Given the possibility of transformations like this one, the LKMM +doesn't try to predict all possible outcomes of code containing plain +accesses. It is instead content to determine whether the code +violates the compiler's assumptions, which would render the ultimate +outcome undefined. + +In technical terms, the compiler is allowed to assume that when the +program executes, there will not be any data races. A "data race" +occurs when there are two memory accesses such that: + +1. they access the same location, + +2. at least one of them is a store, + +3. at least one of them is plain, + +4. they occur on different CPUs (or in different threads on the + same CPU), and + +5. they execute concurrently. + +In the literature, two accesses are said to "conflict" if they satisfy +1 and 2 above. We'll go a little farther and say that two accesses +are "race candidates" if they satisfy 1 - 4. Thus, whether or not two +race candidates actually do race in a given execution depends on +whether they are concurrent. + +The LKMM tries to determine whether a program contains race candidates +which may execute concurrently; if it does then the LKMM says there is +a potential data race and makes no predictions about the program's +outcome. + +Determining whether two accesses are race candidates is easy; you can +see that all the concepts involved in the definition above are already +part of the memory model. The hard part is telling whether they may +execute concurrently. The LKMM takes a conservative attitude, +assuming that accesses may be concurrent unless it can prove they +are not. + +If two memory accesses aren't concurrent then one must execute before +the other. Therefore the LKMM decides two accesses aren't concurrent +if they can be connected by a sequence of hb, pb, and rb links +(together referred to as xb, for "executes before"). However, there +are two complicating factors. + +If X is a load and X executes before a store Y, then indeed there is +no danger of X and Y being concurrent. After all, Y can't have any +effect on the value obtained by X until the memory subsystem has +propagated Y from its own CPU to X's CPU, which won't happen until +some time after Y executes and thus after X executes. But if X is a +store, then even if X executes before Y it is still possible that X +will propagate to Y's CPU just as Y is executing. In such a case X +could very well interfere somehow with Y, and we would have to +consider X and Y to be concurrent. + +Therefore when X is a store, for X and Y to be non-concurrent the LKMM +requires not only that X must execute before Y but also that X must +propagate to Y's CPU before Y executes. (Or vice versa, of course, if +Y executes before X -- then Y must propagate to X's CPU before X +executes if Y is a store.) 
This is expressed by the visibility +relation (vis), where X ->vis Y is defined to hold if there is an +intermediate event Z such that: + + X is connected to Z by a possibly empty sequence of + cumul-fence links followed by an optional rfe link (if none of + these links are present, X and Z are the same event), + +and either: + + Z is connected to Y by a strong-fence link followed by a + possibly empty sequence of xb links, + +or: + + Z is on the same CPU as Y and is connected to Y by a possibly + empty sequence of xb links (again, if the sequence is empty it + means Z and Y are the same event). + +The motivations behind this definition are straightforward: + + cumul-fence memory barriers force stores that are po-before + the barrier to propagate to other CPUs before stores that are + po-after the barrier. + + An rfe link from an event W to an event R says that R reads + from W, which certainly means that W must have propagated to + R's CPU before R executed. + + strong-fence memory barriers force stores that are po-before + the barrier, or that propagate to the barrier's CPU before the + barrier executes, to propagate to all CPUs before any events + po-after the barrier can execute. + +To see how this works out in practice, consider our old friend, the MP +pattern (with fences and statement labels, but without the conditional +test): + + int buf = 0, flag = 0; + + P0() + { + X: WRITE_ONCE(buf, 1); + smp_wmb(); + W: WRITE_ONCE(flag, 1); + } + + P1() + { + int r1; + int r2 = 0; + + Z: r1 = READ_ONCE(flag); + smp_rmb(); + Y: r2 = READ_ONCE(buf); + } + +The smp_wmb() memory barrier gives a cumul-fence link from X to W, and +assuming r1 = 1 at the end, there is an rfe link from W to Z. This +means that the store to buf must propagate from P0 to P1 before Z +executes. Next, Z and Y are on the same CPU and the smp_rmb() fence +provides an xb link from Z to Y (i.e., it forces Z to execute before +Y). Therefore we have X ->vis Y: X must propagate to Y's CPU before Y +executes. + +The second complicating factor mentioned above arises from the fact +that when we are considering data races, some of the memory accesses +are plain. Now, although we have not said so explicitly, up to this +point most of the relations defined by the LKMM (ppo, hb, prop, +cumul-fence, pb, and so on -- including vis) apply only to marked +accesses. + +There are good reasons for this restriction. The compiler is not +allowed to apply fancy transformations to marked accesses, and +consequently each such access in the source code corresponds more or +less directly to a single machine instruction in the object code. But +plain accesses are a different story; the compiler may combine them, +split them up, duplicate them, eliminate them, invent new ones, and +who knows what else. Seeing a plain access in the source code tells +you almost nothing about what machine instructions will end up in the +object code. + +Fortunately, the compiler isn't completely free; it is subject to some +limitations. For one, it is not allowed to introduce a data race into +the object code if the source code does not already contain a data +race (if it could, memory models would be useless and no multithreaded +code would be safe!). For another, it cannot move a plain access past +a compiler barrier. + +A compiler barrier is a kind of fence, but as the name implies, it +only affects the compiler; it does not necessarily have any effect on +how instructions are executed by the CPU. 
In Linux kernel source +code, the barrier() function is a compiler barrier. It doesn't give +rise directly to any machine instructions in the object code; rather, +it affects how the compiler generates the rest of the object code. +Given source code like this: + + ... some memory accesses ... + barrier(); + ... some other memory accesses ... + +the barrier() function ensures that the machine instructions +corresponding to the first group of accesses will all end po-before +any machine instructions corresponding to the second group of accesses +-- even if some of the accesses are plain. (Of course, the CPU may +then execute some of those accesses out of program order, but we +already know how to deal with such issues.) Without the barrier() +there would be no such guarantee; the two groups of accesses could be +intermingled or even reversed in the object code. + +The LKMM doesn't say much about the barrier() function, but it does +require that all fences are also compiler barriers. In addition, it +requires that the ordering properties of memory barriers such as +smp_rmb() or smp_store_release() apply to plain accesses as well as to +marked accesses. + +This is the key to analyzing data races. Consider the MP pattern +again, now using plain accesses for buf: + + int buf = 0, flag = 0; + + P0() + { + U: buf = 1; + smp_wmb(); + X: WRITE_ONCE(flag, 1); + } + + P1() + { + int r1; + int r2 = 0; + + Y: r1 = READ_ONCE(flag); + if (r1) { + smp_rmb(); + V: r2 = buf; + } + } + +This program does not contain a data race. Although the U and V +accesses are race candidates, the LKMM can prove they are not +concurrent as follows: + + The smp_wmb() fence in P0 is both a compiler barrier and a + cumul-fence. It guarantees that no matter what hash of + machine instructions the compiler generates for the plain + access U, all those instructions will be po-before the fence. + Consequently U's store to buf, no matter how it is carried out + at the machine level, must propagate to P1 before X's store to + flag does. + + X and Y are both marked accesses. Hence an rfe link from X to + Y is a valid indicator that X propagated to P1 before Y + executed, i.e., X ->vis Y. (And if there is no rfe link then + r1 will be 0, so V will not be executed and ipso facto won't + race with U.) + + The smp_rmb() fence in P1 is a compiler barrier as well as a + fence. It guarantees that all the machine-level instructions + corresponding to the access V will be po-after the fence, and + therefore any loads among those instructions will execute + after the fence does and hence after Y does. + +Thus U's store to buf is forced to propagate to P1 before V's load +executes (assuming V does execute), ruling out the possibility of a +data race between them. + +This analysis illustrates how the LKMM deals with plain accesses in +general. Suppose R is a plain load and we want to show that R +executes before some marked access E. We can do this by finding a +marked access X such that R and X are ordered by a suitable fence and +X ->xb* E. If E was also a plain access, we would also look for a +marked access Y such that X ->xb* Y, and Y and E are ordered by a +fence. We describe this arrangement by saying that R is +"post-bounded" by X and E is "pre-bounded" by Y. + +In fact, we go one step further: Since R is a read, we say that R is +"r-post-bounded" by X. Similarly, E would be "r-pre-bounded" or +"w-pre-bounded" by Y, depending on whether E was a store or a load. 
+This distinction is needed because some fences affect only loads +(i.e., smp_rmb()) and some affect only stores (smp_wmb()); otherwise +the two types of bounds are the same. And as a degenerate case, we +say that a marked access pre-bounds and post-bounds itself (e.g., if R +above were a marked load then X could simply be taken to be R itself.) + +The need to distinguish between r- and w-bounding raises yet another +issue. When the source code contains a plain store, the compiler is +allowed to put plain loads of the same location into the object code. +For example, given the source code: + + x = 1; + +the compiler is theoretically allowed to generate object code that +looks like: + + if (x != 1) + x = 1; + +thereby adding a load (and possibly replacing the store entirely). +For this reason, whenever the LKMM requires a plain store to be +w-pre-bounded or w-post-bounded by a marked access, it also requires +the store to be r-pre-bounded or r-post-bounded, so as to handle cases +where the compiler adds a load. + +(This may be overly cautious. We don't know of any examples where a +compiler has augmented a store with a load in this fashion, and the +Linux kernel developers would probably fight pretty hard to change a +compiler if it ever did this. Still, better safe than sorry.) + +Incidentally, the other tranformation -- augmenting a plain load by +adding in a store to the same location -- is not allowed. This is +because the compiler cannot know whether any other CPUs might perform +a concurrent load from that location. Two concurrent loads don't +constitute a race (they can't interfere with each other), but a store +does race with a concurrent load. Thus adding a store might create a +data race where one was not already present in the source code, +something the compiler is forbidden to do. Augmenting a store with a +load, on the other hand, is acceptable because doing so won't create a +data race unless one already existed. + +The LKMM includes a second way to pre-bound plain accesses, in +addition to fences: an address dependency from a marked load. That +is, in the sequence: + + p = READ_ONCE(ptr); + r = *p; + +the LKMM says that the marked load of ptr pre-bounds the plain load of +*p; the marked load must execute before any of the machine +instructions corresponding to the plain load. This is a reasonable +stipulation, since after all, the CPU can't perform the load of *p +until it knows what value p will hold. Furthermore, without some +assumption like this one, some usages typical of RCU would count as +data races. For example: + + int a = 1, b; + int *ptr = &a; + + P0() + { + b = 2; + rcu_assign_pointer(ptr, &b); + } + + P1() + { + int *p; + int r; + + rcu_read_lock(); + p = rcu_dereference(ptr); + r = *p; + rcu_read_unlock(); + } + +(In this example the rcu_read_lock() and rcu_read_unlock() calls don't +really do anything, because there aren't any grace periods. They are +included merely for the sake of good form; typically P0 would call +synchronize_rcu() somewhere after the rcu_assign_pointer().) + +rcu_assign_pointer() performs a store-release, so the plain store to b +is definitely w-post-bounded before the store to ptr, and the two +stores will propagate to P1 in that order. However, rcu_dereference() +is only equivalent to READ_ONCE(). While it is a marked access, it is +not a fence or compiler barrier. Hence the only guarantee we have +that the load of ptr in P1 is r-pre-bounded before the load of *p +(thus avoiding a race) is the assumption about address dependencies. 
+ +This is a situation where the compiler can undermine the memory model, +and a certain amount of care is required when programming constructs +like this one. In particular, comparisons between the pointer and +other known addresses can cause trouble. If you have something like: + + p = rcu_dereference(ptr); + if (p == &x) + r = *p; + +then the compiler just might generate object code resembling: + + p = rcu_dereference(ptr); + if (p == &x) + r = x; + +or even: + + rtemp = x; + p = rcu_dereference(ptr); + if (p == &x) + r = rtemp; + +which would invalidate the memory model's assumption, since the CPU +could now perform the load of x before the load of ptr (there might be +a control dependency but no address dependency at the machine level). + +Finally, it turns out there is a situation in which a plain write does +not need to be w-post-bounded: when it is separated from the other +race-candidate access by a fence. At first glance this may seem +impossible. After all, to be race candidates the two accesses must +be on different CPUs, and fences don't link events on different CPUs. +Well, normal fences don't -- but rcu-fence can! Here's an example: + + int x, y; + + P0() + { + WRITE_ONCE(x, 1); + synchronize_rcu(); + y = 3; + } + + P1() + { + rcu_read_lock(); + if (READ_ONCE(x) == 0) + y = 2; + rcu_read_unlock(); + } + +Do the plain stores to y race? Clearly not if P1 reads a non-zero +value for x, so let's assume the READ_ONCE(x) does obtain 0. This +means that the read-side critical section in P1 must finish executing +before the grace period in P0 does, because RCU's Grace-Period +Guarantee says that otherwise P0's store to x would have propagated to +P1 before the critical section started and so would have been visible +to the READ_ONCE(). (Another way of putting it is that the fre link +from the READ_ONCE() to the WRITE_ONCE() gives rise to an rcu-link +between those two events.) + +This means there is an rcu-fence link from P1's "y = 2" store to P0's +"y = 3" store, and consequently the first must propagate from P1 to P0 +before the second can execute. Therefore the two stores cannot be +concurrent and there is no race, even though P1's plain store to y +isn't w-post-bounded by any marked accesses. + +Putting all this material together yields the following picture. For +race-candidate stores W and W', where W ->co W', the LKMM says the +stores don't race if W can be linked to W' by a + + w-post-bounded ; vis ; w-pre-bounded + +sequence. If W is plain then they also have to be linked by an + + r-post-bounded ; xb* ; w-pre-bounded + +sequence, and if W' is plain then they also have to be linked by a + + w-post-bounded ; vis ; r-pre-bounded + +sequence. For race-candidate load R and store W, the LKMM says the +two accesses don't race if R can be linked to W by an + + r-post-bounded ; xb* ; w-pre-bounded + +sequence or if W can be linked to R by a + + w-post-bounded ; vis ; r-pre-bounded + +sequence. For the cases involving a vis link, the LKMM also accepts +sequences in which W is linked to W' or R by a + + strong-fence ; xb* ; {w and/or r}-pre-bounded + +sequence with no post-bounding, and in every case the LKMM also allows +the link simply to be a fence with no bounding at all. If no sequence +of the appropriate sort exists, the LKMM says that the accesses race. + +There is one more part of the LKMM related to plain accesses (although +not to data races) we should discuss. Recall that many relations such +as hb are limited to marked accesses only. 
As a result, the
+happens-before, propagates-before, and rcu axioms (which state that
+various relations must not contain a cycle) don't apply to plain
+accesses.  Nevertheless, we do want to rule out such cycles, because
+they don't make sense even for plain accesses.
+
+To this end, the LKMM imposes three extra restrictions, together
+called the "plain-coherence" axiom because of their resemblance to the
+rules used by the operational model to ensure cache coherence (that
+is, the rules governing the memory subsystem's choice of a store to
+satisfy a load request and its determination of where a store will
+fall in the coherence order):
+
+	If R and W are race candidates and it is possible to link R to
+	W by one of the xb* sequences listed above, then W ->rfe R is
+	not allowed (i.e., a load cannot read from a store that it
+	executes before, even if one or both is plain).
+
+	If W and R are race candidates and it is possible to link W to
+	R by one of the vis sequences listed above, then R ->fre W is
+	not allowed (i.e., if a store is visible to a load then the
+	load must read from that store or one coherence-after it).
+
+	If W and W' are race candidates and it is possible to link W
+	to W' by one of the vis sequences listed above, then W' ->co W
+	is not allowed (i.e., if one store is visible to a second then
+	the second must come after the first in the coherence order).
+
+This is the extent to which the LKMM deals with plain accesses.
+Perhaps it could say more (for example, plain accesses might
+contribute to the ppo relation), but at the moment it seems that this
+minimal, conservative approach is good enough.
+
+
+ODDS AND ENDS
+-------------
+
+This section covers material that didn't quite fit anywhere in the
+earlier sections.
+
+The descriptions in this document don't always match the formal
+version of the LKMM exactly.  For example, the actual formal
+definition of the prop relation makes the initial coe or fre part
+optional, and it doesn't require the events linked by the relation to
+be on the same CPU.  These differences are very unimportant; indeed,
+instances where the coe/fre part of prop is missing are of no interest
+because all the other parts (fences and rfe) are already included in
+hb anyway, and where the formal model adds prop into hb, it includes
+an explicit requirement that the events being linked are on the same
+CPU.
+
+Another minor difference has to do with events that are both memory
+accesses and fences, such as those corresponding to smp_load_acquire()
+calls.  In the formal model, these events aren't actually both reads
+and fences; rather, they are read events with an annotation marking
+them as acquires.  (Or write events annotated as releases, in the case
+of smp_store_release().)  The final effect is the same.
+
+Although we didn't mention it above, the instruction execution
+ordering provided by the smp_rmb() fence doesn't apply to read events
+that are part of a non-value-returning atomic update.  For instance,
+given:
+
+	atomic_inc(&x);
+	smp_rmb();
+	r1 = READ_ONCE(y);
+
+it is not guaranteed that the load from y will execute after the
+update to x.  This is because the ARMv8 architecture allows
+non-value-returning atomic operations effectively to be executed off
+the CPU.  Basically, the CPU tells the memory subsystem to increment
+x, and then the increment is carried out by the memory hardware with
+no further involvement from the CPU.  Since the CPU doesn't ever read
+the value of x, there is nothing for the smp_rmb() fence to act on.
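+
+If ordering between the atomic update and the later load is actually
+needed, one option (a sketch, not the only possibility) is to replace
+the smp_rmb() with smp_mb__after_atomic(), which is described further
+below and which does order a po-earlier atomic update against all
+po-later events:
+
+	atomic_inc(&x);
+	smp_mb__after_atomic();	/* orders the update of x against the load of y */
+	r1 = READ_ONCE(y);
+
+Alternatively, a value-returning atomic operation such as
+atomic_inc_return() could be used, since such operations act as full
+memory barriers.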
+
+The LKMM defines a few extra synchronization operations in terms of
+things we have already covered.  In particular, rcu_dereference() is
+treated as READ_ONCE() and rcu_assign_pointer() is treated as
+smp_store_release() -- which is basically how the Linux kernel treats
+them.
+
+Although we said that plain accesses are not linked by the ppo
+relation, they do contribute to it indirectly.  Namely, when there is
+an address dependency from a marked load R to a plain store W,
+followed by smp_wmb() and then a marked store W', the LKMM creates a
+ppo link from R to W'.  The reasoning behind this is perhaps a little
+shaky, but essentially it says there is no way to generate object code
+for this source code in which W' could execute before R.  Just as with
+pre-bounding by address dependencies, it is possible for the compiler
+to undermine this relation if sufficient care is not taken.
+
+There are a few oddball fences which need special treatment:
+smp_mb__before_atomic(), smp_mb__after_atomic(), and
+smp_mb__after_spinlock().  The LKMM uses fence events with special
+annotations for them; they act as strong fences just like smp_mb()
+except for the sets of events that they order.  Instead of ordering
+all po-earlier events against all po-later events, as smp_mb() does,
+they behave as follows:
+
+	smp_mb__before_atomic() orders all po-earlier events against
+	po-later atomic updates and the events following them;
+
+	smp_mb__after_atomic() orders po-earlier atomic updates and
+	the events preceding them against all po-later events;
+
+	smp_mb__after_spinlock() orders po-earlier lock acquisition
+	events and the events preceding them against all po-later
+	events.
+
+Interestingly, RCU and locking each introduce the possibility of
+deadlock.  When faced with code sequences such as:
+
+	spin_lock(&s);
+	spin_lock(&s);
+	spin_unlock(&s);
+	spin_unlock(&s);
+
+or:
+
+	rcu_read_lock();
+	synchronize_rcu();
+	rcu_read_unlock();
+
+what does the LKMM have to say?  Answer: It says there are no allowed
+executions at all, which makes sense.  But this can also lead to
+misleading results, because if a piece of code has multiple possible
+executions, some of which deadlock, the model will report only on the
+non-deadlocking executions.  For example:
+
+	int x, y;
+
+	P0()
+	{
+		int r0;
+
+		WRITE_ONCE(x, 1);
+		r0 = READ_ONCE(y);
+	}
+
+	P1()
+	{
+		rcu_read_lock();
+		if (READ_ONCE(x) > 0) {
+			WRITE_ONCE(y, 36);
+			synchronize_rcu();
+		}
+		rcu_read_unlock();
+	}
+
+Is it possible to end up with r0 = 36 at the end?  The LKMM will tell
+you it is not, but the model won't mention that this is because P1
+will self-deadlock in the executions where it stores 36 in y.
diff --git a/tools/memory-model/Documentation/litmus-tests.txt b/tools/memory-model/Documentation/litmus-tests.txt
new file mode 100644
index 000000000..2f840dcd1
--- /dev/null
+++ b/tools/memory-model/Documentation/litmus-tests.txt
@@ -0,0 +1,1074 @@
+Linux-Kernel Memory Model Litmus Tests
+======================================
+
+This file describes the LKMM litmus-test format by example, describes
+some tricks and traps, and finally outlines LKMM's limitations.
Earlier +versions of this material appeared in a number of LWN articles, including: + +https://lwn.net/Articles/720550/ + A formal kernel memory-ordering model (part 2) +https://lwn.net/Articles/608550/ + Axiomatic validation of memory barriers and atomic instructions +https://lwn.net/Articles/470681/ + Validating Memory Barriers and Atomic Instructions + +This document presents information in decreasing order of applicability, +so that, where possible, the information that has proven more commonly +useful is shown near the beginning. + +For information on installing LKMM, including the underlying "herd7" +tool, please see tools/memory-model/README. + + +Copy-Pasta +========== + +As with other software, it is often better (if less macho) to adapt an +existing litmus test than it is to create one from scratch. A number +of litmus tests may be found in the kernel source tree: + + tools/memory-model/litmus-tests/ + Documentation/litmus-tests/ + +Several thousand more example litmus tests are available on github +and kernel.org: + + https://github.com/paulmckrcu/litmus + https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/herd + https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/litmus + +The -l and -L arguments to "git grep" can be quite helpful in identifying +existing litmus tests that are similar to the one you need. But even if +you start with an existing litmus test, it is still helpful to have a +good understanding of the litmus-test format. + + +Examples and Format +=================== + +This section describes the overall format of litmus tests, starting +with a small example of the message-passing pattern and moving on to +more complex examples that illustrate explicit initialization and LKMM's +minimalistic set of flow-control statements. + + +Message-Passing Example +----------------------- + +This section gives an overview of the format of a litmus test using an +example based on the common message-passing use case. This use case +appears often in the Linux kernel. For example, a flag (modeled by "y" +below) indicates that a buffer (modeled by "x" below) is now completely +filled in and ready for use. It would be very bad if the consumer saw the +flag set, but, due to memory misordering, saw old values in the buffer. + +This example asks whether smp_store_release() and smp_load_acquire() +suffices to avoid this bad outcome: + + 1 C MP+pooncerelease+poacquireonce + 2 + 3 {} + 4 + 5 P0(int *x, int *y) + 6 { + 7 WRITE_ONCE(*x, 1); + 8 smp_store_release(y, 1); + 9 } +10 +11 P1(int *x, int *y) +12 { +13 int r0; +14 int r1; +15 +16 r0 = smp_load_acquire(y); +17 r1 = READ_ONCE(*x); +18 } +19 +20 exists (1:r0=1 /\ 1:r1=0) + +Line 1 starts with "C", which identifies this file as being in the +LKMM C-language format (which, as we will see, is a small fragment +of the full C language). The remainder of line 1 is the name of +the test, which by convention is the filename with the ".litmus" +suffix stripped. In this case, the actual test may be found in +tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus +in the Linux-kernel source tree. + +Mechanically generated litmus tests will often have an optional +double-quoted comment string on the second line. Such strings are ignored +when running the test. Yes, you can add your own comments to litmus +tests, but this is a bit involved due to the use of multiple parsers. 
+For now, you can use C-language comments in the C code, and these comments +may be in either the "/* */" or the "//" style. A later section will +cover the full litmus-test commenting story. + +Line 3 is the initialization section. Because the default initialization +to zero suffices for this test, the "{}" syntax is used, which mean the +initialization section is empty. Litmus tests requiring non-default +initialization must have non-empty initialization sections, as in the +example that will be presented later in this document. + +Lines 5-9 show the first process and lines 11-18 the second process. Each +process corresponds to a Linux-kernel task (or kthread, workqueue, thread, +and so on; LKMM discussions often use these terms interchangeably). +The name of the first process is "P0" and that of the second "P1". +You can name your processes anything you like as long as the names consist +of a single "P" followed by a number, and as long as the numbers are +consecutive starting with zero. This can actually be quite helpful, +for example, a .litmus file matching "^P1(" but not matching "^P2(" +must contain a two-process litmus test. + +The argument list for each function are pointers to the global variables +used by that function. Unlike normal C-language function parameters, the +names are significant. The fact that both P0() and P1() have a formal +parameter named "x" means that these two processes are working with the +same global variable, also named "x". So the "int *x, int *y" on P0() +and P1() mean that both processes are working with two shared global +variables, "x" and "y". Global variables are always passed to processes +by reference, hence "P0(int *x, int *y)", but *never* "P0(int x, int y)". + +P0() has no local variables, but P1() has two of them named "r0" and "r1". +These names may be freely chosen, but for historical reasons stemming from +other litmus-test formats, it is conventional to use names consisting of +"r" followed by a number as shown here. A common bug in litmus tests +is forgetting to add a global variable to a process's parameter list. +This will sometimes result in an error message, but can also cause the +intended global to instead be silently treated as an undeclared local +variable. + +Each process's code is similar to Linux-kernel C, as can be seen on lines +7-8 and 13-17. This code may use many of the Linux kernel's atomic +operations, some of its exclusive-lock functions, and some of its RCU +and SRCU functions. An approximate list of the currently supported +functions may be found in the linux-kernel.def file. + +The P0() process does "WRITE_ONCE(*x, 1)" on line 7. Because "x" is a +pointer in P0()'s parameter list, this does an unordered store to global +variable "x". Line 8 does "smp_store_release(y, 1)", and because "y" +is also in P0()'s parameter list, this does a release store to global +variable "y". + +The P1() process declares two local variables on lines 13 and 14. +Line 16 does "r0 = smp_load_acquire(y)" which does an acquire load +from global variable "y" into local variable "r0". Line 17 does a +"r1 = READ_ONCE(*x)", which does an unordered load from "*x" into local +variable "r1". Both "x" and "y" are in P1()'s parameter list, so both +reference the same global variables that are used by P0(). + +Line 20 is the "exists" assertion expression to evaluate the final state. 
+This final state is evaluated after the dust has settled: both processes +have completed and all of their memory references and memory barriers +have propagated to all parts of the system. The references to the local +variables "r0" and "r1" in line 24 must be prefixed with "1:" to specify +which process they are local to. + +Note that the assertion expression is written in the litmus-test +language rather than in C. For example, single "=" is an equality +operator rather than an assignment. The "/\" character combination means +"and". Similarly, "\/" stands for "or". Both of these are ASCII-art +representations of the corresponding mathematical symbols. Finally, +"~" stands for "logical not", which is "!" in C, and not to be confused +with the C-language "~" operator which instead stands for "bitwise not". +Parentheses may be used to override precedence. + +The "exists" assertion on line 20 is satisfied if the consumer sees the +flag ("y") set but the buffer ("x") as not yet filled in, that is, if P1() +loaded a value from "x" that was equal to 1 but loaded a value from "y" +that was still equal to zero. + +This example can be checked by running the following command, which +absolutely must be run from the tools/memory-model directory and from +this directory only: + +herd7 -conf linux-kernel.cfg litmus-tests/MP+pooncerelease+poacquireonce.litmus + +The output is the result of something similar to a full state-space +search, and is as follows: + + 1 Test MP+pooncerelease+poacquireonce Allowed + 2 States 3 + 3 1:r0=0; 1:r1=0; + 4 1:r0=0; 1:r1=1; + 5 1:r0=1; 1:r1=1; + 6 No + 7 Witnesses + 8 Positive: 0 Negative: 3 + 9 Condition exists (1:r0=1 /\ 1:r1=0) +10 Observation MP+pooncerelease+poacquireonce Never 0 3 +11 Time MP+pooncerelease+poacquireonce 0.00 +12 Hash=579aaa14d8c35a39429b02e698241d09 + +The most pertinent line is line 10, which contains "Never 0 3", which +indicates that the bad result flagged by the "exists" clause never +happens. This line might instead say "Sometimes" to indicate that the +bad result happened in some but not all executions, or it might say +"Always" to indicate that the bad result happened in all executions. +(The herd7 tool doesn't judge, so it is only an LKMM convention that the +"exists" clause indicates a bad result. To see this, invert the "exists" +clause's condition and run the test.) The numbers ("0 3") at the end +of this line indicate the number of end states satisfying the "exists" +clause (0) and the number not not satisfying that clause (3). + +Another important part of this output is shown in lines 2-5, repeated here: + + 2 States 3 + 3 1:r0=0; 1:r1=0; + 4 1:r0=0; 1:r1=1; + 5 1:r0=1; 1:r1=1; + +Line 2 gives the total number of end states, and each of lines 3-5 list +one of these states, with the first ("1:r0=0; 1:r1=0;") indicating that +both of P1()'s loads returned the value "0". As expected, given the +"Never" on line 10, the state flagged by the "exists" clause is not +listed. This full list of states can be helpful when debugging a new +litmus test. + +The rest of the output is not normally needed, either due to irrelevance +or due to being redundant with the lines discussed above. However, the +following paragraph lists them for the benefit of readers possessed of +an insatiable curiosity. Other readers should feel free to skip ahead. + +Line 1 echos the test name, along with the "Test" and "Allowed". Line 6's +"No" says that the "exists" clause was not satisfied by any execution, +and as such it has the same meaning as line 10's "Never". 
Line 7 is a +lead-in to line 8's "Positive: 0 Negative: 3", which lists the number +of end states satisfying and not satisfying the "exists" clause, just +like the two numbers at the end of line 10. Line 9 repeats the "exists" +clause so that you don't have to look it up in the litmus-test file. +The number at the end of line 11 (which begins with "Time") gives the +time in seconds required to analyze the litmus test. Small tests such +as this one complete in a few milliseconds, so "0.00" is quite common. +Line 12 gives a hash of the contents for the litmus-test file, and is used +by tooling that manages litmus tests and their output. This tooling is +used by people modifying LKMM itself, and among other things lets such +people know which of the several thousand relevant litmus tests were +affected by a given change to LKMM. + + +Initialization +-------------- + +The previous example relied on the default zero initialization for +"x" and "y", but a similar litmus test could instead initialize them +to some other value: + + 1 C MP+pooncerelease+poacquireonce + 2 + 3 { + 4 x=42; + 5 y=42; + 6 } + 7 + 8 P0(int *x, int *y) + 9 { +10 WRITE_ONCE(*x, 1); +11 smp_store_release(y, 1); +12 } +13 +14 P1(int *x, int *y) +15 { +16 int r0; +17 int r1; +18 +19 r0 = smp_load_acquire(y); +20 r1 = READ_ONCE(*x); +21 } +22 +23 exists (1:r0=1 /\ 1:r1=42) + +Lines 3-6 now initialize both "x" and "y" to the value 42. This also +means that the "exists" clause on line 23 must change "1:r1=0" to +"1:r1=42". + +Running the test gives the same overall result as before, but with the +value 42 appearing in place of the value zero: + + 1 Test MP+pooncerelease+poacquireonce Allowed + 2 States 3 + 3 1:r0=1; 1:r1=1; + 4 1:r0=42; 1:r1=1; + 5 1:r0=42; 1:r1=42; + 6 No + 7 Witnesses + 8 Positive: 0 Negative: 3 + 9 Condition exists (1:r0=1 /\ 1:r1=42) +10 Observation MP+pooncerelease+poacquireonce Never 0 3 +11 Time MP+pooncerelease+poacquireonce 0.02 +12 Hash=ab9a9b7940a75a792266be279a980156 + +It is tempting to avoid the open-coded repetitions of the value "42" +by defining another global variable "initval=42" and replacing all +occurrences of "42" with "initval". This will not, repeat *not*, +initialize "x" and "y" to 42, but instead to the address of "initval" +(try it!). See the section below on linked lists to learn more about +why this approach to initialization can be useful. + + +Control Structures +------------------ + +LKMM supports the C-language "if" statement, which allows modeling of +conditional branches. In LKMM, conditional branches can affect ordering, +but only if you are *very* careful (compilers are surprisingly able +to optimize away conditional branches). The following example shows +the "load buffering" (LB) use case that is used in the Linux kernel to +synchronize between ring-buffer producers and consumers. In the example +below, P0() is one side checking to see if an operation may proceed and +P1() is the other side completing its update. + + 1 C LB+fencembonceonce+ctrlonceonce + 2 + 3 {} + 4 + 5 P0(int *x, int *y) + 6 { + 7 int r0; + 8 + 9 r0 = READ_ONCE(*x); +10 if (r0) +11 WRITE_ONCE(*y, 1); +12 } +13 +14 P1(int *x, int *y) +15 { +16 int r0; +17 +18 r0 = READ_ONCE(*y); +19 smp_mb(); +20 WRITE_ONCE(*x, 1); +21 } +22 +23 exists (0:r0=1 /\ 1:r0=1) + +P1()'s "if" statement on line 10 works as expected, so that line 11 is +executed only if line 9 loads a non-zero value from "x". 
Because P1()'s write of "1" to "x" happens only after P1()'s read from
"y", one would hope that the "exists" clause cannot be satisfied.
LKMM agrees:

 1 Test LB+fencembonceonce+ctrlonceonce Allowed
 2 States 2
 3 0:r0=0; 1:r0=0;
 4 0:r0=1; 1:r0=0;
 5 No
 6 Witnesses
 7 Positive: 0 Negative: 2
 8 Condition exists (0:r0=1 /\ 1:r0=1)
 9 Observation LB+fencembonceonce+ctrlonceonce Never 0 2
10 Time LB+fencembonceonce+ctrlonceonce 0.00
11 Hash=e5260556f6de495fd39b556d1b831c3b

However, there is no "while" statement because full state-space search
has some difficulty with iteration. That said, there are tricks that
may be used to handle some special cases, which are discussed below.
In addition, loop-unrolling tricks may be applied, albeit sparingly.


Tricks and Traps
================

This section covers extracting debug output from herd7, emulating
spin loops, handling trivial linked lists, adding comments to litmus
tests, emulating call_rcu(), and finally tricks to improve herd7
performance in order to better handle large litmus tests.


Debug Output
------------

By default, the herd7 state output includes all variables mentioned
in the "exists" clause.  But sometimes debugging efforts are greatly
aided by the values of other variables.  Consider this litmus test
(tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus but
slightly modified), which probes an obscure corner of hardware memory
ordering:

 1 C SB+rfionceonce-poonceonces
 2
 3 {}
 4
 5 P0(int *x, int *y)
 6 {
 7 	int r1;
 8 	int r2;
 9
10 	WRITE_ONCE(*x, 1);
11 	r1 = READ_ONCE(*x);
12 	r2 = READ_ONCE(*y);
13 }
14
15 P1(int *x, int *y)
16 {
17 	int r3;
18 	int r4;
19
20 	WRITE_ONCE(*y, 1);
21 	r3 = READ_ONCE(*y);
22 	r4 = READ_ONCE(*x);
23 }
24
25 exists (0:r2=0 /\ 1:r4=0)

The herd7 output is as follows:

 1 Test SB+rfionceonce-poonceonces Allowed
 2 States 4
 3 0:r2=0; 1:r4=0;
 4 0:r2=0; 1:r4=1;
 5 0:r2=1; 1:r4=0;
 6 0:r2=1; 1:r4=1;
 7 Ok
 8 Witnesses
 9 Positive: 1 Negative: 3
10 Condition exists (0:r2=0 /\ 1:r4=0)
11 Observation SB+rfionceonce-poonceonces Sometimes 1 3
12 Time SB+rfionceonce-poonceonces 0.01
13 Hash=c7f30fe0faebb7d565405d55b7318ada

(This output indicates that CPUs are permitted to "snoop their own
store buffers", which all of Linux's CPU families other than s390 will
happily do.  Such snooping results in disagreement among CPUs on the
order of stores from different CPUs, which is rarely an issue.)

But the herd7 output shows only the two variables mentioned in the
"exists" clause.  Someone modifying this test might wish to know the
values of "x", "y", "0:r1", and "1:r3" as well.
The "locations" statement on line 25 shows how to cause herd7 to
display additional variables:

 1 C SB+rfionceonce-poonceonces
 2
 3 {}
 4
 5 P0(int *x, int *y)
 6 {
 7 	int r1;
 8 	int r2;
 9
10 	WRITE_ONCE(*x, 1);
11 	r1 = READ_ONCE(*x);
12 	r2 = READ_ONCE(*y);
13 }
14
15 P1(int *x, int *y)
16 {
17 	int r3;
18 	int r4;
19
20 	WRITE_ONCE(*y, 1);
21 	r3 = READ_ONCE(*y);
22 	r4 = READ_ONCE(*x);
23 }
24
25 locations [0:r1; 1:r3; x; y]
26 exists (0:r2=0 /\ 1:r4=0)

The herd7 output then displays the values of all the variables:

 1 Test SB+rfionceonce-poonceonces Allowed
 2 States 4
 3 0:r1=1; 0:r2=0; 1:r3=1; 1:r4=0; x=1; y=1;
 4 0:r1=1; 0:r2=0; 1:r3=1; 1:r4=1; x=1; y=1;
 5 0:r1=1; 0:r2=1; 1:r3=1; 1:r4=0; x=1; y=1;
 6 0:r1=1; 0:r2=1; 1:r3=1; 1:r4=1; x=1; y=1;
 7 Ok
 8 Witnesses
 9 Positive: 1 Negative: 3
10 Condition exists (0:r2=0 /\ 1:r4=0)
11 Observation SB+rfionceonce-poonceonces Sometimes 1 3
12 Time SB+rfionceonce-poonceonces 0.01
13 Hash=40de8418c4b395388f6501cafd1ed38d

What if you would like to know the value of a particular global variable
at some particular point in a given process's execution?  One approach
is to use a READ_ONCE() to load that global variable into a new local
variable, then add that local variable to the "locations" clause.
But be careful:  In some litmus tests, adding a READ_ONCE() will change
the outcome!  For one example, please see the C-READ_ONCE.litmus and
C-READ_ONCE-omitted.litmus tests located here:

	https://github.com/paulmckrcu/litmus/blob/master/manual/kernel/


Spin Loops
----------

The analysis carried out by herd7 explores the full state space, which
is at best of exponential time complexity.  Adding processes and
increasing the amount of code in a given process can greatly increase
execution time.  Potentially infinite loops, such as those used to wait
for locks to become available, are clearly problematic.

Fortunately, it is possible to avoid state-space explosion by specially
modeling such loops.  For example, the following litmus test emulates
locking using xchg_acquire(), but instead of enclosing xchg_acquire()
in a spin loop, it instead excludes executions that fail to acquire the
lock using a herd7 "filter" clause.  Note that for exclusive locking, you
are better off using the spin_lock() and spin_unlock() that LKMM directly
models, if for no other reason than that these are much faster.  However,
the techniques illustrated in this section can be used for other purposes,
such as emulating reader-writer locking, which LKMM does not yet model.

 1 C C-SB+l-o-o-u+l-o-o-u-X
 2
 3 {
 4 }
 5
 6 P0(int *sl, int *x0, int *x1)
 7 {
 8 	int r2;
 9 	int r1;
10
11 	r2 = xchg_acquire(sl, 1);
12 	WRITE_ONCE(*x0, 1);
13 	r1 = READ_ONCE(*x1);
14 	smp_store_release(sl, 0);
15 }
16
17 P1(int *sl, int *x0, int *x1)
18 {
19 	int r2;
20 	int r1;
21
22 	r2 = xchg_acquire(sl, 1);
23 	WRITE_ONCE(*x1, 1);
24 	r1 = READ_ONCE(*x0);
25 	smp_store_release(sl, 0);
26 }
27
28 filter (0:r2=0 /\ 1:r2=0)
29 exists (0:r1=0 /\ 1:r1=0)

This litmus test may be found here:

https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/herd/C-SB+l-o-o-u+l-o-o-u-X.litmus

This test uses two global variables, "x0" and "x1", and also emulates a
single global spinlock named "sl".  This spinlock is held by whichever
process changes the value of "sl" from "0" to "1", and is released when
that process sets "sl" back to "0".
P0()'s lock acquisition is emulated
on line 11 using xchg_acquire(), which unconditionally stores the value
"1" to "sl" and stores either "0" or "1" to "r2", depending on whether
the lock acquisition was successful or unsuccessful (due to "sl" already
having the value "1"), respectively.  P1() operates in a similar manner.

Rather unconventionally, execution appears to proceed to the critical
section on lines 12 and 13 in either case.  Line 14 then uses an
smp_store_release() to store zero to "sl", thus emulating lock release.

The case where xchg_acquire() fails to acquire the lock is handled by
the "filter" clause on line 28, which tells herd7 to keep only those
executions in which both "0:r2" and "1:r2" are zero, that is, to pay
attention only to those executions in which both processes actually
acquire the lock.  Thus, the bogus executions that enter their critical
sections without holding the lock are discarded and any effects that
they might have had are ignored.  Note well that the "filter" clause
keeps those executions for which its expression is satisfied, that is,
for which the expression evaluates to true.  In other words, the
"filter" clause says what to keep, not what to discard.

The result of running this test is as follows:

 1 Test C-SB+l-o-o-u+l-o-o-u-X Allowed
 2 States 2
 3 0:r1=0; 1:r1=1;
 4 0:r1=1; 1:r1=0;
 5 No
 6 Witnesses
 7 Positive: 0 Negative: 2
 8 Condition exists (0:r1=0 /\ 1:r1=0)
 9 Observation C-SB+l-o-o-u+l-o-o-u-X Never 0 2
10 Time C-SB+l-o-o-u+l-o-o-u-X 0.03

The "Never" on line 9 indicates that this use of xchg_acquire() and
smp_store_release() really does correctly emulate locking.

Why doesn't the litmus test take the simpler approach of using a spin loop
to handle failed spinlock acquisitions, like the kernel does?  The key
insight behind this litmus test is that spin loops have no effect on the
possible "exists"-clause outcomes of program execution in the absence
of deadlock.  In other words, given a high-quality lock-acquisition
primitive in a deadlock-free program running on high-quality hardware,
each lock acquisition will eventually succeed.  Because herd7 already
explores the full state space, the length of time required to actually
acquire the lock does not matter.  After all, herd7 already models all
possible durations of the xchg_acquire() statements.

Why not just add the "filter" clause to the "exists" clause, thus
avoiding the "filter" clause entirely?  This does work, but is slower.
The reason that the "filter" clause is faster is that (in the common case)
herd7 knows to abandon an execution as soon as the "filter" expression
fails to be satisfied.  In contrast, the "exists" clause is evaluated
only at the end of time, thus requiring herd7 to waste time on bogus
executions in which both critical sections proceed concurrently.  In
addition, some LKMM users like the separation of concerns provided by
using both the "filter" and "exists" clauses.

Readers lacking a pathological interest in odd corner cases should feel
free to skip the remainder of this section.

But what if the litmus test were to temporarily set "0:r2" to a non-zero
value?  Wouldn't that cause herd7 to abandon the execution prematurely
due to an early mismatch of the "filter" clause?

Why not just try it?  Line 4 of the following modified litmus test
introduces a new global variable "x2" that is initialized to "1".  Line 23
of P1() reads that variable into "1:r2" to force an early mismatch with
the "filter" clause.
Line 24 does a known-true "if" condition to avoid
any static analysis that herd7 might do.  Finally, the "exists" clause
on line 32 is updated to a condition that is always satisfied at the end
of the test.

 1 C C-SB+l-o-o-u+l-o-o-u-X
 2
 3 {
 4 	x2=1;
 5 }
 6
 7 P0(int *sl, int *x0, int *x1)
 8 {
 9 	int r2;
10 	int r1;
11
12 	r2 = xchg_acquire(sl, 1);
13 	WRITE_ONCE(*x0, 1);
14 	r1 = READ_ONCE(*x1);
15 	smp_store_release(sl, 0);
16 }
17
18 P1(int *sl, int *x0, int *x1, int *x2)
19 {
20 	int r2;
21 	int r1;
22
23 	r2 = READ_ONCE(*x2);
24 	if (r2)
25 		r2 = xchg_acquire(sl, 1);
26 	WRITE_ONCE(*x1, 1);
27 	r1 = READ_ONCE(*x0);
28 	smp_store_release(sl, 0);
29 }
30
31 filter (0:r2=0 /\ 1:r2=0)
32 exists (x1=1)

If the "filter" clause were to check each variable at each point in the
execution, running this litmus test would display no executions because
all executions would be filtered out at line 23.  However, the output
is instead as follows:

 1 Test C-SB+l-o-o-u+l-o-o-u-X Allowed
 2 States 1
 3 x1=1;
 4 Ok
 5 Witnesses
 6 Positive: 2 Negative: 0
 7 Condition exists (x1=1)
 8 Observation C-SB+l-o-o-u+l-o-o-u-X Always 2 0
 9 Time C-SB+l-o-o-u+l-o-o-u-X 0.04
10 Hash=080bc508da7f291e122c6de76c0088e3

Line 3 shows that at least one execution survived the filtering, so
the "filter" clause is evaluated only against the final value assigned
to each variable that it checks.  In this case, the "filter" clause is
a conjunction, so it might be evaluated twice, once at the final (and
only) assignment to "0:r2" and once at the final assignment to "1:r2".


Linked Lists
------------

LKMM can handle linked lists, but only linked lists in which each node
contains nothing except a pointer to the next node in the list.  This is
of course quite restrictive, but there is nevertheless quite a bit that
can be done within these confines, as can be seen in the litmus test
at tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus:

 1 C MP+onceassign+derefonce
 2
 3 {
 4 	y=z;
 5 	z=0;
 6 }
 7
 8 P0(int *x, int **y)
 9 {
10 	WRITE_ONCE(*x, 1);
11 	rcu_assign_pointer(*y, x);
12 }
13
14 P1(int *x, int **y)
15 {
16 	int *r0;
17 	int r1;
18
19 	rcu_read_lock();
20 	r0 = rcu_dereference(*y);
21 	r1 = READ_ONCE(*r0);
22 	rcu_read_unlock();
23 }
24
25 exists (1:r0=x /\ 1:r1=0)

Line 4's "y=z" may seem odd, given that "z" has not yet been initialized.
But "y=z" does not set the value of "y" to that of "z", but instead
sets the value of "y" to the *address* of "z".  Lines 4 and 5 therefore
create a simple linked list, with "y" pointing to "z" and "z" having a
NULL pointer.  A much longer linked list could be created if desired,
and circular singly linked lists can also be created and manipulated.

The "exists" clause works the same way, with the "1:r0=x" comparing P1()'s
"r0" not to the value of "x", but again to its address.  This term of the
"exists" clause therefore tests whether line 20's load from "y" saw the
value stored by line 11, which is in fact what is required in this case.

P0()'s line 10 initializes "x" to the value 1, then line 11 links to "x"
from "y", replacing "z".

P1()'s line 20 loads a pointer from "y", and line 21 dereferences that
pointer.  The RCU read-side critical section spanning lines 19-22 is just
for show in this example.  Note that the address used for line 21's load
depends on (in this case, "is exactly the same as") the value loaded by
line 20.  This is an example of what is called an "address dependency".
+This particular address dependency extends from the load on line 20 to the +load on line 21. Address dependencies provide a weak form of ordering. + +Running this test results in the following: + + 1 Test MP+onceassign+derefonce Allowed + 2 States 2 + 3 1:r0=x; 1:r1=1; + 4 1:r0=z; 1:r1=0; + 5 No + 6 Witnesses + 7 Positive: 0 Negative: 2 + 8 Condition exists (1:r0=x /\ 1:r1=0) + 9 Observation MP+onceassign+derefonce Never 0 2 +10 Time MP+onceassign+derefonce 0.00 +11 Hash=49ef7a741563570102448a256a0c8568 + +The only possible outcomes feature P1() loading a pointer to "z" +(which contains zero) on the one hand and P1() loading a pointer to "x" +(which contains the value one) on the other. This should be reassuring +because it says that RCU readers cannot see the old preinitialization +values when accessing a newly inserted list node. This undesirable +scenario is flagged by the "exists" clause, and would occur if P1() +loaded a pointer to "x", but obtained the pre-initialization value of +zero after dereferencing that pointer. + + +Comments +-------- + +Different portions of a litmus test are processed by different parsers, +which has the charming effect of requiring different comment syntax in +different portions of the litmus test. The C-syntax portions use +C-language comments (either "/* */" or "//"), while the other portions +use Ocaml comments "(* *)". + +The following litmus test illustrates the comment style corresponding +to each syntactic unit of the test: + + 1 C MP+onceassign+derefonce (* A *) + 2 + 3 (* B *) + 4 + 5 { + 6 y=z; (* C *) + 7 z=0; + 8 } // D + 9 +10 // E +11 +12 P0(int *x, int **y) // F +13 { +14 WRITE_ONCE(*x, 1); // G +15 rcu_assign_pointer(*y, x); +16 } +17 +18 // H +19 +20 P1(int *x, int **y) +21 { +22 int *r0; +23 int r1; +24 +25 rcu_read_lock(); +26 r0 = rcu_dereference(*y); +27 r1 = READ_ONCE(*r0); +28 rcu_read_unlock(); +29 } +30 +31 // I +32 +33 exists (* J *) (1:r0=x /\ (* K *) 1:r1=0) (* L *) + +In short, use C-language comments in the C code and Ocaml comments in +the rest of the litmus test. + +On the other hand, if you prefer C-style comments everywhere, the +C preprocessor is your friend. + + +Asynchronous RCU Grace Periods +------------------------------ + +The following litmus test is derived from the example show in +Documentation/litmus-tests/rcu/RCU+sync+free.litmus, but converted to +emulate call_rcu(): + + 1 C RCU+sync+free + 2 + 3 { + 4 int x = 1; + 5 int *y = &x; + 6 int z = 1; + 7 } + 8 + 9 P0(int *x, int *z, int **y) +10 { +11 int *r0; +12 int r1; +13 +14 rcu_read_lock(); +15 r0 = rcu_dereference(*y); +16 r1 = READ_ONCE(*r0); +17 rcu_read_unlock(); +18 } +19 +20 P1(int *z, int **y, int *c) +21 { +22 rcu_assign_pointer(*y, z); +23 smp_store_release(*c, 1); // Emulate call_rcu(). +24 } +25 +26 P2(int *x, int *z, int **y, int *c) +27 { +28 int r0; +29 +30 r0 = smp_load_acquire(*c); // Note call_rcu() request. +31 synchronize_rcu(); // Wait one grace period. +32 WRITE_ONCE(*x, 0); // Emulate the RCU callback. +33 } +34 +35 filter (2:r0=1) (* Reject too-early starts. *) +36 exists (0:r0=x /\ 0:r1=0) + +Lines 4-6 initialize a linked list headed by "y" that initially contains +"x". In addition, "z" is pre-initialized to prepare for P1(), which +will replace "x" with "z" in this list. + +P0() on lines 9-18 enters an RCU read-side critical section, loads the +list header "y" and dereferences it, leaving the node in "0:r0" and +the node's value in "0:r1". 
+ +P1() on lines 20-24 updates the list header to instead reference "z", +then emulates call_rcu() by doing a release store into "c". + +P2() on lines 27-33 emulates the behind-the-scenes effect of doing a +call_rcu(). Line 30 first does an acquire load from "c", then line 31 +waits for an RCU grace period to elapse, and finally line 32 emulates +the RCU callback, which in turn emulates a call to kfree(). + +Of course, it is possible for P2() to start too soon, so that the +value of "2:r0" is zero rather than the required value of "1". +The "filter" clause on line 35 handles this possibility, rejecting +all executions in which "2:r0" is not equal to the value "1". + + +Performance +----------- + +LKMM's exploration of the full state-space can be extremely helpful, +but it does not come for free. The price is exponential computational +complexity in terms of the number of processes, the average number +of statements in each process, and the total number of stores in the +litmus test. + +So it is best to start small and then work up. Where possible, break +your code down into small pieces each representing a core concurrency +requirement. + +That said, herd7 is quite fast. On an unprepossessing x86 laptop, it +was able to analyze the following 10-process RCU litmus test in about +six seconds. + +https://github.com/paulmckrcu/litmus/blob/master/auto/C-RW-R+RW-R+RW-G+RW-G+RW-G+RW-G+RW-R+RW-R+RW-R+RW-R.litmus + +One way to make herd7 run faster is to use the "-speedcheck true" option. +This option prevents herd7 from generating all possible end states, +instead causing it to focus solely on whether or not the "exists" +clause can be satisfied. With this option, herd7 evaluates the above +litmus test in about 300 milliseconds, for more than an order of magnitude +improvement in performance. + +Larger 16-process litmus tests that would normally consume 15 minutes +of time complete in about 40 seconds with this option. To be fair, +you do get an extra 65,535 states when you leave off the "-speedcheck +true" option. + +https://github.com/paulmckrcu/litmus/blob/master/auto/C-RW-R+RW-R+RW-G+RW-G+RW-G+RW-G+RW-R+RW-R+RW-R+RW-R+RW-G+RW-G+RW-G+RW-G+RW-R+RW-R.litmus + +Nevertheless, litmus-test analysis really is of exponential complexity, +whether with or without "-speedcheck true". Increasing by just three +processes to a 19-process litmus test requires 2 hours and 40 minutes +without, and about 8 minutes with "-speedcheck true". Each of these +results represent roughly an order of magnitude slowdown compared to the +16-process litmus test. Again, to be fair, the multi-hour run explores +no fewer than 524,287 additional states compared to the shorter one. + +https://github.com/paulmckrcu/litmus/blob/master/auto/C-RW-R+RW-R+RW-G+RW-G+RW-G+RW-G+RW-R+RW-R+RW-R+RW-R+RW-R+RW-R+RW-G+RW-G+RW-G+RW-G+RW-R+RW-R+RW-R.litmus + +If you don't like command-line arguments, you can obtain a similar speedup +by adding a "filter" clause with exactly the same expression as your +"exists" clause. + +However, please note that seeing the full set of states can be extremely +helpful when developing and debugging litmus tests. + + +LIMITATIONS +=========== + +Limitations of the Linux-kernel memory model (LKMM) include: + +1. Compiler optimizations are not accurately modeled. Of course, + the use of READ_ONCE() and WRITE_ONCE() limits the compiler's + ability to optimize, but under some circumstances it is possible + for the compiler to undermine the memory model. 
For more + information, see Documentation/explanation.txt (in particular, + the "THE PROGRAM ORDER RELATION: po AND po-loc" and "A WARNING" + sections). + + Note that this limitation in turn limits LKMM's ability to + accurately model address, control, and data dependencies. + For example, if the compiler can deduce the value of some variable + carrying a dependency, then the compiler can break that dependency + by substituting a constant of that value. + +2. Multiple access sizes for a single variable are not supported, + and neither are misaligned or partially overlapping accesses. + +3. Exceptions and interrupts are not modeled. In some cases, + this limitation can be overcome by modeling the interrupt or + exception with an additional process. + +4. I/O such as MMIO or DMA is not supported. + +5. Self-modifying code (such as that found in the kernel's + alternatives mechanism, function tracer, Berkeley Packet Filter + JIT compiler, and module loader) is not supported. + +6. Complete modeling of all variants of atomic read-modify-write + operations, locking primitives, and RCU is not provided. + For example, call_rcu() and rcu_barrier() are not supported. + However, a substantial amount of support is provided for these + operations, as shown in the linux-kernel.def file. + + Here are specific limitations: + + a. When rcu_assign_pointer() is passed NULL, the Linux + kernel provides no ordering, but LKMM models this + case as a store release. + + b. The "unless" RMW operations are not currently modeled: + atomic_long_add_unless(), atomic_inc_unless_negative(), + and atomic_dec_unless_positive(). These can be emulated + in litmus tests, for example, by using atomic_cmpxchg(). + + One exception of this limitation is atomic_add_unless(), + which is provided directly by herd7 (so no corresponding + definition in linux-kernel.def). atomic_add_unless() is + modeled by herd7 therefore it can be used in litmus tests. + + c. The call_rcu() function is not modeled. As was shown above, + it can be emulated in litmus tests by adding another + process that invokes synchronize_rcu() and the body of the + callback function, with (for example) a release-acquire + from the site of the emulated call_rcu() to the beginning + of the additional process. + + d. The rcu_barrier() function is not modeled. It can be + emulated in litmus tests emulating call_rcu() via + (for example) a release-acquire from the end of each + additional call_rcu() process to the site of the + emulated rcu-barrier(). + + e. Although sleepable RCU (SRCU) is now modeled, there + are some subtle differences between its semantics and + those in the Linux kernel. For example, the kernel + might interpret the following sequence as two partially + overlapping SRCU read-side critical sections: + + 1 r1 = srcu_read_lock(&my_srcu); + 2 do_something_1(); + 3 r2 = srcu_read_lock(&my_srcu); + 4 do_something_2(); + 5 srcu_read_unlock(&my_srcu, r1); + 6 do_something_3(); + 7 srcu_read_unlock(&my_srcu, r2); + + In contrast, LKMM will interpret this as a nested pair of + SRCU read-side critical sections, with the outer critical + section spanning lines 1-7 and the inner critical section + spanning lines 3-5. + + This difference would be more of a concern had anyone + identified a reasonable use case for partially overlapping + SRCU read-side critical sections. For more information + on the trickiness of such overlapping, please see: + https://paulmck.livejournal.com/40593.html + + f. Reader-writer locking is not modeled. 
It can be + emulated in litmus tests using atomic read-modify-write + operations. + +The fragment of the C language supported by these litmus tests is quite +limited and in some ways non-standard: + +1. There is no automatic C-preprocessor pass. You can of course + run it manually, if you choose. + +2. There is no way to create functions other than the Pn() functions + that model the concurrent processes. + +3. The Pn() functions' formal parameters must be pointers to the + global shared variables. Nothing can be passed by value into + these functions. + +4. The only functions that can be invoked are those built directly + into herd7 or that are defined in the linux-kernel.def file. + +5. The "switch", "do", "for", "while", and "goto" C statements are + not supported. The "switch" statement can be emulated by the + "if" statement. The "do", "for", and "while" statements can + often be emulated by manually unrolling the loop, or perhaps by + enlisting the aid of the C preprocessor to minimize the resulting + code duplication. Some uses of "goto" can be emulated by "if", + and some others by unrolling. + +6. Although you can use a wide variety of types in litmus-test + variable declarations, and especially in global-variable + declarations, the "herd7" tool understands only int and + pointer types. There is no support for floating-point types, + enumerations, characters, strings, arrays, or structures. + +7. Parsing of variable declarations is very loose, with almost no + type checking. + +8. Initializers differ from their C-language counterparts. + For example, when an initializer contains the name of a shared + variable, that name denotes a pointer to that variable, not + the current value of that variable. For example, "int x = y" + is interpreted the way "int x = &y" would be in C. + +9. Dynamic memory allocation is not supported, although this can + be worked around in some cases by supplying multiple statically + allocated variables. + +Some of these limitations may be overcome in the future, but others are +more likely to be addressed by incorporating the Linux-kernel memory model +into other tools. + +Finally, please note that LKMM is subject to change as hardware, use cases, +and compilers evolve. diff --git a/tools/memory-model/Documentation/recipes.txt b/tools/memory-model/Documentation/recipes.txt new file mode 100644 index 000000000..03f58b11c --- /dev/null +++ b/tools/memory-model/Documentation/recipes.txt @@ -0,0 +1,570 @@ +This document provides "recipes", that is, litmus tests for commonly +occurring situations, as well as a few that illustrate subtly broken but +attractive nuisances. Many of these recipes include example code from +v5.7 of the Linux kernel. + +The first section covers simple special cases, the second section +takes off the training wheels to cover more involved examples, +and the third section provides a few rules of thumb. + + +Simple special cases +==================== + +This section presents two simple special cases, the first being where +there is only one CPU or only one memory location is accessed, and the +second being use of that old concurrency workhorse, locking. + + +Single CPU or single memory location +------------------------------------ + +If there is only one CPU on the one hand or only one variable +on the other, the code will execute in order. There are (as +usual) some things to be careful of: + +1. Some aspects of the C language are unordered. 
For example, + in the expression "f(x) + g(y)", the order in which f and g are + called is not defined; the object code is allowed to use either + order or even to interleave the computations. + +2. Compilers are permitted to use the "as-if" rule. That is, a + compiler can emit whatever code it likes for normal accesses, + as long as the results of a single-threaded execution appear + just as if the compiler had followed all the relevant rules. + To see this, compile with a high level of optimization and run + the debugger on the resulting binary. + +3. If there is only one variable but multiple CPUs, that variable + must be properly aligned and all accesses to that variable must + be full sized. Variables that straddle cachelines or pages void + your full-ordering warranty, as do undersized accesses that load + from or store to only part of the variable. + +4. If there are multiple CPUs, accesses to shared variables should + use READ_ONCE() and WRITE_ONCE() or stronger to prevent load/store + tearing, load/store fusing, and invented loads and stores. + There are exceptions to this rule, including: + + i. When there is no possibility of a given shared variable + being updated by some other CPU, for example, while + holding the update-side lock, reads from that variable + need not use READ_ONCE(). + + ii. When there is no possibility of a given shared variable + being either read or updated by other CPUs, for example, + when running during early boot, reads from that variable + need not use READ_ONCE() and writes to that variable + need not use WRITE_ONCE(). + + +Locking +------- + +Locking is well-known and straightforward, at least if you don't think +about it too hard. And the basic rule is indeed quite simple: Any CPU that +has acquired a given lock sees any changes previously seen or made by any +CPU before it released that same lock. Note that this statement is a bit +stronger than "Any CPU holding a given lock sees all changes made by any +CPU during the time that CPU was holding this same lock". For example, +consider the following pair of code fragments: + + /* See MP+polocks.litmus. */ + void CPU0(void) + { + WRITE_ONCE(x, 1); + spin_lock(&mylock); + WRITE_ONCE(y, 1); + spin_unlock(&mylock); + } + + void CPU1(void) + { + spin_lock(&mylock); + r0 = READ_ONCE(y); + spin_unlock(&mylock); + r1 = READ_ONCE(x); + } + +The basic rule guarantees that if CPU0() acquires mylock before CPU1(), +then both r0 and r1 must be set to the value 1. This also has the +consequence that if the final value of r0 is equal to 1, then the final +value of r1 must also be equal to 1. In contrast, the weaker rule would +say nothing about the final value of r1. + +The converse to the basic rule also holds, as illustrated by the +following litmus test: + + /* See MP+porevlocks.litmus. */ + void CPU0(void) + { + r0 = READ_ONCE(y); + spin_lock(&mylock); + r1 = READ_ONCE(x); + spin_unlock(&mylock); + } + + void CPU1(void) + { + spin_lock(&mylock); + WRITE_ONCE(x, 1); + spin_unlock(&mylock); + WRITE_ONCE(y, 1); + } + +This converse to the basic rule guarantees that if CPU0() acquires +mylock before CPU1(), then both r0 and r1 must be set to the value 0. +This also has the consequence that if the final value of r1 is equal +to 0, then the final value of r0 must also be equal to 0. In contrast, +the weaker rule would say nothing about the final value of r0. 
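
Both of these code fragments ship as complete litmus tests in
tools/memory-model/litmus-tests/ (MP+polocks.litmus and
MP+porevlocks.litmus).  For readers who would like to check the basic
rule with herd7 themselves, here is a rough sketch of how the first
fragment can be written in the litmus-test format described earlier.
This sketch is illustrative rather than authoritative: the shipped
MP+polocks.litmus file is the reference version and may differ in
detail.  Note how the lock is simply passed to each process as a
"spinlock_t" pointer, because LKMM models spin_lock() and spin_unlock()
directly:

	C MP+polocks-sketch

	{}

	P0(int *x, int *y, spinlock_t *mylock)
	{
		WRITE_ONCE(*x, 1);
		spin_lock(mylock);
		WRITE_ONCE(*y, 1);
		spin_unlock(mylock);
	}

	P1(int *x, int *y, spinlock_t *mylock)
	{
		int r0;
		int r1;

		spin_lock(mylock);
		r0 = READ_ONCE(*y);
		spin_unlock(mylock);
		r1 = READ_ONCE(*x);
	}

	exists (1:r0=1 /\ 1:r1=0)

Running this through herd7 as described earlier should report the
"exists" clause as "Never", in agreement with the basic rule above.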
+ +These examples show only a single pair of CPUs, but the effects of the +locking basic rule extend across multiple acquisitions of a given lock +across multiple CPUs. + +However, it is not necessarily the case that accesses ordered by +locking will be seen as ordered by CPUs not holding that lock. +Consider this example: + + /* See Z6.0+pooncelock+pooncelock+pombonce.litmus. */ + void CPU0(void) + { + spin_lock(&mylock); + WRITE_ONCE(x, 1); + WRITE_ONCE(y, 1); + spin_unlock(&mylock); + } + + void CPU1(void) + { + spin_lock(&mylock); + r0 = READ_ONCE(y); + WRITE_ONCE(z, 1); + spin_unlock(&mylock); + } + + void CPU2(void) + { + WRITE_ONCE(z, 2); + smp_mb(); + r1 = READ_ONCE(x); + } + +Counter-intuitive though it might be, it is quite possible to have +the final value of r0 be 1, the final value of z be 2, and the final +value of r1 be 0. The reason for this surprising outcome is that +CPU2() never acquired the lock, and thus did not benefit from the +lock's ordering properties. + +Ordering can be extended to CPUs not holding the lock by careful use +of smp_mb__after_spinlock(): + + /* See Z6.0+pooncelock+poonceLock+pombonce.litmus. */ + void CPU0(void) + { + spin_lock(&mylock); + WRITE_ONCE(x, 1); + WRITE_ONCE(y, 1); + spin_unlock(&mylock); + } + + void CPU1(void) + { + spin_lock(&mylock); + smp_mb__after_spinlock(); + r0 = READ_ONCE(y); + WRITE_ONCE(z, 1); + spin_unlock(&mylock); + } + + void CPU2(void) + { + WRITE_ONCE(z, 2); + smp_mb(); + r1 = READ_ONCE(x); + } + +This addition of smp_mb__after_spinlock() strengthens the lock acquisition +sufficiently to rule out the counter-intuitive outcome. + + +Taking off the training wheels +============================== + +This section looks at more complex examples, including message passing, +load buffering, release-acquire chains, store buffering. +Many classes of litmus tests have abbreviated names, which may be found +here: https://www.cl.cam.ac.uk/~pes20/ppc-supplemental/test6.pdf + + +Message passing (MP) +-------------------- + +The MP pattern has one CPU execute a pair of stores to a pair of variables +and another CPU execute a pair of loads from this same pair of variables, +but in the opposite order. The goal is to avoid the counter-intuitive +outcome in which the first load sees the value written by the second store +but the second load does not see the value written by the first store. +In the absence of any ordering, this goal may not be met, as can be seen +in the MP+poonceonces.litmus litmus test. This section therefore looks at +a number of ways of meeting this goal. + + +Release and acquire +~~~~~~~~~~~~~~~~~~~ + +Use of smp_store_release() and smp_load_acquire() is one way to force +the desired MP ordering. The general approach is shown below: + + /* See MP+pooncerelease+poacquireonce.litmus. */ + void CPU0(void) + { + WRITE_ONCE(x, 1); + smp_store_release(&y, 1); + } + + void CPU1(void) + { + r0 = smp_load_acquire(&y); + r1 = READ_ONCE(x); + } + +The smp_store_release() macro orders any prior accesses against the +store, while the smp_load_acquire macro orders the load against any +subsequent accesses. Therefore, if the final value of r0 is the value 1, +the final value of r1 must also be the value 1. + +The init_stack_slab() function in lib/stackdepot.c uses release-acquire +in this way to safely initialize of a slab of the stack. Working out +the mutual-exclusion design is left as an exercise for the reader. 
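
In real code, "x" and "y" in this recipe typically stand for a payload
and a ready flag, respectively.  The following fragment is purely
illustrative (it is not taken from the kernel) and sketches that common
shape of the recipe, under the assumption that nothing else writes to
"msg" or "msg_ready" concurrently:

	struct message {
		int a;
		int b;
	};

	struct message msg;
	int msg_ready;

	void producer(void)
	{
		msg.a = 1;	/* Fill in the payload, ... */
		msg.b = 2;
		smp_store_release(&msg_ready, 1);	/* ... then publish it. */
	}

	void consumer(void)
	{
		int r0, r1 = 0, r2 = 0;

		r0 = smp_load_acquire(&msg_ready);	/* Pairs with the release. */
		if (r0) {
			r1 = msg.a;	/* Must be 1. */
			r2 = msg.b;	/* Must be 2. */
		}
	}

Because the acquire load pairs with the release store, a consumer that
observes "msg_ready" equal to 1 is guaranteed to also observe the
payload stores that preceded the release.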
+ + +Assign and dereference +~~~~~~~~~~~~~~~~~~~~~~ + +Use of rcu_assign_pointer() and rcu_dereference() is quite similar to the +use of smp_store_release() and smp_load_acquire(), except that both +rcu_assign_pointer() and rcu_dereference() operate on RCU-protected +pointers. The general approach is shown below: + + /* See MP+onceassign+derefonce.litmus. */ + int z; + int *y = &z; + int x; + + void CPU0(void) + { + WRITE_ONCE(x, 1); + rcu_assign_pointer(y, &x); + } + + void CPU1(void) + { + rcu_read_lock(); + r0 = rcu_dereference(y); + r1 = READ_ONCE(*r0); + rcu_read_unlock(); + } + +In this example, if the final value of r0 is &x then the final value of +r1 must be 1. + +The rcu_assign_pointer() macro has the same ordering properties as does +smp_store_release(), but the rcu_dereference() macro orders the load only +against later accesses that depend on the value loaded. A dependency +is present if the value loaded determines the address of a later access +(address dependency, as shown above), the value written by a later store +(data dependency), or whether or not a later store is executed in the +first place (control dependency). Note that the term "data dependency" +is sometimes casually used to cover both address and data dependencies. + +In lib/math/prime_numbers.c, the expand_to_next_prime() function invokes +rcu_assign_pointer(), and the next_prime_number() function invokes +rcu_dereference(). This combination mediates access to a bit vector +that is expanded as additional primes are needed. + + +Write and read memory barriers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It is usually better to use smp_store_release() instead of smp_wmb() +and to use smp_load_acquire() instead of smp_rmb(). However, the older +smp_wmb() and smp_rmb() APIs are still heavily used, so it is important +to understand their use cases. The general approach is shown below: + + /* See MP+fencewmbonceonce+fencermbonceonce.litmus. */ + void CPU0(void) + { + WRITE_ONCE(x, 1); + smp_wmb(); + WRITE_ONCE(y, 1); + } + + void CPU1(void) + { + r0 = READ_ONCE(y); + smp_rmb(); + r1 = READ_ONCE(x); + } + +The smp_wmb() macro orders prior stores against later stores, and the +smp_rmb() macro orders prior loads against later loads. Therefore, if +the final value of r0 is 1, the final value of r1 must also be 1. + +The xlog_state_switch_iclogs() function in fs/xfs/xfs_log.c contains +the following write-side code fragment: + + log->l_curr_block -= log->l_logBBsize; + ASSERT(log->l_curr_block >= 0); + smp_wmb(); + log->l_curr_cycle++; + +And the xlog_valid_lsn() function in fs/xfs/xfs_log_priv.h contains +the corresponding read-side code fragment: + + cur_cycle = READ_ONCE(log->l_curr_cycle); + smp_rmb(); + cur_block = READ_ONCE(log->l_curr_block); + +Alternatively, consider the following comment in function +perf_output_put_handle() in kernel/events/ring_buffer.c: + + * kernel user + * + * if (LOAD ->data_tail) { LOAD ->data_head + * (A) smp_rmb() (C) + * STORE $data LOAD $data + * smp_wmb() (B) smp_mb() (D) + * STORE ->data_head STORE ->data_tail + * } + +The B/C pairing is an example of the MP pattern using smp_wmb() on the +write side and smp_rmb() on the read side. + +Of course, given that smp_mb() is strictly stronger than either smp_wmb() +or smp_rmb(), any code fragment that would work with smp_rmb() and +smp_wmb() would also work with smp_mb() replacing either or both of the +weaker barriers. 
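
The smp_wmb()/smp_rmb() fragment above corresponds to the
MP+fencewmbonceonce+fencermbonceonce.litmus test named in its comment,
which can be run through herd7 as described earlier.  A rough sketch of
such a test (the file shipped in tools/memory-model/litmus-tests/ is
the authoritative version) looks like this:

	C MP+wmb+rmb-sketch

	{}

	P0(int *x, int *y)
	{
		WRITE_ONCE(*x, 1);
		smp_wmb();
		WRITE_ONCE(*y, 1);
	}

	P1(int *x, int *y)
	{
		int r0;
		int r1;

		r0 = READ_ONCE(*y);
		smp_rmb();
		r1 = READ_ONCE(*x);
	}

	exists (1:r0=1 /\ 1:r1=0)

herd7 should report the "exists" clause as "Never", confirming that the
write and read barriers suffice for the MP pattern.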
+ + +Load buffering (LB) +------------------- + +The LB pattern has one CPU load from one variable and then store to a +second, while another CPU loads from the second variable and then stores +to the first. The goal is to avoid the counter-intuitive situation where +each load reads the value written by the other CPU's store. In the +absence of any ordering it is quite possible that this may happen, as +can be seen in the LB+poonceonces.litmus litmus test. + +One way of avoiding the counter-intuitive outcome is through the use of a +control dependency paired with a full memory barrier: + + /* See LB+fencembonceonce+ctrlonceonce.litmus. */ + void CPU0(void) + { + r0 = READ_ONCE(x); + if (r0) + WRITE_ONCE(y, 1); + } + + void CPU1(void) + { + r1 = READ_ONCE(y); + smp_mb(); + WRITE_ONCE(x, 1); + } + +This pairing of a control dependency in CPU0() with a full memory +barrier in CPU1() prevents r0 and r1 from both ending up equal to 1. + +The A/D pairing from the ring-buffer use case shown earlier also +illustrates LB. Here is a repeat of the comment in +perf_output_put_handle() in kernel/events/ring_buffer.c, showing a +control dependency on the kernel side and a full memory barrier on +the user side: + + * kernel user + * + * if (LOAD ->data_tail) { LOAD ->data_head + * (A) smp_rmb() (C) + * STORE $data LOAD $data + * smp_wmb() (B) smp_mb() (D) + * STORE ->data_head STORE ->data_tail + * } + * + * Where A pairs with D, and B pairs with C. + +The kernel's control dependency between the load from ->data_tail +and the store to data combined with the user's full memory barrier +between the load from data and the store to ->data_tail prevents +the counter-intuitive outcome where the kernel overwrites the data +before the user gets done loading it. + + +Release-acquire chains +---------------------- + +Release-acquire chains are a low-overhead, flexible, and easy-to-use +method of maintaining order. However, they do have some limitations that +need to be fully understood. Here is an example that maintains order: + + /* See ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus. */ + void CPU0(void) + { + WRITE_ONCE(x, 1); + smp_store_release(&y, 1); + } + + void CPU1(void) + { + r0 = smp_load_acquire(y); + smp_store_release(&z, 1); + } + + void CPU2(void) + { + r1 = smp_load_acquire(z); + r2 = READ_ONCE(x); + } + +In this case, if r0 and r1 both have final values of 1, then r2 must +also have a final value of 1. + +The ordering in this example is stronger than it needs to be. For +example, ordering would still be preserved if CPU1()'s smp_load_acquire() +invocation was replaced with READ_ONCE(). + +It is tempting to assume that CPU0()'s store to x is globally ordered +before CPU1()'s store to z, but this is not the case: + + /* See Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus. */ + void CPU0(void) + { + WRITE_ONCE(x, 1); + smp_store_release(&y, 1); + } + + void CPU1(void) + { + r0 = smp_load_acquire(y); + smp_store_release(&z, 1); + } + + void CPU2(void) + { + WRITE_ONCE(z, 2); + smp_mb(); + r1 = READ_ONCE(x); + } + +One might hope that if the final value of r0 is 1 and the final value +of z is 2, then the final value of r1 must also be 1, but it really is +possible for r1 to have the final value of 0. The reason, of course, +is that in this version, CPU2() is not part of the release-acquire chain. +This situation is accounted for in the rules of thumb below. 
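
Readers who want to see this limitation for themselves can convert the
example above into a litmus test along the following lines.  This is
only a sketch (the Z6.0+pooncelock... er, rather, the
Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus file named in the
comment is the authoritative version, and its "exists" clause may be
phrased differently), but herd7 should report the counter-intuitive
outcome as reachable ("Sometimes"):

	C Z6.0-chain-sketch

	{}

	P0(int *x, int *y)
	{
		WRITE_ONCE(*x, 1);
		smp_store_release(y, 1);
	}

	P1(int *y, int *z)
	{
		int r0;

		r0 = smp_load_acquire(y);
		smp_store_release(z, 1);
	}

	P2(int *x, int *z)
	{
		int r1;

		WRITE_ONCE(*z, 2);
		smp_mb();
		r1 = READ_ONCE(*x);
	}

	exists (1:r0=1 /\ z=2 /\ 2:r1=0)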
+ +Despite this limitation, release-acquire chains are low-overhead as +well as simple and powerful, at least as memory-ordering mechanisms go. + + +Store buffering +--------------- + +Store buffering can be thought of as upside-down load buffering, so +that one CPU first stores to one variable and then loads from a second, +while another CPU stores to the second variable and then loads from the +first. Preserving order requires nothing less than full barriers: + + /* See SB+fencembonceonces.litmus. */ + void CPU0(void) + { + WRITE_ONCE(x, 1); + smp_mb(); + r0 = READ_ONCE(y); + } + + void CPU1(void) + { + WRITE_ONCE(y, 1); + smp_mb(); + r1 = READ_ONCE(x); + } + +Omitting either smp_mb() will allow both r0 and r1 to have final +values of 0, but providing both full barriers as shown above prevents +this counter-intuitive outcome. + +This pattern most famously appears as part of Dekker's locking +algorithm, but it has a much more practical use within the Linux kernel +of ordering wakeups. The following comment taken from waitqueue_active() +in include/linux/wait.h shows the canonical pattern: + + * CPU0 - waker CPU1 - waiter + * + * for (;;) { + * @cond = true; prepare_to_wait(&wq_head, &wait, state); + * smp_mb(); // smp_mb() from set_current_state() + * if (waitqueue_active(wq_head)) if (@cond) + * wake_up(wq_head); break; + * schedule(); + * } + * finish_wait(&wq_head, &wait); + +On CPU0, the store is to @cond and the load is in waitqueue_active(). +On CPU1, prepare_to_wait() contains both a store to wq_head and a call +to set_current_state(), which contains an smp_mb() barrier; the load is +"if (@cond)". The full barriers prevent the undesirable outcome where +CPU1 puts the waiting task to sleep and CPU0 fails to wake it up. + +Note that use of locking can greatly simplify this pattern. + + +Rules of thumb +============== + +There might seem to be no pattern governing what ordering primitives are +needed in which situations, but this is not the case. There is a pattern +based on the relation between the accesses linking successive CPUs in a +given litmus test. There are three types of linkage: + +1. Write-to-read, where the next CPU reads the value that the + previous CPU wrote. The LB litmus-test patterns contain only + this type of relation. In formal memory-modeling texts, this + relation is called "reads-from" and is usually abbreviated "rf". + +2. Read-to-write, where the next CPU overwrites the value that the + previous CPU read. The SB litmus test contains only this type + of relation. In formal memory-modeling texts, this relation is + often called "from-reads" and is sometimes abbreviated "fr". + +3. Write-to-write, where the next CPU overwrites the value written + by the previous CPU. The Z6.0 litmus test pattern contains a + write-to-write relation between the last access of CPU1() and + the first access of CPU2(). In formal memory-modeling texts, + this relation is often called "coherence order" and is sometimes + abbreviated "co". In the C++ standard, it is instead called + "modification order" and often abbreviated "mo". + +The strength of memory ordering required for a given litmus test to +avoid a counter-intuitive outcome depends on the types of relations +linking the memory accesses for the outcome in question: + +o If all links are write-to-read links, then the weakest + possible ordering within each CPU suffices. For example, in + the LB litmus test, a control dependency was enough to do the + job. 
+ +o If all but one of the links are write-to-read links, then a + release-acquire chain suffices. Both the MP and the ISA2 + litmus tests illustrate this case. + +o If more than one of the links are something other than + write-to-read links, then a full memory barrier is required + between each successive pair of non-write-to-read links. This + case is illustrated by the Z6.0 litmus tests, both in the + locking and in the release-acquire sections. + +However, if you find yourself having to stretch these rules of thumb +to fit your situation, you should consider creating a litmus test and +running it on the model. diff --git a/tools/memory-model/Documentation/references.txt b/tools/memory-model/Documentation/references.txt new file mode 100644 index 000000000..c5fdfd19d --- /dev/null +++ b/tools/memory-model/Documentation/references.txt @@ -0,0 +1,131 @@ +This document provides background reading for memory models and related +tools. These documents are aimed at kernel hackers who are interested +in memory models. + + +Hardware manuals and models +=========================== + +o SPARC International Inc. (Ed.). 1994. "The SPARC Architecture + Reference Manual Version 9". SPARC International Inc. + +o Compaq Computer Corporation (Ed.). 2002. "Alpha Architecture + Reference Manual". Compaq Computer Corporation. + +o Intel Corporation (Ed.). 2002. "A Formal Specification of Intel + Itanium Processor Family Memory Ordering". Intel Corporation. + +o Intel Corporation (Ed.). 2002. "Intel 64 and IA-32 Architectures + Software Developer’s Manual". Intel Corporation. + +o Peter Sewell, Susmit Sarkar, Scott Owens, Francesco Zappa Nardelli, + and Magnus O. Myreen. 2010. "x86-TSO: A Rigorous and Usable + Programmer's Model for x86 Multiprocessors". Commun. ACM 53, 7 + (July, 2010), 89-97. http://doi.acm.org/10.1145/1785414.1785443 + +o IBM Corporation (Ed.). 2009. "Power ISA Version 2.06". IBM + Corporation. + +o ARM Ltd. (Ed.). 2009. "ARM Barrier Litmus Tests and Cookbook". + ARM Ltd. + +o Susmit Sarkar, Peter Sewell, Jade Alglave, Luc Maranget, and + Derek Williams. 2011. "Understanding POWER Multiprocessors". In + Proceedings of the 32Nd ACM SIGPLAN Conference on Programming + Language Design and Implementation (PLDI ’11). ACM, New York, + NY, USA, 175–186. + +o Susmit Sarkar, Kayvan Memarian, Scott Owens, Mark Batty, + Peter Sewell, Luc Maranget, Jade Alglave, and Derek Williams. + 2012. "Synchronising C/C++ and POWER". In Proceedings of the 33rd + ACM SIGPLAN Conference on Programming Language Design and + Implementation (PLDI '12). ACM, New York, NY, USA, 311-322. + +o ARM Ltd. (Ed.). 2014. "ARM Architecture Reference Manual (ARMv8, + for ARMv8-A architecture profile)". ARM Ltd. + +o Imagination Technologies, LTD. 2015. "MIPS(R) Architecture + For Programmers, Volume II-A: The MIPS64(R) Instruction, + Set Reference Manual". Imagination Technologies, + LTD. https://imgtec.com/?do-download=4302. + +o Shaked Flur, Kathryn E. Gray, Christopher Pulte, Susmit + Sarkar, Ali Sezgin, Luc Maranget, Will Deacon, and Peter + Sewell. 2016. "Modelling the ARMv8 Architecture, Operationally: + Concurrency and ISA". In Proceedings of the 43rd Annual ACM + SIGPLAN-SIGACT Symposium on Principles of Programming Languages + (POPL ’16). ACM, New York, NY, USA, 608–621. + +o Shaked Flur, Susmit Sarkar, Christopher Pulte, Kyndylan Nienhuis, + Luc Maranget, Kathryn E. Gray, Ali Sezgin, Mark Batty, and Peter + Sewell. 2017. "Mixed-size Concurrency: ARM, POWER, C/C++11, + and SC". 
In Proceedings of the 44th ACM SIGPLAN Symposium on + Principles of Programming Languages (POPL 2017). ACM, New York, + NY, USA, 429–442. + +o Christopher Pulte, Shaked Flur, Will Deacon, Jon French, + Susmit Sarkar, and Peter Sewell. 2018. "Simplifying ARM concurrency: + multicopy-atomic axiomatic and operational models for ARMv8". In + Proceedings of the ACM on Programming Languages, Volume 2, Issue + POPL, Article No. 19. ACM, New York, NY, USA. + + +Linux-kernel memory model +========================= + +o Jade Alglave, Will Deacon, Boqun Feng, David Howells, Daniel + Lustig, Luc Maranget, Paul E. McKenney, Andrea Parri, Nicholas + Piggin, Alan Stern, Akira Yokosawa, and Peter Zijlstra. + 2019. "Calibrating your fear of big bad optimizing compilers" + Linux Weekly News. https://lwn.net/Articles/799218/ + +o Jade Alglave, Will Deacon, Boqun Feng, David Howells, Daniel + Lustig, Luc Maranget, Paul E. McKenney, Andrea Parri, Nicholas + Piggin, Alan Stern, Akira Yokosawa, and Peter Zijlstra. + 2019. "Who's afraid of a big bad optimizing compiler?" + Linux Weekly News. https://lwn.net/Articles/793253/ + +o Jade Alglave, Luc Maranget, Paul E. McKenney, Andrea Parri, and + Alan Stern. 2018. "Frightening small children and disconcerting + grown-ups: Concurrency in the Linux kernel". In Proceedings of + the 23rd International Conference on Architectural Support for + Programming Languages and Operating Systems (ASPLOS 2018). ACM, + New York, NY, USA, 405-418. Webpage: http://diy.inria.fr/linux/. + +o Jade Alglave, Luc Maranget, Paul E. McKenney, Andrea Parri, and + Alan Stern. 2017. "A formal kernel memory-ordering model (part 1)" + Linux Weekly News. https://lwn.net/Articles/718628/ + +o Jade Alglave, Luc Maranget, Paul E. McKenney, Andrea Parri, and + Alan Stern. 2017. "A formal kernel memory-ordering model (part 2)" + Linux Weekly News. https://lwn.net/Articles/720550/ + +o Jade Alglave, Luc Maranget, Paul E. McKenney, Andrea Parri, and + Alan Stern. 2017-2019. "A Formal Model of Linux-Kernel Memory + Ordering" (backup material for the LWN articles) + https://mirrors.edge.kernel.org/pub/linux/kernel/people/paulmck/LWNLinuxMM/ + + +Memory-model tooling +==================== + +o Daniel Jackson. 2002. "Alloy: A Lightweight Object Modelling + Notation". ACM Trans. Softw. Eng. Methodol. 11, 2 (April 2002), + 256–290. http://doi.acm.org/10.1145/505145.505149 + +o Jade Alglave, Luc Maranget, and Michael Tautschnig. 2014. "Herding + Cats: Modelling, Simulation, Testing, and Data Mining for Weak + Memory". ACM Trans. Program. Lang. Syst. 36, 2, Article 7 (July + 2014), 7:1–7:74 pages. + +o Jade Alglave, Patrick Cousot, and Luc Maranget. 2016. "Syntax and + semantics of the weak consistency model specification language + cat". CoRR abs/1608.07531 (2016). https://arxiv.org/abs/1608.07531 + + +Memory-model comparisons +======================== + +o Paul E. McKenney, Ulrich Weigand, Andrea Parri, and Boqun + Feng. 2018. "Linux-Kernel Memory Model". (27 September 2018). + http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0124r6.html. diff --git a/tools/memory-model/Documentation/simple.txt b/tools/memory-model/Documentation/simple.txt new file mode 100644 index 000000000..81e1a0ec5 --- /dev/null +++ b/tools/memory-model/Documentation/simple.txt @@ -0,0 +1,271 @@ +This document provides options for those wishing to keep their +memory-ordering lives simple, as is necessary for those whose domain +is complex. 
After all, there are bugs other than memory-ordering bugs, +and the time spent gaining memory-ordering knowledge is not available +for gaining domain knowledge. Furthermore Linux-kernel memory model +(LKMM) is quite complex, with subtle differences in code often having +dramatic effects on correctness. + +The options near the beginning of this list are quite simple. The idea +is not that kernel hackers don't already know about them, but rather +that they might need the occasional reminder. + +Please note that this is a generic guide, and that specific subsystems +will often have special requirements or idioms. For example, developers +of MMIO-based device drivers will often need to use mb(), rmb(), and +wmb(), and therefore might find smp_mb(), smp_rmb(), and smp_wmb() +to be more natural than smp_load_acquire() and smp_store_release(). +On the other hand, those coming in from other environments will likely +be more familiar with these last two. + + +Single-threaded code +==================== + +In single-threaded code, there is no reordering, at least assuming +that your toolchain and hardware are working correctly. In addition, +it is generally a mistake to assume your code will only run in a single +threaded context as the kernel can enter the same code path on multiple +CPUs at the same time. One important exception is a function that makes +no external data references. + +In the general case, you will need to take explicit steps to ensure that +your code really is executed within a single thread that does not access +shared variables. A simple way to achieve this is to define a global lock +that you acquire at the beginning of your code and release at the end, +taking care to ensure that all references to your code's shared data are +also carried out under that same lock. Because only one thread can hold +this lock at a given time, your code will be executed single-threaded. +This approach is called "code locking". + +Code locking can severely limit both performance and scalability, so it +should be used with caution, and only on code paths that execute rarely. +After all, a huge amount of effort was required to remove the Linux +kernel's old "Big Kernel Lock", so let's please be very careful about +adding new "little kernel locks". + +One of the advantages of locking is that, in happy contrast with the +year 1981, almost all kernel developers are very familiar with locking. +The Linux kernel's lockdep (CONFIG_PROVE_LOCKING=y) is very helpful with +the formerly feared deadlock scenarios. + +Please use the standard locking primitives provided by the kernel rather +than rolling your own. For one thing, the standard primitives interact +properly with lockdep. For another thing, these primitives have been +tuned to deal better with high contention. And for one final thing, it is +surprisingly hard to correctly code production-quality lock acquisition +and release functions. After all, even simple non-production-quality +locking functions must carefully prevent both the CPU and the compiler +from moving code in either direction across the locking function. + +Despite the scalability limitations of single-threaded code, RCU +takes this approach for much of its grace-period processing and also +for early-boot operation. The reason RCU is able to scale despite +single-threaded grace-period processing is use of batching, where all +updates that accumulated during one grace period are handled by the +next one. In other words, slowing down grace-period processing makes +it more efficient. 
Nor is RCU unique: Similar batching optimizations +are used in many I/O operations. + + +Packaged code +============= + +Even if performance and scalability concerns prevent your code from +being completely single-threaded, it is often possible to use library +functions that handle the concurrency nearly or entirely on their own. +This approach delegates any LKMM worries to the library maintainer. + +In the kernel, what is the "library"? Quite a bit. It includes the +contents of the lib/ directory, much of the include/linux/ directory along +with a lot of other heavily used APIs. But heavily used examples include +the list macros (for example, include/linux/{,rcu}list.h), workqueues, +smp_call_function(), and the various hash tables and search trees. + + +Data locking +============ + +With code locking, we use single-threaded code execution to guarantee +serialized access to the data that the code is accessing. However, +we can also achieve this by instead associating the lock with specific +instances of the data structures. This creates a "critical section" +in the code execution that will execute as though it is single threaded. +By placing all the accesses and modifications to a shared data structure +inside a critical section, we ensure that the execution context that +holds the lock has exclusive access to the shared data. + +The poster boy for this approach is the hash table, where placing a lock +in each hash bucket allows operations on different buckets to proceed +concurrently. This works because the buckets do not overlap with each +other, so that an operation on one bucket does not interfere with any +other bucket. + +As the number of buckets increases, data locking scales naturally. +In particular, if the amount of data increases with the number of CPUs, +increasing the number of buckets as the number of CPUs increase results +in a naturally scalable data structure. + + +Per-CPU processing +================== + +Partitioning processing and data over CPUs allows each CPU to take +a single-threaded approach while providing excellent performance and +scalability. Of course, there is no free lunch: The dark side of this +excellence is substantially increased memory footprint. + +In addition, it is sometimes necessary to occasionally update some global +view of this processing and data, in which case something like locking +must be used to protect this global view. This is the approach taken +by the percpu_counter infrastructure. In many cases, there are already +generic/library variants of commonly used per-cpu constructs available. +Please use them rather than rolling your own. + +RCU uses DEFINE_PER_CPU*() declaration to create a number of per-CPU +data sets. For example, each CPU does private quiescent-state processing +within its instance of the per-CPU rcu_data structure, and then uses data +locking to report quiescent states up the grace-period combining tree. + + +Packaged primitives: Sequence locking +===================================== + +Lockless programming is considered by many to be more difficult than +lock-based programming, but there are a few lockless design patterns that +have been built out into an API. One of these APIs is sequence locking. +Although this APIs can be used in extremely complex ways, there are simple +and effective ways of using it that avoid the need to pay attention to +memory ordering. + +The basic keep-things-simple rule for sequence locking is "do not write +in read-side code". 
Yes, you can do writes from within sequence-locking +readers, but it won't be so simple. For example, such writes will be +lockless and should be idempotent. + +For more sophisticated use cases, LKMM can guide you, including use +cases involving combining sequence locking with other synchronization +primitives. (LKMM does not yet know about sequence locking, so it is +currently necessary to open-code it in your litmus tests.) + +Additional information may be found in include/linux/seqlock.h. + + +Packaged primitives: RCU +======================== + +Another lockless design pattern that has been baked into an API +is RCU. The Linux kernel makes sophisticated use of RCU, but the +keep-things-simple rules for RCU are "do not write in read-side code", +"do not update anything that is visible to and accessed by readers", +and "protect updates with locking". + +These rules are illustrated by the functions foo_update_a() and +foo_get_a() shown in Documentation/RCU/whatisRCU.rst. Additional +RCU usage patterns may be found in Documentation/RCU and in the +source code. + + +Packaged primitives: Atomic operations +====================================== + +Back in the day, the Linux kernel had three types of atomic operations: + +1. Initialization and read-out, such as atomic_set() and atomic_read(). + +2. Operations that did not return a value and provided no ordering, + such as atomic_inc() and atomic_dec(). + +3. Operations that returned a value and provided full ordering, such as + atomic_add_return() and atomic_dec_and_test(). Note that some + value-returning operations provide full ordering only conditionally. + For example, cmpxchg() provides ordering only upon success. + +More recent kernels have operations that return a value but do not +provide full ordering. These are flagged with either a _relaxed() +suffix (providing no ordering), or an _acquire() or _release() suffix +(providing limited ordering). + +Additional information may be found in these files: + +Documentation/atomic_t.txt +Documentation/atomic_bitops.txt +Documentation/core-api/atomic_ops.rst +Documentation/core-api/refcount-vs-atomic.rst + +Reading code using these primitives is often also quite helpful. + + +Lockless, fully ordered +======================= + +When using locking, there often comes a time when it is necessary +to access some variable or another without holding the data lock +that serializes access to that variable. + +If you want to keep things simple, use the initialization and read-out +operations from the previous section only when there are no racing +accesses. Otherwise, use only fully ordered operations when accessing +or modifying the variable. This approach guarantees that code prior +to a given access to that variable will be seen by all CPUs as having +happened before any code following any later access to that same variable. + +Please note that per-CPU functions are not atomic operations and +hence they do not provide any ordering guarantees at all. + +If the lockless accesses are frequently executed reads that are used +only for heuristics, or if they are frequently executed writes that +are used only for statistics, please see the next section. + + +Lockless statistics and heuristics +================================== + +Unordered primitives such as atomic_read(), atomic_set(), READ_ONCE(), and +WRITE_ONCE() can safely be used in some cases.
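For example, a minimal sketch of a lockless statistics counter might look +as follows; the counter and function names are purely illustrative, and +occasional lost updates are tolerated because the value is used only for +statistics: + +	/* Approximate event count, used only for statistics. */ +	static unsigned long foo_nr_events; + +	static inline void foo_count_event(void) +	{ +		/* Racy by design: concurrent increments may be lost. */ +		WRITE_ONCE(foo_nr_events, READ_ONCE(foo_nr_events) + 1); +	} + +Readers can then sample the counter with READ_ONCE() whenever an +approximate value suffices.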
These primitives provide +no ordering, but they do prevent the compiler from carrying out a number +of destructive optimizations (for which please see the next section). +One example use for these primitives is statistics, such as per-CPU +counters exemplified by the rt_cache_stat structure's routing-cache +statistics counters. Another example use case is heuristics, such as +the jiffies_till_first_fqs and jiffies_till_next_fqs kernel parameters +controlling how often RCU scans for idle CPUs. + +But be careful. "Unordered" really does mean "unordered". It is all +too easy to assume ordering, and this assumption must be avoided when +using these primitives. + + +Don't let the compiler trip you up +================================== + +It can be quite tempting to use plain C-language accesses for lockless +loads from and stores to shared variables. Although this is both +possible and quite common in the Linux kernel, it does require a +surprising amount of analysis, care, and knowledge about the compiler. +Yes, some decades ago it was not unfair to consider a C compiler to be +an assembler with added syntax and better portability, but the advent of +sophisticated optimizing compilers means that those days are long gone. +Today's optimizing compilers can profoundly rewrite your code during the +translation process, and have long been ready, willing, and able to do so. + +Therefore, if you really need to use C-language assignments instead of +READ_ONCE(), WRITE_ONCE(), and so on, you will need to have a very good +understanding of both the C standard and your compiler. Here are some +introductory references and some tooling to start you on this noble quest: + +Who's afraid of a big bad optimizing compiler? + https://lwn.net/Articles/793253/ +Calibrating your fear of big bad optimizing compilers + https://lwn.net/Articles/799218/ +Concurrency bugs should fear the big bad data-race detector (part 1) + https://lwn.net/Articles/816850/ +Concurrency bugs should fear the big bad data-race detector (part 2) + https://lwn.net/Articles/816854/ + + +More complex use cases +====================== + +If the alternatives above do not do what you need, please look at the +recipes-pairs.txt file to peel off the next layer of the memory-ordering +onion. diff --git a/tools/memory-model/README b/tools/memory-model/README new file mode 100644 index 000000000..c8144d4aa --- /dev/null +++ b/tools/memory-model/README @@ -0,0 +1,228 @@ + ===================================== + LINUX KERNEL MEMORY CONSISTENCY MODEL + ===================================== + +============ +INTRODUCTION +============ + +This directory contains the memory consistency model (memory model, for +short) of the Linux kernel, written in the "cat" language and executable +by the externally provided "herd7" simulator, which exhaustively explores +the state space of small litmus tests. + +In addition, the "klitmus7" tool (also externally provided) may be used +to convert a litmus test to a Linux kernel module, which in turn allows +that litmus test to be exercised within the Linux kernel. + + +============ +REQUIREMENTS +============ + +Version 7.52 or higher of the "herd7" and "klitmus7" tools must be +downloaded separately: + + https://github.com/herd/herdtools7 + +See "herdtools7/INSTALL.md" for installation instructions. + +Note that although these tools usually provide backwards compatibility, +this is not absolutely guaranteed. + +For example, a future version of herd7 might not work with the model +in this release.
A compatible model will likely be made available in +a later release of the Linux kernel. + +If you absolutely need to run the model in this particular release, +please try using the exact version called out above. + +klitmus7 is independent of the model provided here. It has its own +dependency on a target kernel release where converted code is built +and executed. Any change in kernel APIs essential to klitmus7 will +necessitate an upgrade of klitmus7. + +If you find any compatibility issues in klitmus7, please inform the +memory model maintainers. + +klitmus7 Compatibility Table +---------------------------- + + ============ ========== + target Linux herdtools7 + ------------ ---------- + -- 4.18 7.48 -- + 4.15 -- 4.19 7.49 -- + 4.20 -- 5.5 7.54 -- + 5.6 -- 7.56 -- + ============ ========== + + +================== +BASIC USAGE: HERD7 +================== + +The memory model is used, in conjunction with "herd7", to exhaustively +explore the state space of small litmus tests. Documentation describing +the format, features, capabilities, and limitations of these litmus +tests is available in tools/memory-model/Documentation/litmus-tests.txt. + +Example litmus tests may be found in the Linux-kernel source tree: + + tools/memory-model/litmus-tests/ + Documentation/litmus-tests/ + +Several thousand more example litmus tests are available here: + + https://github.com/paulmckrcu/litmus + https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/herd + https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/litmus + +Documentation describing litmus tests and how to use them may be found +here: + + tools/memory-model/Documentation/litmus-tests.txt + +The remainder of this section uses the SB+fencembonceonces.litmus test +located in the tools/memory-model directory. + +To run SB+fencembonceonces.litmus against the memory model: + + $ cd $LINUX_SOURCE_TREE/tools/memory-model + $ herd7 -conf linux-kernel.cfg litmus-tests/SB+fencembonceonces.litmus + +Here is the corresponding output: + + Test SB+fencembonceonces Allowed + States 3 + 0:r0=0; 1:r0=1; + 0:r0=1; 1:r0=0; + 0:r0=1; 1:r0=1; + No + Witnesses + Positive: 0 Negative: 3 + Condition exists (0:r0=0 /\ 1:r0=0) + Observation SB+fencembonceonces Never 0 3 + Time SB+fencembonceonces 0.01 + Hash=d66d99523e2cac6b06e66f4c995ebb48 + +The "Positive: 0 Negative: 3" and the "Never 0 3" each indicate that +this litmus test's "exists" clause cannot be satisfied. + +See "herd7 -help" or "herdtools7/doc/" for more information on running the +tool itself, but please be aware that this documentation is intended for +people who work on the memory model itself, that is, people making changes +to the tools/memory-model/linux-kernel.* files. It is not intended for +people focusing on writing, understanding, and running LKMM litmus tests. + + +===================== +BASIC USAGE: KLITMUS7 +===================== + +The "klitmus7" tool converts a litmus test into a Linux kernel module, +which may then be loaded and run.
+ +For example, to run SB+fencembonceonces.litmus against hardware: + + $ mkdir mymodules + $ klitmus7 -o mymodules litmus-tests/SB+fencembonceonces.litmus + $ cd mymodules ; make + $ sudo sh run.sh + +The corresponding output includes: + + Test SB+fencembonceonces Allowed + Histogram (3 states) + 644580 :>0:r0=1; 1:r0=0; + 644328 :>0:r0=0; 1:r0=1; + 711092 :>0:r0=1; 1:r0=1; + No + Witnesses + Positive: 0, Negative: 2000000 + Condition exists (0:r0=0 /\ 1:r0=0) is NOT validated + Hash=d66d99523e2cac6b06e66f4c995ebb48 + Observation SB+fencembonceonces Never 0 2000000 + Time SB+fencembonceonces 0.16 + +The "Positive: 0 Negative: 2000000" and the "Never 0 2000000" indicate +that during two million trials, the state specified in this litmus +test's "exists" clause was not reached. + +And, as with "herd7", please see "klitmus7 -help" or "herdtools7/doc/" +for more information. And again, please be aware that this documentation +is intended for people who work on the memory model itself, that is, +people making changes to the tools/memory-model/linux-kernel.* files. +It is not intended for people focusing on writing, understanding, and +running LKMM litmus tests. + + +==================== +DESCRIPTION OF FILES +==================== + +Documentation/cheatsheet.txt + Quick-reference guide to the Linux-kernel memory model. + +Documentation/explanation.txt + Describes the memory model in detail. + +Documentation/litmus-tests.txt + Describes the format, features, capabilities, and limitations + of the litmus tests that LKMM can evaluate. + +Documentation/recipes.txt + Lists common memory-ordering patterns. + +Documentation/references.txt + Provides background reading. + +Documentation/simple.txt + Starting point for someone new to Linux-kernel concurrency. + And also for those needing a reminder of the simpler approaches + to concurrency! + +linux-kernel.bell + Categorizes the relevant instructions, including memory + references, memory barriers, atomic read-modify-write operations, + lock acquisition/release, and RCU operations. + + More formally, this file (1) lists the subtypes of the various + event types used by the memory model and (2) performs RCU + read-side critical section nesting analysis. + +linux-kernel.cat + Specifies what reorderings are forbidden by memory references, + memory barriers, atomic read-modify-write operations, and RCU. + + More formally, this file specifies what executions are forbidden + by the memory model. Allowed executions are those which + satisfy the model's "coherence", "atomic", "happens-before", + "propagation", and "rcu" axioms, which are defined in the file. + +linux-kernel.cfg + Convenience file that gathers the common-case herd7 command-line + arguments. + +linux-kernel.def + Maps from C-like syntax to herd7's internal litmus-test + instruction-set architecture. + +litmus-tests + Directory containing a few representative litmus tests, which + are listed in litmus-tests/README. A great deal more litmus + tests are available at https://github.com/paulmckrcu/litmus. + +lock.cat + Provides a front-end analysis of lock acquisition and release, + for example, associating a lock acquisition with the preceding + and following releases and checking for self-deadlock. + + More formally, this file defines a performance-enhanced scheme + for generation of the possible reads-from and coherence order + relations on the locking primitives. + +README + This file. + +scripts Various scripts, see scripts/README. 
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell new file mode 100644 index 000000000..5be86b102 --- /dev/null +++ b/tools/memory-model/linux-kernel.bell @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0+ +(* + * Copyright (C) 2015 Jade Alglave <j.alglave@ucl.ac.uk>, + * Copyright (C) 2016 Luc Maranget <luc.maranget@inria.fr> for Inria + * Copyright (C) 2017 Alan Stern <stern@rowland.harvard.edu>, + * Andrea Parri <parri.andrea@gmail.com> + * + * An earlier version of this file appeared in the companion webpage for + * "Frightening small children and disconcerting grown-ups: Concurrency + * in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern, + * which appeared in ASPLOS 2018. + *) + +"Linux-kernel memory consistency model" + +enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) || + 'release (*smp_store_release*) || + 'acquire (*smp_load_acquire*) || + 'noreturn (* R of non-return RMW *) +instructions R[{'once,'acquire,'noreturn}] +instructions W[{'once,'release}] +instructions RMW[{'once,'acquire,'release}] + +enum Barriers = 'wmb (*smp_wmb*) || + 'rmb (*smp_rmb*) || + 'mb (*smp_mb*) || + 'barrier (*barrier*) || + 'rcu-lock (*rcu_read_lock*) || + 'rcu-unlock (*rcu_read_unlock*) || + 'sync-rcu (*synchronize_rcu*) || + 'before-atomic (*smp_mb__before_atomic*) || + 'after-atomic (*smp_mb__after_atomic*) || + 'after-spinlock (*smp_mb__after_spinlock*) || + 'after-unlock-lock (*smp_mb__after_unlock_lock*) +instructions F[Barriers] + +(* SRCU *) +enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu +instructions SRCU[SRCU] +(* All srcu events *) +let Srcu = Srcu-lock | Srcu-unlock | Sync-srcu + +(* Compute matching pairs of nested Rcu-lock and Rcu-unlock *) +let rcu-rscs = let rec + unmatched-locks = Rcu-lock \ domain(matched) + and unmatched-unlocks = Rcu-unlock \ range(matched) + and unmatched = unmatched-locks | unmatched-unlocks + and unmatched-po = [unmatched] ; po ; [unmatched] + and unmatched-locks-to-unlocks = + [unmatched-locks] ; po ; [unmatched-unlocks] + and matched = matched | (unmatched-locks-to-unlocks \ + (unmatched-po ; unmatched-po)) + in matched + +(* Validate nesting *) +flag ~empty Rcu-lock \ domain(rcu-rscs) as unbalanced-rcu-locking +flag ~empty Rcu-unlock \ range(rcu-rscs) as unbalanced-rcu-locking + +(* Compute matching pairs of nested Srcu-lock and Srcu-unlock *) +let srcu-rscs = let rec + unmatched-locks = Srcu-lock \ domain(matched) + and unmatched-unlocks = Srcu-unlock \ range(matched) + and unmatched = unmatched-locks | unmatched-unlocks + and unmatched-po = ([unmatched] ; po ; [unmatched]) & loc + and unmatched-locks-to-unlocks = + ([unmatched-locks] ; po ; [unmatched-unlocks]) & loc + and matched = matched | (unmatched-locks-to-unlocks \ + (unmatched-po ; unmatched-po)) + in matched + +(* Validate nesting *) +flag ~empty Srcu-lock \ domain(srcu-rscs) as unbalanced-srcu-locking +flag ~empty Srcu-unlock \ range(srcu-rscs) as unbalanced-srcu-locking + +(* Check for use of synchronize_srcu() inside an RCU critical section *) +flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep + +(* Validate SRCU dynamic match *) +flag ~empty different-values(srcu-rscs) as srcu-bad-nesting + +(* Compute marked and plain memory accesses *) +let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) | + LKR | LKW | UL | LF | RL | RU +let Plain = M \ Marked diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat new file mode 100644 index 000000000..2a9b4fe4a --- 
/dev/null +++ b/tools/memory-model/linux-kernel.cat @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0+ +(* + * Copyright (C) 2015 Jade Alglave <j.alglave@ucl.ac.uk>, + * Copyright (C) 2016 Luc Maranget <luc.maranget@inria.fr> for Inria + * Copyright (C) 2017 Alan Stern <stern@rowland.harvard.edu>, + * Andrea Parri <parri.andrea@gmail.com> + * + * An earlier version of this file appeared in the companion webpage for + * "Frightening small children and disconcerting grown-ups: Concurrency + * in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern, + * which appeared in ASPLOS 2018. + *) + +"Linux-kernel memory consistency model" + +(* + * File "lock.cat" handles locks and is experimental. + * It can be replaced by include "cos.cat" for tests that do not use locks. + *) + +include "lock.cat" + +(*******************) +(* Basic relations *) +(*******************) + +(* Release Acquire *) +let acq-po = [Acquire] ; po ; [M] +let po-rel = [M] ; po ; [Release] +let po-unlock-rf-lock-po = po ; [UL] ; rf ; [LKR] ; po + +(* Fences *) +let R4rmb = R \ Noreturn (* Reads for which rmb works *) +let rmb = [R4rmb] ; fencerel(Rmb) ; [R4rmb] +let wmb = [W] ; fencerel(Wmb) ; [W] +let mb = ([M] ; fencerel(Mb) ; [M]) | + ([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) | + ([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) | + ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) | + ([M] ; po ; [UL] ; (co | po) ; [LKW] ; + fencerel(After-unlock-lock) ; [M]) +let gp = po ; [Sync-rcu | Sync-srcu] ; po? +let strong-fence = mb | gp + +let nonrw-fence = strong-fence | po-rel | acq-po +let fence = nonrw-fence | wmb | rmb +let barrier = fencerel(Barrier | Rmb | Wmb | Mb | Sync-rcu | Sync-srcu | + Before-atomic | After-atomic | Acquire | Release | + Rcu-lock | Rcu-unlock | Srcu-lock | Srcu-unlock) | + (po ; [Release]) | ([Acquire] ; po) + +(**********************************) +(* Fundamental coherence ordering *) +(**********************************) + +(* Sequential Consistency Per Variable *) +let com = rf | co | fr +acyclic po-loc | com as coherence + +(* Atomic Read-Modify-Write *) +empty rmw & (fre ; coe) as atomic + +(**********************************) +(* Instruction execution ordering *) +(**********************************) + +(* Preserved Program Order *) +let dep = addr | data +let rwdep = (dep | ctrl) ; [W] +let overwrite = co | fr +let to-w = rwdep | (overwrite & int) | (addr ; [Plain] ; wmb) +let to-r = addr | (dep ; [Marked] ; rfi) +let ppo = to-r | to-w | fence | (po-unlock-rf-lock-po & int) + +(* Propagation: Ordering from release operations and strong fences. *) +let A-cumul(r) = (rfe ; [Marked])? ; r +let cumul-fence = [Marked] ; (A-cumul(strong-fence | po-rel) | wmb | + po-unlock-rf-lock-po) ; [Marked] +let prop = [Marked] ; (overwrite & ext)? ; cumul-fence* ; + [Marked] ; rfe? ; [Marked] + +(* + * Happens Before: Ordering from the passage of time. + * No fences needed here for prop because relation confined to one process. + *) +let hb = [Marked] ; (ppo | rfe | ((prop \ id) & int)) ; [Marked] +acyclic hb as happens-before + +(****************************************) +(* Write and fence propagation ordering *) +(****************************************) + +(* Propagation: Each non-rf link needs a strong fence. 
*) +let pb = prop ; strong-fence ; hb* ; [Marked] +acyclic pb as propagation + +(*******) +(* RCU *) +(*******) + +(* + * Effects of read-side critical sections proceed from the rcu_read_unlock() + * or srcu_read_unlock() backwards on the one hand, and from the + * rcu_read_lock() or srcu_read_lock() forwards on the other hand. + * + * In the definition of rcu-fence below, the po term at the left-hand side + * of each disjunct and the po? term at the right-hand end have been factored + * out. They have been moved into the definitions of rcu-link and rb. + * This was necessary in order to apply the "& loc" tests correctly. + *) +let rcu-gp = [Sync-rcu] (* Compare with gp *) +let srcu-gp = [Sync-srcu] +let rcu-rscsi = rcu-rscs^-1 +let srcu-rscsi = srcu-rscs^-1 + +(* + * The synchronize_rcu() strong fence is special in that it can order not + * one but two non-rf relations, but only in conjunction with an RCU + * read-side critical section. + *) +let rcu-link = po? ; hb* ; pb* ; prop ; po + +(* + * Any sequence containing at least as many grace periods as RCU read-side + * critical sections (joined by rcu-link) induces order like a generalized + * inter-CPU strong fence. + * Likewise for SRCU grace periods and read-side critical sections, provided + * the synchronize_srcu() and srcu_read_[un]lock() calls refer to the same + * struct srcu_struct location. + *) +let rec rcu-order = rcu-gp | srcu-gp | + (rcu-gp ; rcu-link ; rcu-rscsi) | + ((srcu-gp ; rcu-link ; srcu-rscsi) & loc) | + (rcu-rscsi ; rcu-link ; rcu-gp) | + ((srcu-rscsi ; rcu-link ; srcu-gp) & loc) | + (rcu-gp ; rcu-link ; rcu-order ; rcu-link ; rcu-rscsi) | + ((srcu-gp ; rcu-link ; rcu-order ; rcu-link ; srcu-rscsi) & loc) | + (rcu-rscsi ; rcu-link ; rcu-order ; rcu-link ; rcu-gp) | + ((srcu-rscsi ; rcu-link ; rcu-order ; rcu-link ; srcu-gp) & loc) | + (rcu-order ; rcu-link ; rcu-order) +let rcu-fence = po ; rcu-order ; po? +let fence = fence | rcu-fence +let strong-fence = strong-fence | rcu-fence + +(* rb orders instructions just as pb does *) +let rb = prop ; rcu-fence ; hb* ; pb* ; [Marked] + +irreflexive rb as rcu + +(* + * The happens-before, propagation, and rcu constraints are all + * expressions of temporal ordering. They could be replaced by + * a single constraint on an "executes-before" relation, xb: + * + * let xb = hb | pb | rb + * acyclic xb as executes-before + *) + +(*********************************) +(* Plain accesses and data races *) +(*********************************) + +(* Warn about plain writes and marked accesses in the same region *) +let mixed-accesses = ([Plain & W] ; (po-loc \ barrier) ; [Marked]) | + ([Marked] ; (po-loc \ barrier) ; [Plain & W]) +flag ~empty mixed-accesses as mixed-accesses + +(* Executes-before and visibility *) +let xbstar = (hb | pb | rb)* +let vis = cumul-fence* ; rfe? ; [Marked] ; + ((strong-fence ; [Marked] ; xbstar) | (xbstar & int)) + +(* Boundaries for lifetimes of plain accesses *) +let w-pre-bounded = [Marked] ; (addr | fence)? +let r-pre-bounded = [Marked] ; (addr | nonrw-fence | + ([R4rmb] ; fencerel(Rmb) ; [~Noreturn]))? +let w-post-bounded = fence? ; [Marked] +let r-post-bounded = (nonrw-fence | ([~Noreturn] ; fencerel(Rmb) ; [R4rmb]))? 
; + [Marked] + +(* Visibility and executes-before for plain accesses *) +let ww-vis = fence | (strong-fence ; xbstar ; w-pre-bounded) | + (w-post-bounded ; vis ; w-pre-bounded) +let wr-vis = fence | (strong-fence ; xbstar ; r-pre-bounded) | + (w-post-bounded ; vis ; r-pre-bounded) +let rw-xbstar = fence | (r-post-bounded ; xbstar ; w-pre-bounded) + +(* Potential races *) +let pre-race = ext & ((Plain * M) | ((M \ IW) * Plain)) + +(* Coherence requirements for plain accesses *) +let wr-incoh = pre-race & rf & rw-xbstar^-1 +let rw-incoh = pre-race & fr & wr-vis^-1 +let ww-incoh = pre-race & co & ww-vis^-1 +empty (wr-incoh | rw-incoh | ww-incoh) as plain-coherence + +(* Actual races *) +let ww-nonrace = ww-vis & ((Marked * W) | rw-xbstar) & ((W * Marked) | wr-vis) +let ww-race = (pre-race & co) \ ww-nonrace +let wr-race = (pre-race & (co? ; rf)) \ wr-vis \ rw-xbstar^-1 +let rw-race = (pre-race & fr) \ rw-xbstar + +flag ~empty (ww-race | wr-race | rw-race) as data-race diff --git a/tools/memory-model/linux-kernel.cfg b/tools/memory-model/linux-kernel.cfg new file mode 100644 index 000000000..3c8098e99 --- /dev/null +++ b/tools/memory-model/linux-kernel.cfg @@ -0,0 +1,21 @@ +macros linux-kernel.def +bell linux-kernel.bell +model linux-kernel.cat +graph columns +squished true +showevents noregs +movelabel true +fontsize 8 +xscale 2.0 +yscale 1.5 +arrowsize 0.8 +showinitrf false +showfinalrf false +showinitwrites false +splines spline +pad 0.1 +edgeattr hb,color,indigo +edgeattr co,color,blue +edgeattr mb,color,darkgreen +edgeattr wmb,color,darkgreen +edgeattr rmb,color,darkgreen diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def new file mode 100644 index 000000000..ef0f3c185 --- /dev/null +++ b/tools/memory-model/linux-kernel.def @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// An earlier version of this file appeared in the companion webpage for +// "Frightening small children and disconcerting grown-ups: Concurrency +// in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern, +// which appeared in ASPLOS 2018. 
+ +// ONCE +READ_ONCE(X) __load{once}(X) +WRITE_ONCE(X,V) { __store{once}(X,V); } + +// Release Acquire and friends +smp_store_release(X,V) { __store{release}(*X,V); } +smp_load_acquire(X) __load{acquire}(*X) +rcu_assign_pointer(X,V) { __store{release}(X,V); } +rcu_dereference(X) __load{once}(X) +smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; } + +// Fences +smp_mb() { __fence{mb}; } +smp_rmb() { __fence{rmb}; } +smp_wmb() { __fence{wmb}; } +smp_mb__before_atomic() { __fence{before-atomic}; } +smp_mb__after_atomic() { __fence{after-atomic}; } +smp_mb__after_spinlock() { __fence{after-spinlock}; } +smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; } +barrier() { __fence{barrier}; } + +// Exchange +xchg(X,V) __xchg{mb}(X,V) +xchg_relaxed(X,V) __xchg{once}(X,V) +xchg_release(X,V) __xchg{release}(X,V) +xchg_acquire(X,V) __xchg{acquire}(X,V) +cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W) +cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W) +cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W) +cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W) + +// Spinlocks +spin_lock(X) { __lock(X); } +spin_unlock(X) { __unlock(X); } +spin_trylock(X) __trylock(X) +spin_is_locked(X) __islocked(X) + +// RCU +rcu_read_lock() { __fence{rcu-lock}; } +rcu_read_unlock() { __fence{rcu-unlock}; } +synchronize_rcu() { __fence{sync-rcu}; } +synchronize_rcu_expedited() { __fence{sync-rcu}; } + +// SRCU +srcu_read_lock(X) __srcu{srcu-lock}(X) +srcu_read_unlock(X,Y) { __srcu{srcu-unlock}(X,Y); } +synchronize_srcu(X) { __srcu{sync-srcu}(X); } +synchronize_srcu_expedited(X) { __srcu{sync-srcu}(X); } + +// Atomic +atomic_read(X) READ_ONCE(*X) +atomic_set(X,V) { WRITE_ONCE(*X,V); } +atomic_read_acquire(X) smp_load_acquire(X) +atomic_set_release(X,V) { smp_store_release(X,V); } + +atomic_add(V,X) { __atomic_op(X,+,V); } +atomic_sub(V,X) { __atomic_op(X,-,V); } +atomic_inc(X) { __atomic_op(X,+,1); } +atomic_dec(X) { __atomic_op(X,-,1); } + +atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V) +atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V) +atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V) +atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V) +atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V) +atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V) +atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V) +atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V) + +atomic_inc_return(X) __atomic_op_return{mb}(X,+,1) +atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1) +atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1) +atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1) +atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1) +atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1) +atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1) +atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1) + +atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V) +atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V) +atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V) +atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V) +atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V) +atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V) +atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V) +atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V) + +atomic_dec_return(X) __atomic_op_return{mb}(X,-,1) +atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1) +atomic_dec_return_acquire(X) 
__atomic_op_return{acquire}(X,-,1) +atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1) +atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1) +atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1) +atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1) +atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1) + +atomic_xchg(X,V) __xchg{mb}(X,V) +atomic_xchg_relaxed(X,V) __xchg{once}(X,V) +atomic_xchg_release(X,V) __xchg{release}(X,V) +atomic_xchg_acquire(X,V) __xchg{acquire}(X,V) +atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W) +atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W) +atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W) +atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W) + +atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0 +atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0 +atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0 +atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0 diff --git a/tools/memory-model/litmus-tests/.gitignore b/tools/memory-model/litmus-tests/.gitignore new file mode 100644 index 000000000..c492a1dda --- /dev/null +++ b/tools/memory-model/litmus-tests/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +*.litmus.out diff --git a/tools/memory-model/litmus-tests/CoRR+poonceonce+Once.litmus b/tools/memory-model/litmus-tests/CoRR+poonceonce+Once.litmus new file mode 100644 index 000000000..967f9f2a6 --- /dev/null +++ b/tools/memory-model/litmus-tests/CoRR+poonceonce+Once.litmus @@ -0,0 +1,26 @@ +C CoRR+poonceonce+Once + +(* + * Result: Never + * + * Test of read-read coherence, that is, whether or not two successive + * reads from the same variable are ordered. + *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); +} + +P1(int *x) +{ + int r0; + int r1; + + r0 = READ_ONCE(*x); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 1:r1=0) diff --git a/tools/memory-model/litmus-tests/CoRW+poonceonce+Once.litmus b/tools/memory-model/litmus-tests/CoRW+poonceonce+Once.litmus new file mode 100644 index 000000000..4635739f3 --- /dev/null +++ b/tools/memory-model/litmus-tests/CoRW+poonceonce+Once.litmus @@ -0,0 +1,25 @@ +C CoRW+poonceonce+Once + +(* + * Result: Never + * + * Test of read-write coherence, that is, whether or not a read from + * a given variable and a later write to that same variable are ordered. + *) + +{} + +P0(int *x) +{ + int r0; + + r0 = READ_ONCE(*x); + WRITE_ONCE(*x, 1); +} + +P1(int *x) +{ + WRITE_ONCE(*x, 2); +} + +exists (x=2 /\ 0:r0=2) diff --git a/tools/memory-model/litmus-tests/CoWR+poonceonce+Once.litmus b/tools/memory-model/litmus-tests/CoWR+poonceonce+Once.litmus new file mode 100644 index 000000000..bb068c92d --- /dev/null +++ b/tools/memory-model/litmus-tests/CoWR+poonceonce+Once.litmus @@ -0,0 +1,25 @@ +C CoWR+poonceonce+Once + +(* + * Result: Never + * + * Test of write-read coherence, that is, whether or not a write to a + * given variable and a later read from that same variable are ordered. + *) + +{} + +P0(int *x) +{ + int r0; + + WRITE_ONCE(*x, 1); + r0 = READ_ONCE(*x); +} + +P1(int *x) +{ + WRITE_ONCE(*x, 2); +} + +exists (x=1 /\ 0:r0=2) diff --git a/tools/memory-model/litmus-tests/CoWW+poonceonce.litmus b/tools/memory-model/litmus-tests/CoWW+poonceonce.litmus new file mode 100644 index 000000000..0d9f0a958 --- /dev/null +++ b/tools/memory-model/litmus-tests/CoWW+poonceonce.litmus @@ -0,0 +1,18 @@ +C CoWW+poonceonce + +(* + * Result: Never + * + * Test of write-write coherence, that is, whether or not two successive + * writes to the same variable are ordered. 
+ *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); + WRITE_ONCE(*x, 2); +} + +exists (x=1) diff --git a/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus b/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus new file mode 100644 index 000000000..e729d2776 --- /dev/null +++ b/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus @@ -0,0 +1,45 @@ +C IRIW+fencembonceonces+OnceOnce + +(* + * Result: Never + * + * Test of independent reads from independent writes with smp_mb() + * between each pairs of reads. In other words, is smp_mb() sufficient to + * cause two different reading processes to agree on the order of a pair + * of writes, where each write is to a different variable by a different + * process? This litmus test exercises LKMM's "propagation" rule. + *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); +} + +P1(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*x); + smp_mb(); + r1 = READ_ONCE(*y); +} + +P2(int *y) +{ + WRITE_ONCE(*y, 1); +} + +P3(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*y); + smp_mb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 1:r1=0 /\ 3:r0=1 /\ 3:r1=0) diff --git a/tools/memory-model/litmus-tests/IRIW+poonceonces+OnceOnce.litmus b/tools/memory-model/litmus-tests/IRIW+poonceonces+OnceOnce.litmus new file mode 100644 index 000000000..4b54dd6a6 --- /dev/null +++ b/tools/memory-model/litmus-tests/IRIW+poonceonces+OnceOnce.litmus @@ -0,0 +1,43 @@ +C IRIW+poonceonces+OnceOnce + +(* + * Result: Sometimes + * + * Test of independent reads from independent writes with nothing + * between each pairs of reads. In other words, is anything at all + * needed to cause two different reading processes to agree on the order + * of a pair of writes, where each write is to a different variable by a + * different process? + *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); +} + +P1(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*x); + r1 = READ_ONCE(*y); +} + +P2(int *y) +{ + WRITE_ONCE(*y, 1); +} + +P3(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*y); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 1:r1=0 /\ 3:r0=1 /\ 3:r1=0) diff --git a/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus b/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus new file mode 100644 index 000000000..094d58df7 --- /dev/null +++ b/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus @@ -0,0 +1,40 @@ +C ISA2+pooncelock+pooncelock+pombonce + +(* + * Result: Never + * + * This test shows that write-write ordering provided by locks + * (in P0() and P1()) is visible to external process P2(). 
+ *) + +{} + +P0(int *x, int *y, spinlock_t *mylock) +{ + spin_lock(mylock); + WRITE_ONCE(*x, 1); + WRITE_ONCE(*y, 1); + spin_unlock(mylock); +} + +P1(int *y, int *z, spinlock_t *mylock) +{ + int r0; + + spin_lock(mylock); + r0 = READ_ONCE(*y); + WRITE_ONCE(*z, 1); + spin_unlock(mylock); +} + +P2(int *x, int *z) +{ + int r1; + int r2; + + r2 = READ_ONCE(*z); + smp_mb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 2:r2=1 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/ISA2+poonceonces.litmus b/tools/memory-model/litmus-tests/ISA2+poonceonces.litmus new file mode 100644 index 000000000..b321aa6f4 --- /dev/null +++ b/tools/memory-model/litmus-tests/ISA2+poonceonces.litmus @@ -0,0 +1,37 @@ +C ISA2+poonceonces + +(* + * Result: Sometimes + * + * Given a release-acquire chain ordering the first process's store + * against the last process's load, is ordering preserved if all of the + * smp_store_release() invocations are replaced by WRITE_ONCE() and all + * of the smp_load_acquire() invocations are replaced by READ_ONCE()? + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + WRITE_ONCE(*y, 1); +} + +P1(int *y, int *z) +{ + int r0; + + r0 = READ_ONCE(*y); + WRITE_ONCE(*z, 1); +} + +P2(int *x, int *z) +{ + int r0; + int r1; + + r0 = READ_ONCE(*z); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus b/tools/memory-model/litmus-tests/ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus new file mode 100644 index 000000000..025b0462e --- /dev/null +++ b/tools/memory-model/litmus-tests/ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus @@ -0,0 +1,39 @@ +C ISA2+pooncerelease+poacquirerelease+poacquireonce + +(* + * Result: Never + * + * This litmus test demonstrates that a release-acquire chain suffices + * to order P0()'s initial write against P2()'s final read. The reason + * that the release-acquire chain suffices is because in all but one + * case (P2() to P0()), each process reads from the preceding process's + * write. In memory-model-speak, there is only one non-reads-from + * (AKA non-rf) link, so release-acquire is all that is needed. + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + smp_store_release(y, 1); +} + +P1(int *y, int *z) +{ + int r0; + + r0 = smp_load_acquire(y); + smp_store_release(z, 1); +} + +P2(int *x, int *z) +{ + int r0; + int r1; + + r0 = smp_load_acquire(z); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus b/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus new file mode 100644 index 000000000..4727f5aaf --- /dev/null +++ b/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus @@ -0,0 +1,34 @@ +C LB+fencembonceonce+ctrlonceonce + +(* + * Result: Never + * + * This litmus test demonstrates that lightweight ordering suffices for + * the load-buffering pattern, in other words, preventing all processes + * reading from the preceding process's write. In this example, the + * combination of a control dependency and a full memory barrier are enough + * to do the trick. (But the full memory barrier could be replaced with + * another control dependency and order would still be maintained.) 
+ *) + +{} + +P0(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*x); + if (r0) + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*y); + smp_mb(); + WRITE_ONCE(*x, 1); +} + +exists (0:r0=1 /\ 1:r0=1) diff --git a/tools/memory-model/litmus-tests/LB+poacquireonce+pooncerelease.litmus b/tools/memory-model/litmus-tests/LB+poacquireonce+pooncerelease.litmus new file mode 100644 index 000000000..07b9904b0 --- /dev/null +++ b/tools/memory-model/litmus-tests/LB+poacquireonce+pooncerelease.litmus @@ -0,0 +1,29 @@ +C LB+poacquireonce+pooncerelease + +(* + * Result: Never + * + * Does a release-acquire pair suffice for the load-buffering litmus + * test, where each process reads from one of two variables then writes + * to the other? + *) + +{} + +P0(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*x); + smp_store_release(y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = smp_load_acquire(y); + WRITE_ONCE(*x, 1); +} + +exists (0:r0=1 /\ 1:r0=1) diff --git a/tools/memory-model/litmus-tests/LB+poonceonces.litmus b/tools/memory-model/litmus-tests/LB+poonceonces.litmus new file mode 100644 index 000000000..74c49cb3c --- /dev/null +++ b/tools/memory-model/litmus-tests/LB+poonceonces.litmus @@ -0,0 +1,28 @@ +C LB+poonceonces + +(* + * Result: Sometimes + * + * Can the counter-intuitive outcome for the load-buffering pattern + * be prevented even with no explicit ordering? + *) + +{} + +P0(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*x); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*y); + WRITE_ONCE(*x, 1); +} + +exists (0:r0=1 /\ 1:r0=1) diff --git a/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus b/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus new file mode 100644 index 000000000..a273da9fa --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus @@ -0,0 +1,30 @@ +C MP+fencewmbonceonce+fencermbonceonce + +(* + * Result: Never + * + * This litmus test demonstrates that smp_wmb() and smp_rmb() provide + * sufficient ordering for the message-passing pattern. However, it + * is usually better to use smp_store_release() and smp_load_acquire(). + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + smp_wmb(); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*y); + smp_rmb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 1:r1=0) diff --git a/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus b/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus new file mode 100644 index 000000000..97731b4bb --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus @@ -0,0 +1,34 @@ +C MP+onceassign+derefonce + +(* + * Result: Never + * + * This litmus test demonstrates that rcu_assign_pointer() and + * rcu_dereference() suffice to ensure that an RCU reader will not see + * pre-initialization garbage when it traverses an RCU-protected data + * structure containing a newly inserted element. 
+ *) + +{ +y=z; +z=0; +} + +P0(int *x, int **y) +{ + WRITE_ONCE(*x, 1); + rcu_assign_pointer(*y, x); +} + +P1(int *x, int **y) +{ + int *r0; + int r1; + + rcu_read_lock(); + r0 = rcu_dereference(*y); + r1 = READ_ONCE(*r0); + rcu_read_unlock(); +} + +exists (1:r0=x /\ 1:r1=0) diff --git a/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus b/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus new file mode 100644 index 000000000..50f4d62bb --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus @@ -0,0 +1,35 @@ +C MP+polockmbonce+poacquiresilsil + +(* + * Result: Never + * + * Do spinlocks combined with smp_mb__after_spinlock() provide order + * to outside observers using spin_is_locked() to sense the lock-held + * state, ordered by acquire? Note that when the first spin_is_locked() + * returns false and the second true, we know that the smp_load_acquire() + * executed before the lock was acquired (loosely speaking). + *) + +{ +} + +P0(spinlock_t *lo, int *x) +{ + spin_lock(lo); + smp_mb__after_spinlock(); + WRITE_ONCE(*x, 1); + spin_unlock(lo); +} + +P1(spinlock_t *lo, int *x) +{ + int r1; + int r2; + int r3; + + r1 = smp_load_acquire(x); + r2 = spin_is_locked(lo); + r3 = spin_is_locked(lo); +} + +exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1) diff --git a/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus b/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus new file mode 100644 index 000000000..abf81e7a0 --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus @@ -0,0 +1,34 @@ +C MP+polockonce+poacquiresilsil + +(* + * Result: Sometimes + * + * Do spinlocks provide order to outside observers using spin_is_locked() + * to sense the lock-held state, ordered by acquire? Note that when the + * first spin_is_locked() returns false and the second true, we know that + * the smp_load_acquire() executed before the lock was acquired (loosely + * speaking). + *) + +{ +} + +P0(spinlock_t *lo, int *x) +{ + spin_lock(lo); + WRITE_ONCE(*x, 1); + spin_unlock(lo); +} + +P1(spinlock_t *lo, int *x) +{ + int r1; + int r2; + int r3; + + r1 = smp_load_acquire(x); + r2 = spin_is_locked(lo); + r3 = spin_is_locked(lo); +} + +exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1) diff --git a/tools/memory-model/litmus-tests/MP+polocks.litmus b/tools/memory-model/litmus-tests/MP+polocks.litmus new file mode 100644 index 000000000..712a4fcdf --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+polocks.litmus @@ -0,0 +1,35 @@ +C MP+polocks + +(* + * Result: Never + * + * This litmus test demonstrates how lock acquisitions and releases can + * stand in for smp_load_acquire() and smp_store_release(), respectively. + * In other words, when holding a given lock (or indeed after releasing a + * given lock), a CPU is not only guaranteed to see the accesses that other + * CPUs made while previously holding that lock, it is also guaranteed + * to see all prior accesses by those other CPUs. 
+ *) + +{} + +P0(int *x, int *y, spinlock_t *mylock) +{ + WRITE_ONCE(*x, 1); + spin_lock(mylock); + WRITE_ONCE(*y, 1); + spin_unlock(mylock); +} + +P1(int *x, int *y, spinlock_t *mylock) +{ + int r0; + int r1; + + spin_lock(mylock); + r0 = READ_ONCE(*y); + spin_unlock(mylock); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 1:r1=0) diff --git a/tools/memory-model/litmus-tests/MP+poonceonces.litmus b/tools/memory-model/litmus-tests/MP+poonceonces.litmus new file mode 100644 index 000000000..172f01453 --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+poonceonces.litmus @@ -0,0 +1,27 @@ +C MP+poonceonces + +(* + * Result: Sometimes + * + * Can the counter-intuitive message-passing outcome be prevented with + * no ordering at all? + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*y); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 1:r1=0) diff --git a/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus b/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus new file mode 100644 index 000000000..d52c68429 --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus @@ -0,0 +1,28 @@ +C MP+pooncerelease+poacquireonce + +(* + * Result: Never + * + * This litmus test demonstrates that smp_store_release() and + * smp_load_acquire() provide sufficient ordering for the message-passing + * pattern. + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + smp_store_release(y, 1); +} + +P1(int *x, int *y) +{ + int r0; + int r1; + + r0 = smp_load_acquire(y); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 1:r1=0) diff --git a/tools/memory-model/litmus-tests/MP+porevlocks.litmus b/tools/memory-model/litmus-tests/MP+porevlocks.litmus new file mode 100644 index 000000000..72c9276b3 --- /dev/null +++ b/tools/memory-model/litmus-tests/MP+porevlocks.litmus @@ -0,0 +1,35 @@ +C MP+porevlocks + +(* + * Result: Never + * + * This litmus test demonstrates how lock acquisitions and releases can + * stand in for smp_load_acquire() and smp_store_release(), respectively. + * In other words, when holding a given lock (or indeed after releasing a + * given lock), a CPU is not only guaranteed to see the accesses that other + * CPUs made while previously holding that lock, it is also guaranteed to + * see all prior accesses by those other CPUs. + *) + +{} + +P0(int *x, int *y, spinlock_t *mylock) +{ + int r0; + int r1; + + r0 = READ_ONCE(*y); + spin_lock(mylock); + r1 = READ_ONCE(*x); + spin_unlock(mylock); +} + +P1(int *x, int *y, spinlock_t *mylock) +{ + spin_lock(mylock); + WRITE_ONCE(*x, 1); + spin_unlock(mylock); + WRITE_ONCE(*y, 1); +} + +exists (0:r0=1 /\ 0:r1=0) diff --git a/tools/memory-model/litmus-tests/R+fencembonceonces.litmus b/tools/memory-model/litmus-tests/R+fencembonceonces.litmus new file mode 100644 index 000000000..222a0b850 --- /dev/null +++ b/tools/memory-model/litmus-tests/R+fencembonceonces.litmus @@ -0,0 +1,30 @@ +C R+fencembonceonces + +(* + * Result: Never + * + * This is the fully ordered (via smp_mb()) version of one of the classic + * counterintuitive litmus tests that illustrates the effects of store + * propagation delays. Note that weakening either of the barriers would + * cause the resulting test to be allowed. 
+ *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + smp_mb(); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*y, 2); + smp_mb(); + r0 = READ_ONCE(*x); +} + +exists (y=2 /\ 1:r0=0) diff --git a/tools/memory-model/litmus-tests/R+poonceonces.litmus b/tools/memory-model/litmus-tests/R+poonceonces.litmus new file mode 100644 index 000000000..5386f128a --- /dev/null +++ b/tools/memory-model/litmus-tests/R+poonceonces.litmus @@ -0,0 +1,27 @@ +C R+poonceonces + +(* + * Result: Sometimes + * + * This is the unordered (thus lacking smp_mb()) version of one of the + * classic counterintuitive litmus tests that illustrates the effects of + * store propagation delays. + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*y, 2); + r0 = READ_ONCE(*x); +} + +exists (y=2 /\ 1:r0=0) diff --git a/tools/memory-model/litmus-tests/README b/tools/memory-model/litmus-tests/README new file mode 100644 index 000000000..681f9067f --- /dev/null +++ b/tools/memory-model/litmus-tests/README @@ -0,0 +1,253 @@ +============ +LITMUS TESTS +============ + +CoRR+poonceonce+Once.litmus + Test of read-read coherence, that is, whether or not two + successive reads from the same variable are ordered. + +CoRW+poonceonce+Once.litmus + Test of read-write coherence, that is, whether or not a read + from a given variable followed by a write to that same variable + are ordered. + +CoWR+poonceonce+Once.litmus + Test of write-read coherence, that is, whether or not a write + to a given variable followed by a read from that same variable + are ordered. + +CoWW+poonceonce.litmus + Test of write-write coherence, that is, whether or not two + successive writes to the same variable are ordered. + +IRIW+fencembonceonces+OnceOnce.litmus + Test of independent reads from independent writes with smp_mb() + between each pairs of reads. In other words, is smp_mb() + sufficient to cause two different reading processes to agree on + the order of a pair of writes, where each write is to a different + variable by a different process? This litmus test is forbidden + by LKMM's propagation rule. + +IRIW+poonceonces+OnceOnce.litmus + Test of independent reads from independent writes with nothing + between each pairs of reads. In other words, is anything at all + needed to cause two different reading processes to agree on the + order of a pair of writes, where each write is to a different + variable by a different process? + +ISA2+pooncelock+pooncelock+pombonce.litmus + Tests whether the ordering provided by a lock-protected S + litmus test is visible to an external process whose accesses are + separated by smp_mb(). This addition of an external process to + S is otherwise known as ISA2. + +ISA2+poonceonces.litmus + As below, but with store-release replaced with WRITE_ONCE() + and load-acquire replaced with READ_ONCE(). + +ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus + Can a release-acquire chain order a prior store against + a later load? + +LB+fencembonceonce+ctrlonceonce.litmus + Does a control dependency and an smp_mb() suffice for the + load-buffering litmus test, where each process reads from one + of two variables then writes to the other? + +LB+poacquireonce+pooncerelease.litmus + Does a release-acquire pair suffice for the load-buffering + litmus test, where each process reads from one of two variables then + writes to the other? 
+ +LB+poonceonces.litmus + As above, but with store-release replaced with WRITE_ONCE() + and load-acquire replaced with READ_ONCE(). + +MP+onceassign+derefonce.litmus + As below, but with rcu_assign_pointer() and an rcu_dereference(). + +MP+polockmbonce+poacquiresilsil.litmus + Protect the access with a lock and an smp_mb__after_spinlock() + in one process, and use an acquire load followed by a pair of + spin_is_locked() calls in the other process. + +MP+polockonce+poacquiresilsil.litmus + Protect the access with a lock in one process, and use an + acquire load followed by a pair of spin_is_locked() calls + in the other process. + +MP+polocks.litmus + As below, but with the second access of the writer process + and the first access of reader process protected by a lock. + +MP+poonceonces.litmus + As below, but without the smp_rmb() and smp_wmb(). + +MP+pooncerelease+poacquireonce.litmus + As below, but with a release-acquire chain. + +MP+porevlocks.litmus + As below, but with the first access of the writer process + and the second access of reader process protected by a lock. + +MP+fencewmbonceonce+fencermbonceonce.litmus + Does a smp_wmb() (between the stores) and an smp_rmb() (between + the loads) suffice for the message-passing litmus test, where one + process writes data and then a flag, and the other process reads + the flag and then the data. (This is similar to the ISA2 tests, + but with two processes instead of three.) + +R+fencembonceonces.litmus + This is the fully ordered (via smp_mb()) version of one of + the classic counterintuitive litmus tests that illustrates the + effects of store propagation delays. + +R+poonceonces.litmus + As above, but without the smp_mb() invocations. + +SB+fencembonceonces.litmus + This is the fully ordered (again, via smp_mb() version of store + buffering, which forms the core of Dekker's mutual-exclusion + algorithm. + +SB+poonceonces.litmus + As above, but without the smp_mb() invocations. + +SB+rfionceonce-poonceonces.litmus + This litmus test demonstrates that LKMM is not fully multicopy + atomic. (Neither is it other multicopy atomic.) This litmus test + also demonstrates the "locations" debugging aid, which designates + additional registers and locations to be printed out in the dump + of final states in the herd7 output. Without the "locations" + statement, only those registers and locations mentioned in the + "exists" clause will be printed. + +S+poonceonces.litmus + As below, but without the smp_wmb() and acquire load. + +S+fencewmbonceonce+poacquireonce.litmus + Can a smp_wmb(), instead of a release, and an acquire order + a prior store against a subsequent store? + +WRC+poonceonces+Once.litmus +WRC+pooncerelease+fencermbonceonce+Once.litmus + These two are members of an extension of the MP litmus-test + class in which the first write is moved to a separate process. + The second is forbidden because smp_store_release() is + A-cumulative in LKMM. + +Z6.0+pooncelock+pooncelock+pombonce.litmus + Is the ordering provided by a spin_unlock() and a subsequent + spin_lock() sufficient to make ordering apparent to accesses + by a process not holding the lock? + +Z6.0+pooncelock+poonceLock+pombonce.litmus + As above, but with smp_mb__after_spinlock() immediately + following the spin_lock(). + +Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus + Is the ordering provided by a release-acquire chain sufficient + to make ordering apparent to accesses by a process that does + not participate in that release-acquire chain? 
+ +A great many more litmus tests are available here: + + https://github.com/paulmckrcu/litmus + +================== +LITMUS TEST NAMING +================== + +Litmus tests are usually named based on their contents, which means that +looking at the name tells you what the litmus test does. The naming +scheme covers litmus tests having a single cycle that passes through +each process exactly once, so litmus tests not fitting this description +are named on an ad-hoc basis. + +The structure of a litmus-test name is the litmus-test class, a plus +sign ("+"), and one string for each process, separated by plus signs. +The end of the name is ".litmus". + +The litmus-test classes may be found in the infamous test6.pdf: +https://www.cl.cam.ac.uk/~pes20/ppc-supplemental/test6.pdf +Each class defines the pattern of accesses and of the variables accessed. +For example, if the one process writes to a pair of variables, and +the other process reads from these same variables, the corresponding +litmus-test class is "MP" (message passing), which may be found on the +left-hand end of the second row of tests on page one of test6.pdf. + +The strings used to identify the actions carried out by each process are +complex due to a desire to have short(er) names. Thus, there is a tool to +generate these strings from a given litmus test's actions. For example, +consider the processes from SB+rfionceonce-poonceonces.litmus: + + P0(int *x, int *y) + { + int r1; + int r2; + + WRITE_ONCE(*x, 1); + r1 = READ_ONCE(*x); + r2 = READ_ONCE(*y); + } + + P1(int *x, int *y) + { + int r3; + int r4; + + WRITE_ONCE(*y, 1); + r3 = READ_ONCE(*y); + r4 = READ_ONCE(*x); + } + +The next step is to construct a space-separated list of descriptors, +interleaving descriptions of the relation between a pair of consecutive +accesses with descriptions of the second access in the pair. + +P0()'s WRITE_ONCE() is read by its first READ_ONCE(), which is a +reads-from link (rf) and internal to the P0() process. This is +"rfi", which is an abbreviation for "reads-from internal". Because +some of the tools string these abbreviations together with space +characters separating processes, the first character is capitalized, +resulting in "Rfi". + +P0()'s second access is a READ_ONCE(), as opposed to (for example) +smp_load_acquire(), so next is "Once". Thus far, we have "Rfi Once". + +P0()'s third access is also a READ_ONCE(), but to y rather than x. +This is related to P0()'s second access by program order ("po"), +to a different variable ("d"), and both accesses are reads ("RR"). +The resulting descriptor is "PodRR". Because P0()'s third access is +READ_ONCE(), we add another "Once" descriptor. + +A from-read ("fre") relation links P0()'s third to P1()'s first +access, and the resulting descriptor is "Fre". P1()'s first access is +WRITE_ONCE(), which as before gives the descriptor "Once". The string +thus far is thus "Rfi Once PodRR Once Fre Once". + +The remainder of P1() is similar to P0(), which means we add +"Rfi Once PodRR Once". Another fre links P1()'s last access to +P0()'s first access, which is WRITE_ONCE(), so we add "Fre Once". 
+The full string is thus: + + Rfi Once PodRR Once Fre Once Rfi Once PodRR Once Fre Once + +This string can be given to the "norm7" and "classify7" tools to +produce the name: + + $ norm7 -bell linux-kernel.bell \ + Rfi Once PodRR Once Fre Once Rfi Once PodRR Once Fre Once | \ + sed -e 's/:.*//g' + SB+rfionceonce-poonceonces + +Adding the ".litmus" suffix: SB+rfionceonce-poonceonces.litmus + +The descriptors that describe connections between consecutive accesses +within the cycle through a given litmus test can be provided by the herd7 +tool (Rfi, Po, Fre, and so on) or by the linux-kernel.bell file (Once, +Release, Acquire, and so on). + +To see the full list of descriptors, execute the following command: + + $ diyone7 -bell linux-kernel.bell -show edges diff --git a/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus b/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus new file mode 100644 index 000000000..18479823c --- /dev/null +++ b/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus @@ -0,0 +1,27 @@ +C S+fencewmbonceonce+poacquireonce + +(* + * Result: Never + * + * Can a smp_wmb(), instead of a release, and an acquire order a prior + * store against a subsequent store? + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 2); + smp_wmb(); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = smp_load_acquire(y); + WRITE_ONCE(*x, 1); +} + +exists (x=2 /\ 1:r0=1) diff --git a/tools/memory-model/litmus-tests/S+poonceonces.litmus b/tools/memory-model/litmus-tests/S+poonceonces.litmus new file mode 100644 index 000000000..8c9c2f81a --- /dev/null +++ b/tools/memory-model/litmus-tests/S+poonceonces.litmus @@ -0,0 +1,28 @@ +C S+poonceonces + +(* + * Result: Sometimes + * + * Starting with a two-process release-acquire chain ordering P0()'s + * first store against P1()'s final load, if the smp_store_release() + * is replaced by WRITE_ONCE() and the smp_load_acquire() replaced by + * READ_ONCE(), is ordering preserved? + *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 2); + WRITE_ONCE(*y, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*y); + WRITE_ONCE(*x, 1); +} + +exists (x=2 /\ 1:r0=1) diff --git a/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus b/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus new file mode 100644 index 000000000..ed5fff18d --- /dev/null +++ b/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus @@ -0,0 +1,32 @@ +C SB+fencembonceonces + +(* + * Result: Never + * + * This litmus test demonstrates that full memory barriers suffice to + * order the store-buffering pattern, where each process writes to the + * variable that the preceding process reads. (Locking and RCU can also + * suffice, but not much else.) + *) + +{} + +P0(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*x, 1); + smp_mb(); + r0 = READ_ONCE(*y); +} + +P1(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*y, 1); + smp_mb(); + r0 = READ_ONCE(*x); +} + +exists (0:r0=0 /\ 1:r0=0) diff --git a/tools/memory-model/litmus-tests/SB+poonceonces.litmus b/tools/memory-model/litmus-tests/SB+poonceonces.litmus new file mode 100644 index 000000000..10d550730 --- /dev/null +++ b/tools/memory-model/litmus-tests/SB+poonceonces.litmus @@ -0,0 +1,29 @@ +C SB+poonceonces + +(* + * Result: Sometimes + * + * This litmus test demonstrates that at least some ordering is required + * to order the store-buffering pattern, where each process writes to the + * variable that the preceding process reads. 
+ *) + +{} + +P0(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*x, 1); + r0 = READ_ONCE(*y); +} + +P1(int *x, int *y) +{ + int r0; + + WRITE_ONCE(*y, 1); + r0 = READ_ONCE(*x); +} + +exists (0:r0=0 /\ 1:r0=0) diff --git a/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus b/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus new file mode 100644 index 000000000..04a166036 --- /dev/null +++ b/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus @@ -0,0 +1,32 @@ +C SB+rfionceonce-poonceonces + +(* + * Result: Sometimes + * + * This litmus test demonstrates that LKMM is not fully multicopy atomic. + *) + +{} + +P0(int *x, int *y) +{ + int r1; + int r2; + + WRITE_ONCE(*x, 1); + r1 = READ_ONCE(*x); + r2 = READ_ONCE(*y); +} + +P1(int *x, int *y) +{ + int r3; + int r4; + + WRITE_ONCE(*y, 1); + r3 = READ_ONCE(*y); + r4 = READ_ONCE(*x); +} + +locations [0:r1; 1:r3; x; y] (* Debug aid: Print things not in "exists". *) +exists (0:r2=0 /\ 1:r4=0) diff --git a/tools/memory-model/litmus-tests/WRC+poonceonces+Once.litmus b/tools/memory-model/litmus-tests/WRC+poonceonces+Once.litmus new file mode 100644 index 000000000..6a2bc12a1 --- /dev/null +++ b/tools/memory-model/litmus-tests/WRC+poonceonces+Once.litmus @@ -0,0 +1,35 @@ +C WRC+poonceonces+Once + +(* + * Result: Sometimes + * + * This litmus test is an extension of the message-passing pattern, + * where the first write is moved to a separate process. Note that this + * test has no ordering at all. + *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*x); + WRITE_ONCE(*y, 1); +} + +P2(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*y); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus b/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus new file mode 100644 index 000000000..e9947250d --- /dev/null +++ b/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus @@ -0,0 +1,38 @@ +C WRC+pooncerelease+fencermbonceonce+Once + +(* + * Result: Never + * + * This litmus test is an extension of the message-passing pattern, where + * the first write is moved to a separate process. Because it features + * a release and a read memory barrier, it should be forbidden. More + * specifically, this litmus test is forbidden because smp_store_release() + * is A-cumulative in LKMM. + *) + +{} + +P0(int *x) +{ + WRITE_ONCE(*x, 1); +} + +P1(int *x, int *y) +{ + int r0; + + r0 = READ_ONCE(*x); + smp_store_release(y, 1); +} + +P2(int *x, int *y) +{ + int r0; + int r1; + + r0 = READ_ONCE(*y); + smp_rmb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/Z6.0+pooncelock+poonceLock+pombonce.litmus b/tools/memory-model/litmus-tests/Z6.0+pooncelock+poonceLock+pombonce.litmus new file mode 100644 index 000000000..415248fb6 --- /dev/null +++ b/tools/memory-model/litmus-tests/Z6.0+pooncelock+poonceLock+pombonce.litmus @@ -0,0 +1,42 @@ +C Z6.0+pooncelock+poonceLock+pombonce + +(* + * Result: Never + * + * This litmus test demonstrates how smp_mb__after_spinlock() may be + * used to ensure that accesses in different critical sections for a + * given lock running on different CPUs are nevertheless seen in order + * by CPUs not holding that lock. 
+ *) + +{} + +P0(int *x, int *y, spinlock_t *mylock) +{ + spin_lock(mylock); + WRITE_ONCE(*x, 1); + WRITE_ONCE(*y, 1); + spin_unlock(mylock); +} + +P1(int *y, int *z, spinlock_t *mylock) +{ + int r0; + + spin_lock(mylock); + smp_mb__after_spinlock(); + r0 = READ_ONCE(*y); + WRITE_ONCE(*z, 1); + spin_unlock(mylock); +} + +P2(int *x, int *z) +{ + int r1; + + WRITE_ONCE(*z, 2); + smp_mb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ z=2 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/Z6.0+pooncelock+pooncelock+pombonce.litmus b/tools/memory-model/litmus-tests/Z6.0+pooncelock+pooncelock+pombonce.litmus new file mode 100644 index 000000000..10a2aa04c --- /dev/null +++ b/tools/memory-model/litmus-tests/Z6.0+pooncelock+pooncelock+pombonce.litmus @@ -0,0 +1,40 @@ +C Z6.0+pooncelock+pooncelock+pombonce + +(* + * Result: Sometimes + * + * This example demonstrates that a pair of accesses made by different + * processes each while holding a given lock will not necessarily be + * seen as ordered by a third process not holding that lock. + *) + +{} + +P0(int *x, int *y, spinlock_t *mylock) +{ + spin_lock(mylock); + WRITE_ONCE(*x, 1); + WRITE_ONCE(*y, 1); + spin_unlock(mylock); +} + +P1(int *y, int *z, spinlock_t *mylock) +{ + int r0; + + spin_lock(mylock); + r0 = READ_ONCE(*y); + WRITE_ONCE(*z, 1); + spin_unlock(mylock); +} + +P2(int *x, int *z) +{ + int r1; + + WRITE_ONCE(*z, 2); + smp_mb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ z=2 /\ 2:r1=0) diff --git a/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus b/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus new file mode 100644 index 000000000..88e70b87a --- /dev/null +++ b/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus @@ -0,0 +1,42 @@ +C Z6.0+pooncerelease+poacquirerelease+fencembonceonce + +(* + * Result: Sometimes + * + * This litmus test shows that a release-acquire chain, while sufficient + * when there is but one non-reads-from (AKA non-rf) link, does not suffice + * if there is more than one. Of the three processes, only P1() reads from + * P0's write, which means that there are two non-rf links: P1() to P2() + * is a write-to-write link (AKA a "coherence" or just "co" link) and P2() + * to P0() is a read-to-write link (AKA a "from-reads" or just "fr" link). + * When there are two or more non-rf links, you typically will need one + * full barrier for each non-rf link. (Exceptions include some cases + * involving locking.) 
+ *) + +{} + +P0(int *x, int *y) +{ + WRITE_ONCE(*x, 1); + smp_store_release(y, 1); +} + +P1(int *y, int *z) +{ + int r0; + + r0 = smp_load_acquire(y); + smp_store_release(z, 1); +} + +P2(int *x, int *z) +{ + int r1; + + WRITE_ONCE(*z, 2); + smp_mb(); + r1 = READ_ONCE(*x); +} + +exists (1:r0=1 /\ z=2 /\ 2:r1=0) diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat new file mode 100644 index 000000000..6b52f365d --- /dev/null +++ b/tools/memory-model/lock.cat @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0+ +(* + * Copyright (C) 2016 Luc Maranget <luc.maranget@inria.fr> for Inria + * Copyright (C) 2017 Alan Stern <stern@rowland.harvard.edu> + *) + +(* + * Generate coherence orders and handle lock operations + *) + +include "cross.cat" + +(* + * The lock-related events generated by herd7 are as follows: + * + * LKR Lock-Read: the read part of a spin_lock() or successful + * spin_trylock() read-modify-write event pair + * LKW Lock-Write: the write part of a spin_lock() or successful + * spin_trylock() RMW event pair + * UL Unlock: a spin_unlock() event + * LF Lock-Fail: a failed spin_trylock() event + * RL Read-Locked: a spin_is_locked() event which returns True + * RU Read-Unlocked: a spin_is_locked() event which returns False + * + * LKR and LKW events always come paired, like all RMW event sequences. + * + * LKR, LF, RL, and RU are read events; LKR has Acquire ordering. + * LKW and UL are write events; UL has Release ordering. + * LKW, LF, RL, and RU have no ordering properties. + *) + +(* Backward compatibility *) +let RL = try RL with emptyset +let RU = try RU with emptyset + +(* Treat RL as a kind of LF: a read with no ordering properties *) +let LF = LF | RL + +(* There should be no ordinary R or W accesses to spinlocks *) +let ALL-LOCKS = LKR | LKW | UL | LF | RU +flag ~empty [M \ IW] ; loc ; [ALL-LOCKS] as mixed-lock-accesses + +(* Link Lock-Reads to their RMW-partner Lock-Writes *) +let lk-rmw = ([LKR] ; po-loc ; [LKW]) \ (po ; po) +let rmw = rmw | lk-rmw + +(* The litmus test is invalid if an LKR/LKW event is not part of an RMW pair *) +flag ~empty LKW \ range(lk-rmw) as unpaired-LKW +flag ~empty LKR \ domain(lk-rmw) as unpaired-LKR + +(* + * An LKR must always see an unlocked value; spin_lock() calls nested + * inside a critical section (for the same lock) always deadlock. + *) +empty ([LKW] ; po-loc ; [LKR]) \ (po-loc ; [UL] ; po-loc) as lock-nest + +(* The final value of a spinlock should not be tested *) +flag ~empty [FW] ; loc ; [ALL-LOCKS] as lock-final + +(* + * Put lock operations in their appropriate classes, but leave UL out of W + * until after the co relation has been generated. 
+ *) +let R = R | LKR | LF | RU +let W = W | LKW + +let Release = Release | UL +let Acquire = Acquire | LKR + +(* Match LKW events to their corresponding UL events *) +let critical = ([LKW] ; po-loc ; [UL]) \ (po-loc ; [LKW | UL] ; po-loc) + +flag ~empty UL \ range(critical) as unmatched-unlock + +(* Allow up to one unmatched LKW per location; more must deadlock *) +let UNMATCHED-LKW = LKW \ domain(critical) +empty ([UNMATCHED-LKW] ; loc ; [UNMATCHED-LKW]) \ id as unmatched-locks + +(* rfi for LF events: link each LKW to the LF events in its critical section *) +let rfi-lf = ([LKW] ; po-loc ; [LF]) \ ([LKW] ; po-loc ; [UL] ; po-loc) + +(* rfe for LF events *) +let all-possible-rfe-lf = + (* + * Given an LF event r, compute the possible rfe edges for that event + * (all those starting from LKW events in other threads), + * and then convert that relation to a set of single-edge relations. + *) + let possible-rfe-lf r = + let pair-to-relation p = p ++ 0 + in map pair-to-relation ((LKW * {r}) & loc & ext) + (* Do this for each LF event r that isn't in rfi-lf *) + in map possible-rfe-lf (LF \ range(rfi-lf)) + +(* Generate all rf relations for LF events *) +with rfe-lf from cross(all-possible-rfe-lf) +let rf-lf = rfe-lf | rfi-lf + +(* + * RU, i.e., spin_is_locked() returning False, is slightly different. + * We rely on the memory model to rule out cases where spin_is_locked() + * within one of the lock's critical sections returns False. + *) + +(* rfi for RU events: an RU may read from the last po-previous UL *) +let rfi-ru = ([UL] ; po-loc ; [RU]) \ ([UL] ; po-loc ; [LKW] ; po-loc) + +(* rfe for RU events: an RU may read from an external UL or the initial write *) +let all-possible-rfe-ru = + let possible-rfe-ru r = + let pair-to-relation p = p ++ 0 + in map pair-to-relation (((UL | IW) * {r}) & loc & ext) + in map possible-rfe-ru RU + +(* Generate all rf relations for RU events *) +with rfe-ru from cross(all-possible-rfe-ru) +let rf-ru = rfe-ru | rfi-ru + +(* Final rf relation *) +let rf = rf | rf-lf | rf-ru + +(* Generate all co relations, including LKW events but not UL *) +let co0 = co0 | ([IW] ; loc ; [LKW]) | + (([LKW] ; loc ; [UNMATCHED-LKW]) \ [UNMATCHED-LKW]) +include "cos-opt.cat" +let W = W | UL +let M = R | W + +(* Merge UL events into co *) +let co = (co | critical | (critical^-1 ; co))+ +let coe = co & ext +let coi = co & int + +(* Merge LKR events into rf *) +let rf = rf | ([IW | UL] ; singlestep(co) ; lk-rmw^-1) +let rfe = rf & ext +let rfi = rf & int + +let fr = rf^-1 ; co +let fre = fr & ext +let fri = fr & int + +show co,rf,fr diff --git a/tools/memory-model/scripts/README b/tools/memory-model/scripts/README new file mode 100644 index 000000000..095c7eb36 --- /dev/null +++ b/tools/memory-model/scripts/README @@ -0,0 +1,70 @@ + ============ + LKMM SCRIPTS + ============ + + +These scripts are run from the tools/memory-model directory. + +checkalllitmus.sh + + Run all litmus tests in the litmus-tests directory, checking + the results against the expected results recorded in the + "Result:" comment lines. + +checkghlitmus.sh + + Run all litmus tests in the https://github.com/paulmckrcu/litmus + archive that are C-language and that have "Result:" comment lines + documenting expected results, comparing the actual results to + those expected. + +checklitmushist.sh + + Run all litmus tests having .litmus.out files from previous + initlitmushist.sh or newlitmushist.sh runs, comparing the + herd7 output to that of the original runs. 
+ +checklitmus.sh + + Check a single litmus test against its "Result:" expected result. + +cmplitmushist.sh + + Compare output from two different runs of the same litmus tests, + with the absolute pathnames of the tests to run provided one + name per line on standard input. Not normally run manually, + provided instead for use by other scripts. + +initlitmushist.sh + + Run all litmus tests having no more than the specified number + of processes given a specified timeout, recording the results + in .litmus.out files. + +judgelitmus.sh + + Given a .litmus file and its .litmus.out herd7 output, check the + .litmus.out file against the .litmus file's "Result:" comment to + judge whether the test ran correctly. Not normally run manually, + provided instead for use by other scripts. + +newlitmushist.sh + + For all new or updated litmus tests having no more than the + specified number of processes given a specified timeout, run + and record the results in .litmus.out files. + +parseargs.sh + + Parse command-line arguments. Not normally run manually, + provided instead for use by other scripts. + +runlitmushist.sh + + Run the litmus tests whose absolute pathnames are provided one + name per line on standard input. Not normally run manually, + provided instead for use by other scripts. + +README + + This file diff --git a/tools/memory-model/scripts/checkalllitmus.sh b/tools/memory-model/scripts/checkalllitmus.sh new file mode 100755 index 000000000..3c0c7fbbd --- /dev/null +++ b/tools/memory-model/scripts/checkalllitmus.sh @@ -0,0 +1,66 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Run herd7 tests on all .litmus files in the litmus-tests directory +# and check each file's result against a "Result:" comment within that +# litmus test. If the verification result does not match that specified +# in the litmus test, this script prints an error message prefixed with +# "^^^". It also outputs verification results to a file whose name is +# that of the specified litmus test, but with ".out" appended. +# +# Usage: +# checkalllitmus.sh +# +# Run this in the directory containing the memory model. +# +# This script makes no attempt to run the litmus tests concurrently. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +. scripts/parseargs.sh + +litmusdir=litmus-tests +if test -d "$litmusdir" -a -r "$litmusdir" -a -x "$litmusdir" +then + : +else + echo ' --- ' error: $litmusdir is not an accessible directory + exit 255 +fi + +# Create any new directories that have appeared in the github litmus +# repo since the last run. +if test "$LKMM_DESTDIR" != "." +then + find $litmusdir -type d -print | + ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh ) +fi + +# Find the checklitmus script. If it is not where we expect it, then +# assume that the caller has the PATH environment variable set +# appropriately. +if test -x scripts/checklitmus.sh +then + clscript=scripts/checklitmus.sh +else + clscript=checklitmus.sh +fi + +# Run the script on all the litmus tests in the specified directory +ret=0 +for i in $litmusdir/*.litmus +do + if ! $clscript $i + then + ret=1 + fi +done +if test "$ret" -ne 0 +then + echo " ^^^ VERIFICATION MISMATCHES" 1>&2 +else + echo All litmus tests verified as was expected. 
1>&2 +fi +exit $ret diff --git a/tools/memory-model/scripts/checkghlitmus.sh b/tools/memory-model/scripts/checkghlitmus.sh new file mode 100755 index 000000000..6589fbb6f --- /dev/null +++ b/tools/memory-model/scripts/checkghlitmus.sh @@ -0,0 +1,65 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Runs the C-language litmus tests having a maximum number of processes +# to run, defaults to 6. +# +# sh checkghlitmus.sh +# +# Run from the Linux kernel tools/memory-model directory. See the +# parseargs.sh scripts for arguments. + +. scripts/parseargs.sh + +T=/tmp/checkghlitmus.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +# Clone the repository if it is not already present. +if test -d litmus +then + : +else + git clone https://github.com/paulmckrcu/litmus + ( cd litmus; git checkout origin/master ) +fi + +# Create any new directories that have appeared in the github litmus +# repo since the last run. +if test "$LKMM_DESTDIR" != "." +then + find litmus -type d -print | + ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh ) +fi + +# Create a list of the C-language litmus tests previously run. +( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) | + sed -e 's/\.out$//' | + xargs -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' | + xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already + +# Create a list of C-language litmus tests with "Result:" commands and +# no more than the specified number of processes. +find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C +xargs < $T/list-C -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' > $T/list-C-result +xargs < $T/list-C-result -r grep -L "^P${LKMM_PROCS}" > $T/list-C-result-short + +# Form list of tests without corresponding .litmus.out files +sort $T/list-C-already $T/list-C-result-short | uniq -u > $T/list-C-needed + +# Run any needed tests. +if scripts/runlitmushist.sh < $T/list-C-needed > $T/run.stdout 2> $T/run.stderr +then + errs= +else + errs=1 +fi + +sed < $T/list-C-result-short -e 's,^,scripts/judgelitmus.sh ,' | + sh > $T/judge.stdout 2> $T/judge.stderr + +if test -n "$errs" +then + cat $T/run.stderr 1>&2 +fi +grep '!!!' $T/judge.stdout diff --git a/tools/memory-model/scripts/checklitmus.sh b/tools/memory-model/scripts/checklitmus.sh new file mode 100755 index 000000000..11461ed40 --- /dev/null +++ b/tools/memory-model/scripts/checklitmus.sh @@ -0,0 +1,34 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Run a herd7 test and invokes judgelitmus.sh to check the result against +# a "Result:" comment within the litmus test. It also outputs verification +# results to a file whose name is that of the specified litmus test, but +# with ".out" appended. +# +# Usage: +# checklitmus.sh file.litmus +# +# Run this in the directory containing the memory model, specifying the +# pathname of the litmus test to check. The caller is expected to have +# properly set up the LKMM environment variables. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. 
McKenney <paulmck@linux.vnet.ibm.com> + +litmus=$1 +herdoptions=${LKMM_HERD_OPTIONS--conf linux-kernel.cfg} + +if test -f "$litmus" -a -r "$litmus" +then + : +else + echo ' --- ' error: \"$litmus\" is not a readable file + exit 255 +fi + +echo Herd options: $herdoptions > $LKMM_DESTDIR/$litmus.out +/usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $litmus >> $LKMM_DESTDIR/$litmus.out 2>&1 + +scripts/judgelitmus.sh $litmus diff --git a/tools/memory-model/scripts/checklitmushist.sh b/tools/memory-model/scripts/checklitmushist.sh new file mode 100755 index 000000000..1d210ffb7 --- /dev/null +++ b/tools/memory-model/scripts/checklitmushist.sh @@ -0,0 +1,60 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Reruns the C-language litmus tests previously run that match the +# specified criteria, and compares the result to that of the previous +# runs from initlitmushist.sh and/or newlitmushist.sh. +# +# sh checklitmushist.sh +# +# Run from the Linux kernel tools/memory-model directory. +# See scripts/parseargs.sh for list of arguments. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +. scripts/parseargs.sh + +T=/tmp/checklitmushist.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +if test -d litmus +then + : +else + echo Run scripts/initlitmushist.sh first, need litmus repo. + exit 1 +fi + +# Create the results directory and populate it with subdirectories. +# The initial output is created here to avoid clobbering the output +# generated earlier. +mkdir $T/results +find litmus -type d -print | ( cd $T/results; sed -e 's/^/mkdir -p /' | sh ) + +# Create the list of litmus tests already run, then remove those that +# are excluded by this run's --procs argument. +( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) | + sed -e 's/\.out$//' | + xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already +xargs < $T/list-C-already -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short + +# Redirect output, run tests, then restore destination directory. +destdir="$LKMM_DESTDIR" +LKMM_DESTDIR=$T/results; export LKMM_DESTDIR +scripts/runlitmushist.sh < $T/list-C-short > $T/runlitmushist.sh.out 2>&1 +LKMM_DESTDIR="$destdir"; export LKMM_DESTDIR + +# Move the newly generated .litmus.out files to .litmus.out.new files +# in the destination directory. +cdir=`pwd` +ddir=`awk -v c="$cdir" -v d="$LKMM_DESTDIR" \ + 'END { if (d ~ /^\//) print d; else print c "/" d; }' < /dev/null` +( cd $T/results; find litmus -type f -name '*.litmus.out' -print | + sed -e 's,^.*$,cp & '"$ddir"'/&.new,' | sh ) + +sed < $T/list-C-short -e 's,^,'"$LKMM_DESTDIR/"',' | + sh scripts/cmplitmushist.sh +exit $? diff --git a/tools/memory-model/scripts/cmplitmushist.sh b/tools/memory-model/scripts/cmplitmushist.sh new file mode 100755 index 000000000..0f498aeec --- /dev/null +++ b/tools/memory-model/scripts/cmplitmushist.sh @@ -0,0 +1,87 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Compares .out and .out.new files for each name on standard input, +# one full pathname per line. Outputs comparison results followed by +# a summary. 
+# +# sh cmplitmushist.sh + +T=/tmp/cmplitmushist.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +# comparetest oldpath newpath +perfect=0 +obsline=0 +noobsline=0 +obsresult=0 +badcompare=0 +comparetest () { + grep -v 'maxresident)k\|minor)pagefaults\|^Time' $1 > $T/oldout + grep -v 'maxresident)k\|minor)pagefaults\|^Time' $2 > $T/newout + if cmp -s $T/oldout $T/newout && grep -q '^Observation' $1 + then + echo Exact output match: $2 + perfect=`expr "$perfect" + 1` + return 0 + fi + + grep '^Observation' $1 > $T/oldout + grep '^Observation' $2 > $T/newout + if test -s $T/oldout -o -s $T/newout + then + if cmp -s $T/oldout $T/newout + then + echo Matching Observation result and counts: $2 + obsline=`expr "$obsline" + 1` + return 0 + fi + else + echo Missing Observation line "(e.g., herd7 timeout)": $2 + noobsline=`expr "$noobsline" + 1` + return 0 + fi + + grep '^Observation' $1 | awk '{ print $3 }' > $T/oldout + grep '^Observation' $2 | awk '{ print $3 }' > $T/newout + if cmp -s $T/oldout $T/newout + then + echo Matching Observation Always/Sometimes/Never result: $2 + obsresult=`expr "$obsresult" + 1` + return 0 + fi + echo ' !!!' Result changed: $2 + badcompare=`expr "$badcompare" + 1` + return 1 +} + +sed -e 's/^.*$/comparetest &.out &.out.new/' > $T/cmpscript +. $T/cmpscript > $T/cmpscript.out +cat $T/cmpscript.out + +echo ' ---' Summary: 1>&2 +grep '!!!' $T/cmpscript.out 1>&2 +if test "$perfect" -ne 0 +then + echo Exact output matches: $perfect 1>&2 +fi +if test "$obsline" -ne 0 +then + echo Matching Observation result and counts: $obsline 1>&2 +fi +if test "$noobsline" -ne 0 +then + echo Missing Observation line "(e.g., herd7 timeout)": $noobsline 1>&2 +fi +if test "$obsresult" -ne 0 +then + echo Matching Observation Always/Sometimes/Never result: $obsresult 1>&2 +fi +if test "$badcompare" -ne 0 +then + echo "!!!" Result changed: $badcompare 1>&2 + exit 1 +fi + +exit 0 diff --git a/tools/memory-model/scripts/initlitmushist.sh b/tools/memory-model/scripts/initlitmushist.sh new file mode 100755 index 000000000..956b69574 --- /dev/null +++ b/tools/memory-model/scripts/initlitmushist.sh @@ -0,0 +1,68 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Runs the C-language litmus tests matching the specified criteria. +# Generates the output for each .litmus file into a corresponding +# .litmus.out file, and does not judge the result. +# +# sh initlitmushist.sh +# +# Run from the Linux kernel tools/memory-model directory. +# See scripts/parseargs.sh for list of arguments. +# +# This script can consume significant wallclock time and CPU, especially as +# the value of --procs rises. On a four-core (eight hardware threads) +# 2.5GHz x86 with a one-minute per-run timeout: +# +# --procs wallclock CPU timeouts tests +# 1 0m11.241s 0m1.086s 0 19 +# 2 1m12.598s 2m8.459s 2 393 +# 3 1m30.007s 6m2.479s 4 2291 +# 4 3m26.042s 18m5.139s 9 3217 +# 5 4m26.661s 23m54.128s 13 3784 +# 6 4m41.900s 26m4.721s 13 4352 +# 7 5m51.463s 35m50.868s 13 4626 +# 8 10m5.235s 68m43.672s 34 5117 +# 9 15m57.80s 105m58.101s 69 5156 +# 10 16m14.13s 103m35.009s 69 5165 +# 20 27m48.55s 198m3.286s 156 5269 +# +# Increasing the timeout on the 20-process run to five minutes increases +# the runtime to about 90 minutes with the CPU time rising to about +# 10 hours. On the other hand, it decreases the number of timeouts to 101. 
+# +# Note that there are historical tests for which herd7 will fail +# completely, for example, litmus/manual/atomic/C-unlock-wait-00.litmus +# contains a call to spin_unlock_wait(), which no longer exists in either +# the kernel or LKMM. + +. scripts/parseargs.sh + +T=/tmp/initlitmushist.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +if test -d litmus +then + : +else + git clone https://github.com/paulmckrcu/litmus + ( cd litmus; git checkout origin/master ) +fi + +# Create any new directories that have appeared in the github litmus +# repo since the last run. +if test "$LKMM_DESTDIR" != "." +then + find litmus -type d -print | + ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh ) +fi + +# Create a list of the C-language litmus tests with no more than the +# specified number of processes (per the --procs argument). +find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C +xargs < $T/list-C -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short + +scripts/runlitmushist.sh < $T/list-C-short + +exit 0 diff --git a/tools/memory-model/scripts/judgelitmus.sh b/tools/memory-model/scripts/judgelitmus.sh new file mode 100755 index 000000000..0cc63875e --- /dev/null +++ b/tools/memory-model/scripts/judgelitmus.sh @@ -0,0 +1,78 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Given a .litmus test and the corresponding .litmus.out file, check +# the .litmus.out file against the "Result:" comment to judge whether +# the test ran correctly. +# +# Usage: +# judgelitmus.sh file.litmus +# +# Run this in the directory containing the memory model, specifying the +# pathname of the litmus test to check. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +litmus=$1 + +if test -f "$litmus" -a -r "$litmus" +then + : +else + echo ' --- ' error: \"$litmus\" is not a readable file + exit 255 +fi +if test -f "$LKMM_DESTDIR/$litmus".out -a -r "$LKMM_DESTDIR/$litmus".out +then + : +else + echo ' --- ' error: \"$LKMM_DESTDIR/$litmus\".out is not a readable file + exit 255 +fi +if grep -q '^ \* Result: ' $litmus +then + outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'` +else + outcome=specified +fi + +grep '^Observation' $LKMM_DESTDIR/$litmus.out +if grep -q '^Observation' $LKMM_DESTDIR/$litmus.out +then + : +else + echo ' !!! Verification error' $litmus + if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out + then + echo ' !!! Verification error' >> $LKMM_DESTDIR/$litmus.out 2>&1 + fi + exit 255 +fi +if test "$outcome" = DEADLOCK +then + if grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q 'Never 0 0$' + then + ret=0 + else + echo " !!! Unexpected non-$outcome verification" $litmus + if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out + then + echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1 + fi + ret=1 + fi +elif grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q $outcome || test "$outcome" = Maybe +then + ret=0 +else + echo " !!! Unexpected non-$outcome verification" $litmus + if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out + then + echo " !!! 
Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1 + fi + ret=1 +fi +tail -2 $LKMM_DESTDIR/$litmus.out | head -1 +exit $ret diff --git a/tools/memory-model/scripts/newlitmushist.sh b/tools/memory-model/scripts/newlitmushist.sh new file mode 100755 index 000000000..991f8f814 --- /dev/null +++ b/tools/memory-model/scripts/newlitmushist.sh @@ -0,0 +1,61 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Runs the C-language litmus tests matching the specified criteria +# that do not already have a corresponding .litmus.out file, and does +# not judge the result. +# +# sh newlitmushist.sh +# +# Run from the Linux kernel tools/memory-model directory. +# See scripts/parseargs.sh for list of arguments. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +. scripts/parseargs.sh + +T=/tmp/newlitmushist.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +if test -d litmus +then + : +else + echo Run scripts/initlitmushist.sh first, need litmus repo. + exit 1 +fi + +# Create any new directories that have appeared in the github litmus +# repo since the last run. +if test "$LKMM_DESTDIR" != "." +then + find litmus -type d -print | + ( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh ) +fi + +# Create a list of the C-language litmus tests previously run. +( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) | + sed -e 's/\.out$//' | + xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already + +# Form full list of litmus tests with no more than the specified +# number of processes (per the --procs argument). +find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C-all +xargs < $T/list-C-all -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short + +# Form list of new tests. Note: This does not handle litmus-test deletion! +sort $T/list-C-already $T/list-C-short | uniq -u > $T/list-C-new + +# Form list of litmus tests that have changed since the last run. +sed < $T/list-C-short -e 's,^.*$,if test & -nt '"$LKMM_DESTDIR"'/&.out; then echo &; fi,' > $T/list-C-script +sh $T/list-C-script > $T/list-C-newer + +# Merge the list of new and of updated litmus tests: These must be (re)run. +sort -u $T/list-C-new $T/list-C-newer > $T/list-C-needed + +scripts/runlitmushist.sh < $T/list-C-needed + +exit 0 diff --git a/tools/memory-model/scripts/parseargs.sh b/tools/memory-model/scripts/parseargs.sh new file mode 100755 index 000000000..40f52080f --- /dev/null +++ b/tools/memory-model/scripts/parseargs.sh @@ -0,0 +1,136 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# the corresponding .litmus.out file, and does not judge the result. +# +# . scripts/parseargs.sh +# +# Include into other Linux kernel tools/memory-model scripts. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +T=/tmp/parseargs.sh.$$ +mkdir $T + +# Initialize one parameter: initparam name default +initparam () { + echo if test -z '"$'$1'"' > $T/s + echo then >> $T/s + echo $1='"'$2'"' >> $T/s + echo export $1 >> $T/s + echo fi >> $T/s + echo $1_DEF='$'$1 >> $T/s + . $T/s +} + +initparam LKMM_DESTDIR "." +initparam LKMM_HERD_OPTIONS "-conf linux-kernel.cfg" +initparam LKMM_JOBS `getconf _NPROCESSORS_ONLN` +initparam LKMM_PROCS "3" +initparam LKMM_TIMEOUT "1m" + +scriptname=$0 + +usagehelp () { + echo "Usage $scriptname [ arguments ]" + echo " --destdir path (place for .litmus.out, default by .litmus)" + echo " --herdopts -conf linux-kernel.cfg ..." 
+ echo " --jobs N (number of jobs, default one per CPU)" + echo " --procs N (litmus tests with at most this many processes)" + echo " --timeout N (herd7 timeout (e.g., 10s, 1m, 2hr, 1d, '')" + echo "Defaults: --destdir '$LKMM_DESTDIR_DEF' --herdopts '$LKMM_HERD_OPTIONS_DEF' --jobs '$LKMM_JOBS_DEF' --procs '$LKMM_PROCS_DEF' --timeout '$LKMM_TIMEOUT_DEF'" + exit 1 +} + +usage () { + usagehelp 1>&2 +} + +# checkarg --argname argtype $# arg mustmatch cannotmatch +checkarg () { + if test $3 -le 1 + then + echo $1 needs argument $2 matching \"$5\" + usage + fi + if echo "$4" | grep -q -e "$5" + then + : + else + echo $1 $2 \"$4\" must match \"$5\" + usage + fi + if echo "$4" | grep -q -e "$6" + then + echo $1 $2 \"$4\" must not match \"$6\" + usage + fi +} + +while test $# -gt 0 +do + case "$1" in + --destdir) + checkarg --destdir "(path to directory)" "$#" "$2" '.\+' '^--' + LKMM_DESTDIR="$2" + mkdir $LKMM_DESTDIR > /dev/null 2>&1 + if ! test -e "$LKMM_DESTDIR" + then + echo "Cannot create directory --destdir '$LKMM_DESTDIR'" + usage + fi + if test -d "$LKMM_DESTDIR" -a -w "$LKMM_DESTDIR" -a -x "$LKMM_DESTDIR" + then + : + else + echo "Directory --destdir '$LKMM_DESTDIR' insufficient permissions to create files" + usage + fi + shift + ;; + --herdopts|--herdopt) + checkarg --destdir "(herd7 options)" "$#" "$2" '.*' '^--' + LKMM_HERD_OPTIONS="$2" + shift + ;; + -j[1-9]*) + njobs="`echo $1 | sed -e 's/^-j//'`" + trailchars="`echo $njobs | sed -e 's/[0-9]\+\(.*\)$/\1/'`" + if test -n "$trailchars" + then + echo $1 trailing characters "'$trailchars'" + usagehelp + fi + LKMM_JOBS="`echo $njobs | sed -e 's/^\([0-9]\+\).*$/\1/'`" + ;; + --jobs|--job|-j) + checkarg --jobs "(number)" "$#" "$2" '^[1-9][0-9]\+$' '^--' + LKMM_JOBS="$2" + shift + ;; + --procs|--proc) + checkarg --procs "(number)" "$#" "$2" '^[0-9]\+$' '^--' + LKMM_PROCS="$2" + shift + ;; + --timeout) + checkarg --timeout "(timeout spec)" "$#" "$2" '^\([0-9]\+[smhd]\?\|\)$' '^--' + LKMM_TIMEOUT="$2" + shift + ;; + *) + echo Unknown argument $1 + usage + ;; + esac + shift +done +if test -z "$LKMM_TIMEOUT" +then + LKMM_TIMEOUT_CMD=""; export LKMM_TIMEOUT_CMD +else + LKMM_TIMEOUT_CMD="timeout $LKMM_TIMEOUT"; export LKMM_TIMEOUT_CMD +fi +rm -rf $T diff --git a/tools/memory-model/scripts/runlitmushist.sh b/tools/memory-model/scripts/runlitmushist.sh new file mode 100755 index 000000000..6ed376f49 --- /dev/null +++ b/tools/memory-model/scripts/runlitmushist.sh @@ -0,0 +1,87 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ +# +# Runs the C-language litmus tests specified on standard input, using up +# to the specified number of CPUs (defaulting to all of them) and placing +# the results in the specified directory (defaulting to the same place +# the litmus test came from). +# +# sh runlitmushist.sh +# +# Run from the Linux kernel tools/memory-model directory. +# This script uses environment variables produced by parseargs.sh. +# +# Copyright IBM Corporation, 2018 +# +# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> + +T=/tmp/runlitmushist.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +if test -d litmus +then + : +else + echo Directory \"litmus\" missing, aborting run. + exit 1 +fi + +# Prefixes for per-CPU scripts +for ((i=0;i<$LKMM_JOBS;i++)) +do + echo dir="$LKMM_DESTDIR" > $T/$i.sh + echo T=$T >> $T/$i.sh + echo herdoptions=\"$LKMM_HERD_OPTIONS\" >> $T/$i.sh + cat << '___EOF___' >> $T/$i.sh + runtest () { + echo ' ... 
' /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 '>' $dir/$1.out '2>&1' + if /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 > $dir/$1.out 2>&1 + then + if ! grep -q '^Observation ' $dir/$1.out + then + echo ' !!! Herd failed, no Observation:' $1 + fi + else + exitcode=$? + if test "$exitcode" -eq 124 + then + exitmsg="timed out" + else + exitmsg="failed, exit code $exitcode" + fi + echo ' !!! Herd' ${exitmsg}: $1 + fi + } +___EOF___ +done + +awk -v q="'" -v b='\\' ' +{ + print "echo `grep " q "^P[0-9]" b "+(" q " " $0 " | tail -1 | sed -e " q "s/^P" b "([0-9]" b "+" b ")(.*$/" b "1/" q "` " $0 +}' | bash | +sort -k1n | +awk -v ncpu=$LKMM_JOBS -v t=$T ' +{ + print "runtest " $2 >> t "/" NR % ncpu ".sh"; +} + +END { + for (i = 0; i < ncpu; i++) { + print "sh " t "/" i ".sh > " t "/" i ".sh.out 2>&1 &"; + close(t "/" i ".sh"); + } + print "wait"; +}' | sh +cat $T/*.sh.out +if grep -q '!!!' $T/*.sh.out +then + echo ' ---' Summary: 1>&2 + grep '!!!' $T/*.sh.out 1>&2 + nfail="`grep '!!!' $T/*.sh.out | wc -l`" + echo 'Number of failed herd7 runs (e.g., timeout): ' $nfail 1>&2 + exit 1 +else + echo All runs completed successfully. 1>&2 + exit 0 +fi |
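
A typical way of combining the history scripts above, sketched here on
the assumption that the herdtools7 binaries (herd7 and friends) are on
PATH and that all commands are issued from the tools/memory-model
directory:

	# Record baseline results for the github litmus archive
	# (clones the archive on first use).
	sh scripts/initlitmushist.sh --procs 3 --timeout 1m

	# Later, run only litmus tests that are new or have changed.
	sh scripts/newlitmushist.sh --procs 3 --timeout 1m

	# Rerun the recorded tests and compare against the baseline.
	sh scripts/checklitmushist.sh --procs 3 --timeout 1m

The --procs and --timeout arguments are handled by scripts/parseargs.sh
as shown above; see that script for the remaining options and their
defaults.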