ASR_BASE

Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/marvell/linux/tools/memory-model/.gitignore b/marvell/linux/tools/memory-model/.gitignore
new file mode 100644
index 0000000..b1d34c5
--- /dev/null
+++ b/marvell/linux/tools/memory-model/.gitignore
@@ -0,0 +1 @@
+litmus
diff --git a/marvell/linux/tools/memory-model/Documentation/cheatsheet.txt b/marvell/linux/tools/memory-model/Documentation/cheatsheet.txt
new file mode 100644
index 0000000..33ba98d
--- /dev/null
+++ b/marvell/linux/tools/memory-model/Documentation/cheatsheet.txt
@@ -0,0 +1,30 @@
+                                  Prior Operation     Subsequent Operation
+                                  ---------------  ---------------------------
+                               C  Self  R  W  RMW  Self  R  W  DR  DW  RMW  SV
+                              --  ----  -  -  ---  ----  -  -  --  --  ---  --
+
+Store, e.g., WRITE_ONCE()            Y                                       Y
+Load, e.g., READ_ONCE()              Y                          Y   Y        Y
+Unsuccessful RMW operation           Y                          Y   Y        Y
+rcu_dereference()                    Y                          Y   Y        Y
+Successful *_acquire()               R                   Y  Y   Y   Y    Y   Y
+Successful *_release()         C        Y  Y    Y     W                      Y
+smp_rmb()                               Y       R        Y      Y        R
+smp_wmb()                                  Y    W           Y       Y    W
+smp_mb() & synchronize_rcu()  CP        Y  Y    Y        Y  Y   Y   Y    Y
+Successful full non-void RMW  CP     Y  Y  Y    Y     Y  Y  Y   Y   Y    Y   Y
+smp_mb__before_atomic()       CP        Y  Y    Y        a  a   a   a    Y
+smp_mb__after_atomic()        CP        a  a    Y        Y  Y   Y   Y    Y
+
+
+Key:	C:	Ordering is cumulative
+	P:	Ordering propagates
+	R:	Read, for example, READ_ONCE(), or read portion of RMW
+	W:	Write, for example, WRITE_ONCE(), or write portion of RMW
+	Y:	Provides ordering
+	a:	Provides ordering given intervening RMW atomic operation
+	DR:	Dependent read (address dependency)
+	DW:	Dependent write (address, data, or control dependency)
+	RMW:	Atomic read-modify-write operation
+	SELF:	Orders self, as opposed to accesses before and/or after
+	SV:	Orders later accesses to the same variable
diff --git a/marvell/linux/tools/memory-model/Documentation/explanation.txt b/marvell/linux/tools/memory-model/Documentation/explanation.txt
new file mode 100644
index 0000000..488f11f
--- /dev/null
+++ b/marvell/linux/tools/memory-model/Documentation/explanation.txt
@@ -0,0 +1,2006 @@
+Explanation of the Linux-Kernel Memory Consistency Model
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+:Author: Alan Stern <stern@rowland.harvard.edu>
+:Created: October 2017
+
+.. Contents
+
+  1. INTRODUCTION
+  2. BACKGROUND
+  3. A SIMPLE EXAMPLE
+  4. A SELECTION OF MEMORY MODELS
+  5. ORDERING AND CYCLES
+  6. EVENTS
+  7. THE PROGRAM ORDER RELATION: po AND po-loc
+  8. A WARNING
+  9. DEPENDENCY RELATIONS: data, addr, and ctrl
+  10. THE READS-FROM RELATION: rf, rfi, and rfe
+  11. CACHE COHERENCE AND THE COHERENCE ORDER RELATION: co, coi, and coe
+  12. THE FROM-READS RELATION: fr, fri, and fre
+  13. AN OPERATIONAL MODEL
+  14. PROPAGATION ORDER RELATION: cumul-fence
+  15. DERIVATION OF THE LKMM FROM THE OPERATIONAL MODEL
+  16. SEQUENTIAL CONSISTENCY PER VARIABLE
+  17. ATOMIC UPDATES: rmw
+  18. THE PRESERVED PROGRAM ORDER RELATION: ppo
+  19. AND THEN THERE WAS ALPHA
+  20. THE HAPPENS-BEFORE RELATION: hb
+  21. THE PROPAGATES-BEFORE RELATION: pb
+  22. RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb
+  23. LOCKING
+  24. ODDS AND ENDS
+
+
+
+INTRODUCTION
+------------
+
+The Linux-kernel memory consistency model (LKMM) is rather complex and
+obscure.  This is particularly evident if you read through the
+linux-kernel.bell and linux-kernel.cat files that make up the formal
+version of the model; they are extremely terse and their meanings are
+far from clear.
+
+This document describes the ideas underlying the LKMM, but excluding
+the modeling of bare C (or plain) shared memory accesses.  It is meant
+for people who want to understand how the model was designed.  It does
+not go into the details of the code in the .bell and .cat files;
+rather, it explains in English what the code expresses symbolically.
+
+Sections 2 (BACKGROUND) through 5 (ORDERING AND CYCLES) are aimed
+toward beginners; they explain what memory consistency models are and
+the basic notions shared by all such models.  People already familiar
+with these concepts can skim or skip over them.  Sections 6 (EVENTS)
+through 12 (THE FROM-READS RELATION) describe the fundamental
+relations used in many models.  Starting in Section 13 (AN OPERATIONAL
+MODEL), the workings of the LKMM itself are covered.
+
+Warning: The code examples in this document are not written in the
+proper format for litmus tests.  They don't include a header line, the
+initializations are not enclosed in braces, the global variables are
+not passed by pointers, and they don't have an "exists" clause at the
+end.  Converting them to the right format is left as an exercise for
+the reader.
+
+
+BACKGROUND
+----------
+
+A memory consistency model (or just memory model, for short) is
+something which predicts, given a piece of computer code running on a
+particular kind of system, what values may be obtained by the code's
+load instructions.  The LKMM makes these predictions for code running
+as part of the Linux kernel.
+
+In practice, people tend to use memory models the other way around.
+That is, given a piece of code and a collection of values specified
+for the loads, the model will predict whether it is possible for the
+code to run in such a way that the loads will indeed obtain the
+specified values.  Of course, this is just another way of expressing
+the same idea.
+
+For code running on a uniprocessor system, the predictions are easy:
+Each load instruction must obtain the value written by the most recent
+store instruction accessing the same location (we ignore complicating
+factors such as DMA and mixed-size accesses.)  But on multiprocessor
+systems, with multiple CPUs making concurrent accesses to shared
+memory locations, things aren't so simple.
+
+Different architectures have differing memory models, and the Linux
+kernel supports a variety of architectures.  The LKMM has to be fairly
+permissive, in the sense that any behavior allowed by one of these
+architectures also has to be allowed by the LKMM.
+
+
+A SIMPLE EXAMPLE
+----------------
+
+Here is a simple example to illustrate the basic concepts.  Consider
+some code running as part of a device driver for an input device.  The
+driver might contain an interrupt handler which collects data from the
+device, stores it in a buffer, and sets a flag to indicate the buffer
+is full.  Running concurrently on a different CPU might be a part of
+the driver code being executed by a process in the midst of a read(2)
+system call.  This code tests the flag to see whether the buffer is
+ready, and if it is, copies the data back to userspace.  The buffer
+and the flag are memory locations shared between the two CPUs.
+
+We can abstract out the important pieces of the driver code as follows
+(the reason for using WRITE_ONCE() and READ_ONCE() instead of simple
+assignment statements is discussed later):
+
+	int buf = 0, flag = 0;
+
+	P0()
+	{
+		WRITE_ONCE(buf, 1);
+		WRITE_ONCE(flag, 1);
+	}
+
+	P1()
+	{
+		int r1;
+		int r2 = 0;
+
+		r1 = READ_ONCE(flag);
+		if (r1)
+			r2 = READ_ONCE(buf);
+	}
+
+Here the P0() function represents the interrupt handler running on one
+CPU and P1() represents the read() routine running on another.  The
+value 1 stored in buf represents input data collected from the device.
+Thus, P0 stores the data in buf and then sets flag.  Meanwhile, P1
+reads flag into the private variable r1, and if it is set, reads the
+data from buf into a second private variable r2 for copying to
+userspace.  (Presumably if flag is not set then the driver will wait a
+while and try again.)
+
+This pattern of memory accesses, where one CPU stores values to two
+shared memory locations and another CPU loads from those locations in
+the opposite order, is widely known as the "Message Passing" or MP
+pattern.  It is typical of memory access patterns in the kernel.
+
+Please note that this example code is a simplified abstraction.  Real
+buffers are usually larger than a single integer, real device drivers
+usually use sleep and wakeup mechanisms rather than polling for I/O
+completion, and real code generally doesn't bother to copy values into
+private variables before using them.  All that is beside the point;
+the idea here is simply to illustrate the overall pattern of memory
+accesses by the CPUs.
+
+A memory model will predict what values P1 might obtain for its loads
+from flag and buf, or equivalently, what values r1 and r2 might end up
+with after the code has finished running.
+
+Some predictions are trivial.  For instance, no sane memory model would
+predict that r1 = 42 or r2 = -7, because neither of those values ever
+gets stored in flag or buf.
+
+Some nontrivial predictions are nonetheless quite simple.  For
+instance, P1 might run entirely before P0 begins, in which case r1 and
+r2 will both be 0 at the end.  Or P0 might run entirely before P1
+begins, in which case r1 and r2 will both be 1.
+
+The interesting predictions concern what might happen when the two
+routines run concurrently.  One possibility is that P1 runs after P0's
+store to buf but before the store to flag.  In this case, r1 and r2
+will again both be 0.  (If P1 had been designed to read buf
+unconditionally then we would instead have r1 = 0 and r2 = 1.)
+
+However, the most interesting possibility is where r1 = 1 and r2 = 0.
+If this were to occur it would mean the driver contains a bug, because
+incorrect data would get sent to the user: 0 instead of 1.  As it
+happens, the LKMM does predict this outcome can occur, and the example
+driver code shown above is indeed buggy.
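+
+(For reference, here is one plausible rendering of the MP example in
+the litmus-test format mentioned in the Warning above -- with a header
+line, braced initialization, pointer-passed globals, and an "exists"
+clause -- offered as a sketch rather than a verified test:
+
+	C MP
+
+	{}
+
+	P0(int *buf, int *flag)
+	{
+		WRITE_ONCE(*buf, 1);
+		WRITE_ONCE(*flag, 1);
+	}
+
+	P1(int *buf, int *flag)
+	{
+		int r1;
+		int r2 = 0;
+
+		r1 = READ_ONCE(*flag);
+		if (r1)
+			r2 = READ_ONCE(*buf);
+	}
+
+	exists (1:r1=1 /\ 1:r2=0)
+
+The "exists" clause asks whether the buggy outcome can occur.)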
+
+
+A SELECTION OF MEMORY MODELS
+----------------------------
+
+The first widely cited memory model, and the simplest to understand,
+is Sequential Consistency.  According to this model, systems behave as
+if each CPU executed its instructions in order but with unspecified
+timing.  In other words, the instructions from the various CPUs get
+interleaved in a nondeterministic way, always according to some single
+global order that agrees with the order of the instructions in the
+program source for each CPU.  The model says that the value obtained
+by each load is simply the value written by the most recently executed
+store to the same memory location, from any CPU.
+
+For the MP example code shown above, Sequential Consistency predicts
+that the undesired result r1 = 1, r2 = 0 cannot occur.  The reasoning
+goes like this:
+
+	Since r1 = 1, P0 must store 1 to flag before P1 loads 1 from
+	it, as loads can obtain values only from earlier stores.
+
+	P1 loads from flag before loading from buf, since CPUs execute
+	their instructions in order.
+
+	P1 must load 0 from buf before P0 stores 1 to it; otherwise r2
+	would be 1 since a load obtains its value from the most recent
+	store to the same address.
+
+	P0 stores 1 to buf before storing 1 to flag, since it executes
+	its instructions in order.
+
+	Since an instruction (in this case, P0's store to flag) cannot
+	execute before itself, the specified outcome is impossible.
+
+However, real computer hardware almost never follows the Sequential
+Consistency memory model; doing so would rule out too many valuable
+performance optimizations.  On ARM and PowerPC architectures, for
+instance, the MP example code really does sometimes yield r1 = 1 and
+r2 = 0.
+
+x86 and SPARC follow yet a different memory model: TSO (Total Store
+Ordering).  This model predicts that the undesired outcome for the MP
+pattern cannot occur, but in other respects it differs from Sequential
+Consistency.  One example is the Store Buffer (SB) pattern, in which
+each CPU stores to its own shared location and then loads from the
+other CPU's location:
+
+	int x = 0, y = 0;
+
+	P0()
+	{
+		int r0;
+
+		WRITE_ONCE(x, 1);
+		r0 = READ_ONCE(y);
+	}
+
+	P1()
+	{
+		int r1;
+
+		WRITE_ONCE(y, 1);
+		r1 = READ_ONCE(x);
+	}
+
+Sequential Consistency predicts that the outcome r0 = 0, r1 = 0 is
+impossible.  (Exercise: Figure out the reasoning.)  But TSO allows
+this outcome to occur, and in fact it does sometimes occur on x86 and
+SPARC systems.
+
+The LKMM was inspired by the memory models followed by PowerPC, ARM,
+x86, Alpha, and other architectures.  However, it is different in
+detail from each of them.
+
+
+ORDERING AND CYCLES
+-------------------
+
+Memory models are all about ordering.  Often this is temporal ordering
+(i.e., the order in which certain events occur) but it doesn't have to
+be; consider for example the order of instructions in a program's
+source code.  We saw above that Sequential Consistency makes an
+important assumption that CPUs execute instructions in the same order
+as those instructions occur in the code, and there are many other
+instances of ordering playing central roles in memory models.
+
+The counterpart to ordering is a cycle.  Ordering rules out cycles:
+It's not possible to have X ordered before Y, Y ordered before Z, and
+Z ordered before X, because this would mean that X is ordered before
+itself.  The analysis of the MP example under Sequential Consistency
+involved just such an impossible cycle:
+
+	W: P0 stores 1 to flag   executes before
+	X: P1 loads 1 from flag  executes before
+	Y: P1 loads 0 from buf   executes before
+	Z: P0 stores 1 to buf    executes before
+	W: P0 stores 1 to flag.
+
+In short, if a memory model requires certain accesses to be ordered,
+and a certain outcome for the loads in a piece of code can happen only
+if those accesses would form a cycle, then the memory model predicts
+that outcome cannot occur.
+
+The LKMM is defined largely in terms of cycles, as we will see.
+
+
+EVENTS
+------
+
+The LKMM does not work directly with the C statements that make up
+kernel source code.  Instead it considers the effects of those
+statements in a more abstract form, namely, events.  The model
+includes three types of events:
+
+	Read events correspond to loads from shared memory, such as
+	calls to READ_ONCE(), smp_load_acquire(), or
+	rcu_dereference().
+
+	Write events correspond to stores to shared memory, such as
+	calls to WRITE_ONCE(), smp_store_release(), or atomic_set().
+
+	Fence events correspond to memory barriers (also known as
+	fences), such as calls to smp_rmb() or rcu_read_lock().
+
+These categories are not exclusive; a read or write event can also be
+a fence.  This happens with functions like smp_load_acquire() or
+spin_lock().  However, no single event can be both a read and a write.
+Atomic read-modify-write accesses, such as atomic_inc() or xchg(),
+correspond to a pair of events: a read followed by a write.  (The
+write event is omitted for executions where it doesn't occur, such as
+a cmpxchg() where the comparison fails.)
+
+Other parts of the code, those which do not involve interaction with
+shared memory, do not give rise to events.  Thus, arithmetic and
+logical computations, control-flow instructions, or accesses to
+private memory or CPU registers are not of central interest to the
+memory model.  They only affect the model's predictions indirectly.
+For example, an arithmetic computation might determine the value that
+gets stored to a shared memory location (or in the case of an array
+index, the address where the value gets stored), but the memory model
+is concerned only with the store itself -- its value and its address
+-- not the computation leading up to it.
+
+Events in the LKMM can be linked by various relations, which we will
+describe in the following sections.  The memory model requires certain
+of these relations to be orderings, that is, it requires them not to
+have any cycles.
+
+
+THE PROGRAM ORDER RELATION: po AND po-loc
+-----------------------------------------
+
+The most important relation between events is program order (po).  You
+can think of it as the order in which statements occur in the source
+code after branches are taken into account and loops have been
+unrolled.  A better description might be the order in which
+instructions are presented to a CPU's execution unit.  Thus, we say
+that X is po-before Y (written as "X ->po Y" in formulas) if X occurs
+before Y in the instruction stream.
+
+This is inherently a single-CPU relation; two instructions executing
+on different CPUs are never linked by po.  Also, it is by definition
+an ordering so it cannot have any cycles.
+
+po-loc is a sub-relation of po.  It links two memory accesses when the
+first comes before the second in program order and they access the
+same memory location (the "-loc" suffix).
+
+Although this may seem straightforward, there is one subtle aspect to
+program order we need to explain.  The LKMM was inspired by low-level
+architectural memory models which describe the behavior of machine
+code, and it retains their outlook to a considerable extent.  The
+read, write, and fence events used by the model are close in spirit to
+individual machine instructions.  Nevertheless, the LKMM describes
+kernel code written in C, and the mapping from C to machine code can
+be extremely complex.
+
+Optimizing compilers have great freedom in the way they translate
+source code to object code.  They are allowed to apply transformations
+that add memory accesses, eliminate accesses, combine them, split them
+into pieces, or move them around.  The use of READ_ONCE(), WRITE_ONCE(),
+or one of the other atomic or synchronization primitives prevents a
+large number of compiler optimizations.  In particular, it is guaranteed
+that the compiler will not remove such accesses from the generated code
+(unless it can prove the accesses will never be executed), it will not
+change the order in which they occur in the code (within limits imposed
+by the C standard), and it will not introduce extraneous accesses.
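+
+As an illustration (a sketch, not taken from actual kernel source),
+consider a busy-wait loop written with a plain access:
+
+	while (flag == 0)	/* plain load of a shared variable */
+		cpu_relax();
+
+Nothing prevents the compiler from loading flag once, keeping the
+value in a register, and turning this into an infinite loop.  Writing
+the test as "while (READ_ONCE(flag) == 0)" forces a fresh load of flag
+on every iteration.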
+
+The MP and SB examples above used READ_ONCE() and WRITE_ONCE() rather
+than ordinary memory accesses.  Thanks to this usage, we can be certain
+that in the MP example, the compiler won't reorder P0's write event to
+buf and P0's write event to flag, and similarly for the other shared
+memory accesses in the examples.
+
+Since private variables are not shared between CPUs, they can be
+accessed normally without READ_ONCE() or WRITE_ONCE().  In fact, they
+need not even be stored in normal memory at all -- in principle a
+private variable could be stored in a CPU register (hence the convention
+that these variables have names starting with the letter 'r').
+
+
+A WARNING
+---------
+
+The protections provided by READ_ONCE(), WRITE_ONCE(), and others are
+not perfect; and under some circumstances it is possible for the
+compiler to undermine the memory model.  Here is an example.  Suppose
+both branches of an "if" statement store the same value to the same
+location:
+
+	r1 = READ_ONCE(x);
+	if (r1) {
+		WRITE_ONCE(y, 2);
+		...  /* do something */
+	} else {
+		WRITE_ONCE(y, 2);
+		...  /* do something else */
+	}
+
+For this code, the LKMM predicts that the load from x will always be
+executed before either of the stores to y.  However, a compiler could
+lift the stores out of the conditional, transforming the code into
+something resembling:
+
+	r1 = READ_ONCE(x);
+	WRITE_ONCE(y, 2);
+	if (r1) {
+		...  /* do something */
+	} else {
+		...  /* do something else */
+	}
+
+Given this version of the code, the LKMM would predict that the load
+from x could be executed after the store to y.  Thus, the memory
+model's original prediction could be invalidated by the compiler.
+
+Another issue arises from the fact that in C, arguments to many
+operators and function calls can be evaluated in any order.  For
+example:
+
+	r1 = f(5) + g(6);
+
+The object code might call f(5) either before or after g(6); the
+memory model cannot assume there is a fixed program order relation
+between them.  (In fact, if the functions are inlined then the
+compiler might even interleave their object code.)
+
+
+DEPENDENCY RELATIONS: data, addr, and ctrl
+------------------------------------------
+
+We say that two events are linked by a dependency relation when the
+execution of the second event depends in some way on a value obtained
+from memory by the first.  The first event must be a read, and the
+value it obtains must somehow affect what the second event does.
+There are three kinds of dependencies: data, address (addr), and
+control (ctrl).
+
+A read and a write event are linked by a data dependency if the value
+obtained by the read affects the value stored by the write.  As a very
+simple example:
+
+	int x, y;
+
+	r1 = READ_ONCE(x);
+	WRITE_ONCE(y, r1 + 5);
+
+The value stored by the WRITE_ONCE obviously depends on the value
+loaded by the READ_ONCE.  Such dependencies can wind through
+arbitrarily complicated computations, and a write can depend on the
+values of multiple reads.
+
+A read event and another memory access event are linked by an address
+dependency if the value obtained by the read affects the location
+accessed by the other event.  The second event can be either a read or
+a write.  Here's another simple example:
+
+	int a[20];
+	int i;
+
+	r1 = READ_ONCE(i);
+	r2 = READ_ONCE(a[r1]);
+
+Here the location accessed by the second READ_ONCE() depends on the
+index value loaded by the first.  Pointer indirection also gives rise
+to address dependencies, since the address of a location accessed
+through a pointer will depend on the value read earlier from that
+pointer.
+
+Finally, a read event and another memory access event are linked by a
+control dependency if the value obtained by the read affects whether
+the second event is executed at all.  Simple example:
+
+	int x, y;
+
+	r1 = READ_ONCE(x);
+	if (r1)
+		WRITE_ONCE(y, 1984);
+
+Execution of the WRITE_ONCE() is controlled by a conditional expression
+which depends on the value obtained by the READ_ONCE(); hence there is
+a control dependency from the load to the store.
+
+It should be pretty obvious that events can only depend on reads that
+come earlier in program order.  Symbolically, if we have R ->data X,
+R ->addr X, or R ->ctrl X (where R is a read event), then we must also
+have R ->po X.  It wouldn't make sense for a computation to depend
+somehow on a value that doesn't get loaded from shared memory until
+later in the code!
+
+
+THE READS-FROM RELATION: rf, rfi, and rfe
+-----------------------------------------
+
+The reads-from relation (rf) links a write event to a read event when
+the value loaded by the read is the value that was stored by the
+write.  In colloquial terms, the load "reads from" the store.  We
+write W ->rf R to indicate that the load R reads from the store W.  We
+further distinguish the cases where the load and the store occur on
+the same CPU (internal reads-from, or rfi) and where they occur on
+different CPUs (external reads-from, or rfe).
+
+For our purposes, a memory location's initial value is treated as
+though it had been written there by an imaginary initial store that
+executes on a separate CPU before the program runs.
+
+Usage of the rf relation implicitly assumes that loads will always
+read from a single store.  It doesn't apply properly in the presence
+of load-tearing, where a load obtains some of its bits from one store
+and some of them from another store.  Fortunately, use of READ_ONCE()
+and WRITE_ONCE() will prevent load-tearing; it's not possible to have:
+
+	int x = 0;
+
+	P0()
+	{
+		WRITE_ONCE(x, 0x1234);
+	}
+
+	P1()
+	{
+		int r1;
+
+		r1 = READ_ONCE(x);
+	}
+
+and end up with r1 = 0x1200 (partly from x's initial value and partly
+from the value stored by P0).
+
+On the other hand, load-tearing is unavoidable when mixed-size
+accesses are used.  Consider this example:
+
+	union {
+		u32	w;
+		u16	h[2];
+	} x;
+
+	P0()
+	{
+		WRITE_ONCE(x.h[0], 0x1234);
+		WRITE_ONCE(x.h[1], 0x5678);
+	}
+
+	P1()
+	{
+		int r1;
+
+		r1 = READ_ONCE(x.w);
+	}
+
+If r1 = 0x56781234 (little-endian!) at the end, then P1 must have read
+from both of P0's stores.  It is possible to handle mixed-size and
+unaligned accesses in a memory model, but the LKMM currently does not
+attempt to do so.  It requires all accesses to be properly aligned and
+of the location's actual size.
+
+
+CACHE COHERENCE AND THE COHERENCE ORDER RELATION: co, coi, and coe
+------------------------------------------------------------------
+
+Cache coherence is a general principle requiring that in a
+multi-processor system, the CPUs must share a consistent view of the
+memory contents.  Specifically, it requires that for each location in
+shared memory, the stores to that location must form a single global
+ordering which all the CPUs agree on (the coherence order), and this
+ordering must be consistent with the program order for accesses to
+that location.
+
+To put it another way, for any variable x, the coherence order (co) of
+the stores to x is simply the order in which the stores overwrite one
+another.  The imaginary store which establishes x's initial value
+comes first in the coherence order; the store which directly
+overwrites the initial value comes second; the store which overwrites
+that value comes third, and so on.
+
+You can think of the coherence order as being the order in which the
+stores reach x's location in memory (or if you prefer a more
+hardware-centric view, the order in which the stores get written to
+x's cache line).  We write W ->co W' if W comes before W' in the
+coherence order, that is, if the value stored by W gets overwritten,
+directly or indirectly, by the value stored by W'.
+
+Coherence order is required to be consistent with program order.  This
+requirement takes the form of four coherency rules:
+
+	Write-write coherence: If W ->po-loc W' (i.e., W comes before
+	W' in program order and they access the same location), where W
+	and W' are two stores, then W ->co W'.
+
+	Write-read coherence: If W ->po-loc R, where W is a store and R
+	is a load, then R must read from W or from some other store
+	which comes after W in the coherence order.
+
+	Read-write coherence: If R ->po-loc W, where R is a load and W
+	is a store, then the store which R reads from must come before
+	W in the coherence order.
+
+	Read-read coherence: If R ->po-loc R', where R and R' are two
+	loads, then either they read from the same store or else the
+	store read by R comes before the store read by R' in the
+	coherence order.
+
+This is sometimes referred to as sequential consistency per variable,
+because it means that the accesses to any single memory location obey
+the rules of the Sequential Consistency memory model.  (According to
+Wikipedia, sequential consistency per variable and cache coherence
+mean the same thing except that cache coherence includes an extra
+requirement that every store eventually becomes visible to every CPU.)
+
+Any reasonable memory model will include cache coherence.  Indeed, our
+expectation of cache coherence is so deeply ingrained that violations
+of its requirements look more like hardware bugs than programming
+errors:
+
+	int x;
+
+	P0()
+	{
+		WRITE_ONCE(x, 17);
+		WRITE_ONCE(x, 23);
+	}
+
+If the final value stored in x after this code ran was 17, you would
+think your computer was broken.  It would be a violation of the
+write-write coherence rule: Since the store of 23 comes later in
+program order, it must also come later in x's coherence order and
+thus must overwrite the store of 17.
+
+	int x = 0;
+
+	P0()
+	{
+		int r1;
+
+		r1 = READ_ONCE(x);
+		WRITE_ONCE(x, 666);
+	}
+
+If r1 = 666 at the end, this would violate the read-write coherence
+rule: The READ_ONCE() load comes before the WRITE_ONCE() store in
+program order, so it must not read from that store but rather from one
+coming earlier in the coherence order (in this case, x's initial
+value).
+
+	int x = 0;
+
+	P0()
+	{
+		WRITE_ONCE(x, 5);
+	}
+
+	P1()
+	{
+		int r1, r2;
+
+		r1 = READ_ONCE(x);
+		r2 = READ_ONCE(x);
+	}
+
+If r1 = 5 (reading from P0's store) and r2 = 0 (reading from the
+imaginary store which establishes x's initial value) at the end, this
+would violate the read-read coherence rule: The r1 load comes before
+the r2 load in program order, so it must not read from a store that
+comes later in the coherence order.
+
+(As a minor curiosity, if this code had used normal loads instead of
+READ_ONCE() in P1, on Itanium it sometimes could end up with r1 = 5
+and r2 = 0!  This results from parallel execution of the operations
+encoded in Itanium's Very-Long-Instruction-Word format, and it is yet
+another motivation for using READ_ONCE() when accessing shared memory
+locations.)
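+
+The remaining rule, write-read coherence, can be illustrated the same
+way (a minimal sketch):
+
+	int x = 0;
+
+	P0()
+	{
+		int r1;
+
+		WRITE_ONCE(x, 3);
+		r1 = READ_ONCE(x);
+	}
+
+If r1 = 0 at the end, this would violate the write-read coherence
+rule: The load comes after the store in program order, so it must read
+from that store or from some store coming later in the coherence
+order, not from x's initial value.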
+
+Just like the po relation, co is inherently an ordering -- it is not
+possible for a store to directly or indirectly overwrite itself!  And
+just like with the rf relation, we distinguish between stores that
+occur on the same CPU (internal coherence order, or coi) and stores
+that occur on different CPUs (external coherence order, or coe).
+
+On the other hand, stores to different memory locations are never
+related by co, just as instructions on different CPUs are never
+related by po.  Coherence order is strictly per-location, or if you
+prefer, each location has its own independent coherence order.
+
+
+THE FROM-READS RELATION: fr, fri, and fre
+-----------------------------------------
+
+The from-reads relation (fr) can be a little difficult for people to
+grok.  It describes the situation where a load reads a value that gets
+overwritten by a store.  In other words, we have R ->fr W when the
+value that R reads is overwritten (directly or indirectly) by W, or
+equivalently, when R reads from a store which comes earlier than W in
+the coherence order.
+
+For example:
+
+	int x = 0;
+
+	P0()
+	{
+		int r1;
+
+		r1 = READ_ONCE(x);
+		WRITE_ONCE(x, 2);
+	}
+
+The value loaded from x will be 0 (assuming cache coherence!), and it
+gets overwritten by the value 2.  Thus there is an fr link from the
+READ_ONCE() to the WRITE_ONCE().  If the code contained any later
+stores to x, there would also be fr links from the READ_ONCE() to
+them.
+
+As with rf, rfi, and rfe, we subdivide the fr relation into fri (when
+the load and the store are on the same CPU) and fre (when they are on
+different CPUs).
+
+Note that the fr relation is determined entirely by the rf and co
+relations; it is not independent.  Given a read event R and a write
+event W for the same location, we will have R ->fr W if and only if
+the write which R reads from is co-before W.  In symbols,
+
+	(R ->fr W) := (there exists W' with W' ->rf R and W' ->co W).
+
+
+AN OPERATIONAL MODEL
+--------------------
+
+The LKMM is based on various operational memory models, meaning that
+the models arise from an abstract view of how a computer system
+operates.  Here are the main ideas, as incorporated into the LKMM.
+
+The system as a whole is divided into the CPUs and a memory subsystem.
+The CPUs are responsible for executing instructions (not necessarily
+in program order), and they communicate with the memory subsystem.
+For the most part, executing an instruction requires a CPU to perform
+only internal operations.  However, loads, stores, and fences involve
+more.
+
+When CPU C executes a store instruction, it tells the memory subsystem
+to store a certain value at a certain location.  The memory subsystem
+propagates the store to all the other CPUs as well as to RAM.  (As a
+special case, we say that the store propagates to its own CPU at the
+time it is executed.)  The memory subsystem also determines where the
+store falls in the location's coherence order.  In particular, it must
+arrange for the store to be co-later than (i.e., to overwrite) any
+other store to the same location which has already propagated to CPU C.
+
+When a CPU executes a load instruction R, it first checks to see
+whether there are any as-yet unexecuted store instructions, for the
+same location, that come before R in program order.  If there are, it
+uses the value of the po-latest such store as the value obtained by R,
+and we say that the store's value is forwarded to R.  Otherwise, the
+CPU asks the memory subsystem for the value to load and we say that R
+is satisfied from memory.  The memory subsystem hands back the value
+of the co-latest store to the location in question which has already
+propagated to that CPU.
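+
+For instance (a sketch):
+
+	int x = 0;
+
+	P0()
+	{
+		int r1;
+
+		WRITE_ONCE(x, 1);
+		r1 = READ_ONCE(x);
+	}
+
+If P0's store to x has not yet executed when the load runs, the value
+1 is forwarded to the load.  If the store has already executed, then
+it has already propagated to P0 and the load is satisfied from memory,
+again obtaining 1.  Either way, r1 ends up equal to 1.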
+
+(In fact, the picture needs to be a little more complicated than this.
+CPUs have local caches, and propagating a store to a CPU really means
+propagating it to the CPU's local cache.  A local cache can take some
+time to process the stores that it receives, and a store can't be used
+to satisfy one of the CPU's loads until it has been processed.  On
+most architectures, the local caches process stores in
+First-In-First-Out order, and consequently the processing delay
+doesn't matter for the memory model.  But on Alpha, the local caches
+have a partitioned design that results in non-FIFO behavior.  We will
+discuss this in more detail later.)
+
+Note that load instructions may be executed speculatively and may be
+restarted under certain circumstances.  The memory model ignores these
+premature executions; we simply say that the load executes at the
+final time it is forwarded or satisfied.
+
+Executing a fence (or memory barrier) instruction doesn't require a
+CPU to do anything special other than informing the memory subsystem
+about the fence.  However, fences do constrain the way CPUs and the
+memory subsystem handle other instructions, in two respects.
+
+First, a fence forces the CPU to execute various instructions in
+program order.  Exactly which instructions are ordered depends on the
+type of fence:
+
+	Strong fences, including smp_mb() and synchronize_rcu(), force
+	the CPU to execute all po-earlier instructions before any
+	po-later instructions;
+
+	smp_rmb() forces the CPU to execute all po-earlier loads
+	before any po-later loads;
+
+	smp_wmb() forces the CPU to execute all po-earlier stores
+	before any po-later stores;
+
+	Acquire fences, such as smp_load_acquire(), force the CPU to
+	execute the load associated with the fence (e.g., the load
+	part of an smp_load_acquire()) before any po-later
+	instructions;
+
+	Release fences, such as smp_store_release(), force the CPU to
+	execute all po-earlier instructions before the store
+	associated with the fence (e.g., the store part of an
+	smp_store_release()).
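+
+For example, the buggy MP driver code shown earlier could be repaired
+with a release/acquire pair (a sketch):
+
+	int buf = 0, flag = 0;
+
+	P0()
+	{
+		WRITE_ONCE(buf, 1);
+		smp_store_release(&flag, 1);
+	}
+
+	P1()
+	{
+		int r1;
+		int r2 = 0;
+
+		r1 = smp_load_acquire(&flag);
+		if (r1)
+			r2 = READ_ONCE(buf);
+	}
+
+The release fence forces P0 to execute the store to buf before the
+store to flag, and the acquire fence forces P1 to execute its load
+from flag before the (conditional) load from buf.  Together with the
+propagation constraints described next, this rules out the undesired
+r1 = 1, r2 = 0 outcome.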
+
+Second, some types of fence affect the way the memory subsystem
+propagates stores.  When a fence instruction is executed on CPU C:
+
+	For each other CPU C', smp_wmb() forces all po-earlier stores
+	on C to propagate to C' before any po-later stores do.
+
+	For each other CPU C', any store which propagates to C before
+	a release fence is executed (including all po-earlier
+	stores executed on C) is forced to propagate to C' before the
+	store associated with the release fence does.
+
+	Any store which propagates to C before a strong fence is
+	executed (including all po-earlier stores on C) is forced to
+	propagate to all other CPUs before any instructions po-after
+	the strong fence are executed on C.
+
+The propagation ordering enforced by release fences and strong fences
+affects stores from other CPUs that propagate to CPU C before the
+fence is executed, as well as stores that are executed on C before the
+fence.  We describe this property by saying that release fences and
+strong fences are A-cumulative.  By contrast, smp_wmb() fences are not
+A-cumulative; they only affect the propagation of stores that are
+executed on C before the fence (i.e., those which precede the fence in
+program order).
+
+rcu_read_lock(), rcu_read_unlock(), and synchronize_rcu() fences have
+other properties which we discuss later.
+
+
+PROPAGATION ORDER RELATION: cumul-fence
+---------------------------------------
+
+The fences which affect propagation order (i.e., strong, release, and
+smp_wmb() fences) are collectively referred to as cumul-fences, even
+though smp_wmb() isn't A-cumulative.  The cumul-fence relation is
+defined to link memory access events E and F whenever:
+
+	E and F are both stores on the same CPU and an smp_wmb() fence
+	event occurs between them in program order; or
+
+	F is a release fence and some X comes before F in program order,
+	where either X = E or else E ->rf X; or
+
+	A strong fence event occurs between some X and F in program
+	order, where either X = E or else E ->rf X.
+
+The operational model requires that whenever W and W' are both stores
+and W ->cumul-fence W', then W must propagate to any given CPU
+before W' does.  However, for different CPUs C and C', it does not
+require W to propagate to C before W' propagates to C'.
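+
+As an illustration of the release-fence clause and of A-cumulativity
+(a sketch):
+
+	int x = 0, y = 0;
+
+	P0()
+	{
+		WRITE_ONCE(x, 1);
+	}
+
+	P1()
+	{
+		int r1;
+
+		r1 = READ_ONCE(x);
+		smp_store_release(&y, 1);
+	}
+
+	P2()
+	{
+		int r2, r3;
+
+		r2 = READ_ONCE(y);
+		smp_rmb();
+		r3 = READ_ONCE(x);
+	}
+
+If r1 = 1 then P0's store to x is read by a load coming before the
+release fence in P1's program order, so the store to x is
+cumul-fence-linked to P1's store to y and must propagate to any given
+CPU before the store to y does.  Thus if r2 = 1 as well, the store to
+x reached P2 before P2's load from y executed, and since the smp_rmb()
+forces the load from x to execute even later, r3 must be 1.  Had P1
+used smp_wmb() instead of a release store, there would be no
+cumul-fence link (the smp_wmb() clause applies only to two stores on
+the same CPU) and the r1 = 1, r2 = 1, r3 = 0 outcome would be allowed.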
+
+
+DERIVATION OF THE LKMM FROM THE OPERATIONAL MODEL
+-------------------------------------------------
+
+The LKMM is derived from the restrictions imposed by the design
+outlined above.  These restrictions involve the necessity of
+maintaining cache coherence and the fact that a CPU can't operate on a
+value before it knows what that value is, among other things.
+
+The formal version of the LKMM is defined by five requirements, or
+axioms:
+
+	Sequential consistency per variable: This requires that the
+	system obey the four coherency rules.
+
+	Atomicity: This requires that atomic read-modify-write
+	operations really are atomic, that is, no other stores can
+	sneak into the middle of such an update.
+
+	Happens-before: This requires that certain instructions are
+	executed in a specific order.
+
+	Propagation: This requires that certain stores propagate to
+	CPUs and to RAM in a specific order.
+
+	Rcu: This requires that RCU read-side critical sections and
+	grace periods obey the rules of RCU, in particular, the
+	Grace-Period Guarantee.
+
+The first and second are quite common; they can be found in many
+memory models (such as those for C11/C++11).  The "happens-before" and
+"propagation" axioms have analogs in other memory models as well.  The
+"rcu" axiom is specific to the LKMM.
+
+Each of these axioms is discussed below.
+
+
+SEQUENTIAL CONSISTENCY PER VARIABLE
+-----------------------------------
+
+According to the principle of cache coherence, the stores to any fixed
+shared location in memory form a global ordering.  We can imagine
+inserting the loads from that location into this ordering, by placing
+each load between the store that it reads from and the following
+store.  This leaves the relative positions of loads that read from the
+same store unspecified; let's say they are inserted in program order,
+first for CPU 0, then CPU 1, etc.
+
+You can check that the four coherency rules imply that the rf, co, fr,
+and po-loc relations agree with this global ordering; in other words,
+whenever we have X ->rf Y or X ->co Y or X ->fr Y or X ->po-loc Y, the
+X event comes before the Y event in the global ordering.  The LKMM's
+"coherence" axiom expresses this by requiring the union of these
+relations not to have any cycles.  This means it must not be possible
+to find events
+
+	X0 -> X1 -> X2 -> ... -> Xn -> X0,
+
+where each of the links is either rf, co, fr, or po-loc.  This has to
+hold if the accesses to the fixed memory location can be ordered as
+cache coherence demands.
+
+Although it is not obvious, it can be shown that the converse is also
+true: This LKMM axiom implies that the four coherency rules are
+obeyed.
+
+
+ATOMIC UPDATES: rmw
+-------------------
+
+What does it mean to say that a read-modify-write (rmw) update, such
+as atomic_inc(&x), is atomic?  It means that the memory location (x in
+this case) does not get altered between the read and the write events
+making up the atomic operation.  In particular, if two CPUs perform
+atomic_inc(&x) concurrently, it must be guaranteed that the final
+value of x will be the initial value plus two.  We should never have
+the following sequence of events:
+
+	CPU 0 loads x obtaining 13;
+					CPU 1 loads x obtaining 13;
+	CPU 0 stores 14 to x;
+					CPU 1 stores 14 to x;
+
+where the final value of x is wrong (14 rather than 15).
+
+In this example, CPU 0's increment effectively gets lost because it
+occurs in between CPU 1's load and store.  To put it another way, the
+problem is that the position of CPU 0's store in x's coherence order
+is between the store that CPU 1 reads from and the store that CPU 1
+performs.
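+
+In code, this scenario corresponds to a sketch along these lines
+(using atomic_t, as a real atomic_inc() would):
+
+	atomic_t x = ATOMIC_INIT(13);
+
+	P0()
+	{
+		atomic_inc(&x);
+	}
+
+	P1()
+	{
+		atomic_inc(&x);
+	}
+
+Atomicity guarantees that the final value of x is 15; the interleaving
+shown above, which would leave x equal to 14, is forbidden.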
+
+The same analysis applies to all atomic update operations.  Therefore,
+to enforce atomicity the LKMM requires that atomic updates follow this
+rule: Whenever R and W are the read and write events composing an
+atomic read-modify-write and W' is the write event which R reads from,
+there must not be any stores coming between W' and W in the coherence
+order.  Equivalently,
+
+	(R ->rmw W) implies (there is no X with R ->fr X and X ->co W),
+
+where the rmw relation links the read and write events making up each
+atomic update.  This is what the LKMM's "atomic" axiom says.
+
+
+THE PRESERVED PROGRAM ORDER RELATION: ppo
+-----------------------------------------
+
+There are many situations where a CPU is obligated to execute two
+instructions in program order.  We amalgamate them into the ppo (for
+"preserved program order") relation, which links the po-earlier
+instruction to the po-later instruction and is thus a sub-relation of
+po.
+
+The operational model already includes a description of one such
+situation: Fences are a source of ppo links.  Suppose X and Y are
+memory accesses with X ->po Y; then the CPU must execute X before Y if
+any of the following hold:
+
+	A strong (smp_mb() or synchronize_rcu()) fence occurs between
+	X and Y;
+
+	X and Y are both stores and an smp_wmb() fence occurs between
+	them;
+
+	X and Y are both loads and an smp_rmb() fence occurs between
+	them;
+
+	X is also an acquire fence, such as smp_load_acquire();
+
+	Y is also a release fence, such as smp_store_release().
+
+Another possibility, not mentioned earlier but discussed in the next
+section, is:
+
+	X and Y are both loads, X ->addr Y (i.e., there is an address
+	dependency from X to Y), and X is a READ_ONCE() or an atomic
+	access.
+
+Dependencies can also cause instructions to be executed in program
+order.  This is uncontroversial when the second instruction is a
+store; either a data, address, or control dependency from a load R to
+a store W will force the CPU to execute R before W.  This is very
+simply because the CPU cannot tell the memory subsystem about W's
+store before it knows what value should be stored (in the case of a
+data dependency), what location it should be stored into (in the case
+of an address dependency), or whether the store should actually take
+place (in the case of a control dependency).
+
+Dependencies to load instructions are more problematic.  To begin with,
+there is no such thing as a data dependency to a load.  Next, a CPU
+has no reason to respect a control dependency to a load, because it
+can always satisfy the second load speculatively before the first, and
+then ignore the result if it turns out that the second load shouldn't
+be executed after all.  And lastly, the real difficulties begin when
+we consider address dependencies to loads.
+
+To be fair about it, all Linux-supported architectures do execute
+loads in program order if there is an address dependency between them.
+After all, a CPU cannot ask the memory subsystem to load a value from
+a particular location before it knows what that location is.  However,
+the split-cache design used by Alpha can cause it to behave in a way
+that looks as if the loads were executed out of order (see the next
+section for more details).  The kernel includes a workaround for this
+problem when the loads come from READ_ONCE(), and therefore the LKMM
+includes address dependencies to loads in the ppo relation.
+
+On the other hand, dependencies can indirectly affect the ordering of
+two loads.  This happens when there is a dependency from a load to a
+store and a second, po-later load reads from that store:
+
+	R ->dep W ->rfi R',
+
+where the dep link can be either an address or a data dependency.  In
+this situation we know it is possible for the CPU to execute R' before
+W, because it can forward the value that W will store to R'.  But it
+cannot execute R' before R, because it cannot forward the value before
+it knows what that value is, or that W and R' do access the same
+location.  However, if there is merely a control dependency between R
+and W then the CPU can speculatively forward W to R' before executing
+R; if the speculation turns out to be wrong then the CPU merely has to
+restart or abandon R'.
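+
+In code, the R ->dep W ->rfi R' pattern might look like this sketch:
+
+	int x, y;
+
+	P0()
+	{
+		int r1, r2;
+
+		r1 = READ_ONCE(x);
+		WRITE_ONCE(y, r1 + 1);
+		r2 = READ_ONCE(y);
+	}
+
+Here R is the load from x, W is the store to y (linked to R by a data
+dependency), and R' is the load from y, which reads from W -- possibly
+by having W's value forwarded to it before W itself executes.  The CPU
+may execute R' before W, but not before R.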
+
+(In theory, a CPU might forward a store to a load when it runs across
+an address dependency like this:
+
+	r1 = READ_ONCE(ptr);
+	WRITE_ONCE(*r1, 17);
+	r2 = READ_ONCE(*r1);
+
+because it could tell that the store and the second load access the
+same location even before it knows what the location's address is.
+However, none of the architectures supported by the Linux kernel do
+this.)
+
+Two memory accesses of the same location must always be executed in
+program order if the second access is a store.  Thus, if we have
+
+	R ->po-loc W
+
+(the po-loc link says that R comes before W in program order and they
+access the same location), the CPU is obliged to execute W after R.
+If it executed W first then the memory subsystem would respond to R's
+read request with the value stored by W (or an even later store), in
+violation of the read-write coherence rule.  Similarly, if we had
+
+	W ->po-loc W'
+
+and the CPU executed W' before W, then the memory subsystem would put
+W' before W in the coherence order.  It would effectively cause W to
+overwrite W', in violation of the write-write coherence rule.
+(Interestingly, an early ARMv8 memory model, now obsolete, proposed
+allowing out-of-order writes like this to occur.  The model avoided
+violating the write-write coherence rule by requiring the CPU not to
+send the W write to the memory subsystem at all!)
+
+
+AND THEN THERE WAS ALPHA
+------------------------
+
+As mentioned above, the Alpha architecture is unique in that it does
+not appear to respect address dependencies to loads.  This means that
+code such as the following:
+
+	int x = 0;
+	int y = -1;
+	int *ptr = &y;
+
+	P0()
+	{
+		WRITE_ONCE(x, 1);
+		smp_wmb();
+		WRITE_ONCE(ptr, &x);
+	}
+
+	P1()
+	{
+		int *r1;
+		int r2;
+
+		r1 = ptr;
+		r2 = READ_ONCE(*r1);
+	}
+
+can malfunction on Alpha systems (notice that P1 uses an ordinary load
+to read ptr instead of READ_ONCE()).  It is quite possible that r1 = &x
+and r2 = 0 at the end, in spite of the address dependency.
+
+At first glance this doesn't seem to make sense.  We know that the
+smp_wmb() forces P0's store to x to propagate to P1 before the store
+to ptr does.  And since P1 can't execute its second load
+until it knows what location to load from, i.e., after executing its
+first load, the value x = 1 must have propagated to P1 before the
+second load executed.  So why doesn't r2 end up equal to 1?
+
+The answer lies in the Alpha's split local caches.  Although the two
+stores do reach P1's local cache in the proper order, it can happen
+that the first store is processed by a busy part of the cache while
+the second store is processed by an idle part.  As a result, the x = 1
+value may not become available for P1's CPU to read until after the
+ptr = &x value does, leading to the undesirable result above.  The
+final effect is that even though the two loads really are executed in
+program order, it appears that they aren't.
+
+This could not have happened if the local cache had processed the
+incoming stores in FIFO order.  By contrast, other architectures
+maintain at least the appearance of FIFO order.
+
+In practice, this difficulty is solved by inserting a special fence
+between P1's two loads when the kernel is compiled for the Alpha
+architecture.  In fact, as of version 4.15, the kernel automatically
+adds this fence (called smp_read_barrier_depends() and defined as
+nothing at all on non-Alpha builds) after every READ_ONCE() and atomic
+load.  The effect of the fence is to cause the CPU not to execute any
+po-later instructions until after the local cache has finished
+processing all the stores it has already received.  Thus, if the code
+was changed to:
+
+	P1()
+	{
+		int *r1;
+		int r2;
+
+		r1 = READ_ONCE(ptr);
+		r2 = READ_ONCE(*r1);
+	}
+
+then we would never get r1 = &x and r2 = 0.  By the time P1 executed
+its second load, the x = 1 store would already be fully processed by
+the local cache and available for satisfying the read request.  Thus
+we have yet another reason why shared data should always be read with
+READ_ONCE() or another synchronization primitive rather than accessed
+directly.
+
+The LKMM requires that smp_rmb(), acquire fences, and strong fences
+share this property with smp_read_barrier_depends(): They do not allow
+the CPU to execute any po-later instructions (or po-later loads in the
+case of smp_rmb()) until all outstanding stores have been processed by
+the local cache.  In the case of a strong fence, the CPU first has to
+wait for all of its po-earlier stores to propagate to every other CPU
+in the system; then it has to wait for the local cache to process all
+the stores received as of that time -- not just the stores received
+when the strong fence began.
+
+And of course, none of this matters for any architecture other than
+Alpha.
+
+
+THE HAPPENS-BEFORE RELATION: hb
+-------------------------------
+
+The happens-before relation (hb) links memory accesses that have to
+execute in a certain order.  hb includes the ppo relation and two
+others, one of which is rfe.
+
+W ->rfe R implies that W and R are on different CPUs.  It also means
+that W's store must have propagated to R's CPU before R executed;
+otherwise R could not have read the value stored by W.  Therefore W
+must have executed before R, and so we have W ->hb R.
+
+The equivalent fact need not hold if W ->rfi R (i.e., W and R are on
+the same CPU).  As we have already seen, the operational model allows
+W's value to be forwarded to R in such cases, meaning that R may well
+execute before W does.
+
+It's important to understand that neither coe nor fre is included in
+hb, despite their similarities to rfe.  For example, suppose we have
+W ->coe W'.  This means that W and W' are stores to the same location,
+they execute on different CPUs, and W comes before W' in the coherence
+order (i.e., W' overwrites W).  Nevertheless, it is possible for W' to
+execute before W, because the decision as to which store overwrites
+the other is made later by the memory subsystem.  When the stores are
+nearly simultaneous, either one can come out on top.  Similarly,
+R ->fre W means that W overwrites the value which R reads, but it
+doesn't mean that W has to execute after R.  All that's necessary is
+for the memory subsystem not to propagate W to R's CPU until after R
+has executed, which is possible if W executes shortly before R.
+
+The third relation included in hb is like ppo, in that it only links
+events that are on the same CPU.  However it is more difficult to
+explain, because it arises only indirectly from the requirement of
+cache coherence.  The relation is called prop, and it links two events
+on CPU C in situations where a store from some other CPU comes after
+the first event in the coherence order and propagates to C before the
+second event executes.
+
+This is best explained with some examples.  The simplest case looks
+like this:
+
+	int x;
+
+	P0()
+	{
+		int r1;
+
+		WRITE_ONCE(x, 1);
+		r1 = READ_ONCE(x);
+	}
+
+	P1()
+	{
+		WRITE_ONCE(x, 8);
+	}
+
+If r1 = 8 at the end then P0's accesses must have executed in program
+order.  We can deduce this from the operational model; if P0's load
+had executed before its store then the value of the store would have
+been forwarded to the load, so r1 would have ended up equal to 1, not
+8.  In this case there is a prop link from P0's write event to its read
+event, because P1's store came after P0's store in x's coherence
+order, and P1's store propagated to P0 before P0's load executed.
+
+An equally simple case involves two loads of the same location that
+read from different stores:
+
+	int x = 0;
+
+	P0()
+	{
+		int r1, r2;
+
+		r1 = READ_ONCE(x);
+		r2 = READ_ONCE(x);
+	}
+
+	P1()
+	{
+		WRITE_ONCE(x, 9);
+	}
+
+If r1 = 0 and r2 = 9 at the end then P0's accesses must have executed
+in program order.  If the second load had executed before the first
+then the x = 9 store must have been propagated to P0 before the first
+load executed, and so r1 would have been 9 rather than 0.  In this
+case there is a prop link from P0's first read event to its second,
+because P1's store overwrote the value read by P0's first load, and
+P1's store propagated to P0 before P0's second load executed.
+
+Less trivial examples of prop all involve fences.  Unlike the simple
+examples above, they can require that some instructions are executed
+out of program order.  This next one should look familiar:
+
+	int buf = 0, flag = 0;
+
+	P0()
+	{
+		WRITE_ONCE(buf, 1);
+		smp_wmb();
+		WRITE_ONCE(flag, 1);
+	}
+
+	P1()
+	{
+		int r1;
+		int r2;
+
+		r1 = READ_ONCE(flag);
+		r2 = READ_ONCE(buf);
+	}
+
+This is the MP pattern again, with an smp_wmb() fence between the two
+stores.  If r1 = 1 and r2 = 0 at the end then there is a prop link
+from P1's second load to its first (backwards!).  The reason is
+similar to the previous examples: The value P1 loads from buf gets
+overwritten by P0's store to buf, the fence guarantees that the store
+to buf will propagate to P1 before the store to flag does, and the
+store to flag propagates to P1 before P1 reads flag.
+
+The prop link says that in order to obtain the r1 = 1, r2 = 0 result,
+P1 must execute its second load before the first.  Indeed, if the load
+from flag were executed first, then the buf = 1 store would already
+have propagated to P1 by the time P1's load from buf executed, so r2
+would have been 1 at the end, not 0.  (The reasoning holds even for
+Alpha, although the details are more complicated and we will not go
+into them.)
+
+But what if we put an smp_rmb() fence between P1's loads?  The fence
+would force the two loads to be executed in program order, and it
+would generate a cycle in the hb relation: The fence would create a ppo
+link (hence an hb link) from the first load to the second, and the
+prop relation would give an hb link from the second load to the first.
+Since an instruction can't execute before itself, we are forced to
+conclude that if an smp_rmb() fence is added, the r1 = 1, r2 = 0
+outcome is impossible -- as it should be.
+
+The formal definition of the prop relation involves a coe or fre link,
+followed by an arbitrary number of cumul-fence links, ending with an
+rfe link.  You can concoct more exotic examples, containing more than
+one fence, although this quickly leads to diminishing returns in terms
+of complexity.  For instance, here's an example containing a coe link
+followed by two cumul-fences and an rfe link, utilizing the fact that
+release fences are A-cumulative:
+
+	int x, y, z;
+
+	P0()
+	{
+		int r0;
+
+		WRITE_ONCE(x, 1);
+		r0 = READ_ONCE(z);
+	}
+
+	P1()
+	{
+		WRITE_ONCE(x, 2);
+		smp_wmb();
+		WRITE_ONCE(y, 1);
+	}
+
+	P2()
+	{
+		int r2;
+
+		r2 = READ_ONCE(y);
+		smp_store_release(&z, 1);
+	}
+
+If x = 2, r0 = 1, and r2 = 1 after this code runs then there is a prop
+link from P0's store to its load.  This is because P0's store gets
+overwritten by P1's store since x = 2 at the end (a coe link), the
+smp_wmb() ensures that P1's store to x propagates to P2 before the
+store to y does (the first cumul-fence), the store to y propagates to P2
+before P2's load and store execute, P2's smp_store_release()
+guarantees that the stores to x and y both propagate to P0 before the
+store to z does (the second cumul-fence), and P0's load executes after the
+store to z has propagated to P0 (an rfe link).
+
+In summary, the fact that the hb relation links memory access events
+in the order they execute means that it must not have cycles.  This
+requirement is the content of the LKMM's "happens-before" axiom.
+
+The LKMM defines yet another relation connected to times of
+instruction execution, but it is not included in hb.  It relies on the
+particular properties of strong fences, which we cover in the next
+section.
+
+
+THE PROPAGATES-BEFORE RELATION: pb
+----------------------------------
+
+The propagates-before (pb) relation capitalizes on the special
+features of strong fences.  It links two events E and F whenever some
+store is coherence-later than E and propagates to every CPU and to RAM
+before F executes.  The formal definition requires that E be linked to
+F via a coe or fre link, an arbitrary number of cumul-fences, an
+optional rfe link, a strong fence, and an arbitrary number of hb
+links.  Let's see how this definition works out.
+
+Consider first the case where E is a store (implying that the sequence
+of links begins with coe).  Then there are events W, X, Y, and Z such
+that:
+
+	E ->coe W ->cumul-fence* X ->rfe? Y ->strong-fence Z ->hb* F,
+
+where the * suffix indicates an arbitrary number of links of the
+specified type, and the ? suffix indicates the link is optional (Y may
+be equal to X).  Because of the cumul-fence links, we know that W will
+propagate to Y's CPU before X does, hence before Y executes and hence
+before the strong fence executes.  Because this fence is strong, we
+know that W will propagate to every CPU and to RAM before Z executes.
+And because of the hb links, we know that Z will execute before F.
+Thus W, which comes later than E in the coherence order, will
+propagate to every CPU and to RAM before F executes.
+
+The case where E is a load is exactly the same, except that the first
+link in the sequence is fre instead of coe.
+
+The existence of a pb link from E to F implies that E must execute
+before F.  To see why, suppose that F executed first.  Then W would
+have propagated to E's CPU before E executed.  If E was a store, the
+memory subsystem would then be forced to make E come after W in the
+coherence order, contradicting the fact that E ->coe W.  If E was a
+load, the memory subsystem would then be forced to satisfy E's read
+request with the value stored by W or an even later store,
+contradicting the fact that E ->fre W.
+
+A good example illustrating how pb works is the SB pattern with strong
+fences:
+
+	int x = 0, y = 0;
+
+	P0()
+	{
+		int r0;
+
+		WRITE_ONCE(x, 1);
+		smp_mb();
+		r0 = READ_ONCE(y);
+	}
+
+	P1()
+	{
+		int r1;
+
+		WRITE_ONCE(y, 1);
+		smp_mb();
+		r1 = READ_ONCE(x);
+	}
+
+If r0 = 0 at the end then there is a pb link from P0's load to P1's
+load: an fre link from P0's load to P1's store (which overwrites the
+value read by P0), and a strong fence between P1's store and its load.
+In this example, the sequences of cumul-fence and hb links are empty.
+Note that this pb link is not included in hb as an instance of prop,
+because it does not start and end on the same CPU.
+
+Similarly, if r1 = 0 at the end then there is a pb link from P1's load
+to P0's.  This means that if both r0 and r1 were 0 there would be a
+cycle in pb, which is not possible since an instruction cannot execute
+before itself.  Thus, adding smp_mb() fences to the SB pattern
+prevents the r0 = 0, r1 = 0 outcome.
+
+In summary, the fact that the pb relation links events in the order
+they execute means that it cannot have cycles.  This requirement is
+the content of the LKMM's "propagation" axiom.
+
+
+RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb
+-------------------------------------------------------------
+
+RCU (Read-Copy-Update) is a powerful synchronization mechanism.  It
+rests on two concepts: grace periods and read-side critical sections.
+
+A grace period is the span of time occupied by a call to
+synchronize_rcu().  A read-side critical section (or just critical
+section, for short) is a region of code delimited by rcu_read_lock()
+at the start and rcu_read_unlock() at the end.  Critical sections can
+be nested, although we won't make use of this fact.
+
+As far as memory models are concerned, RCU's main feature is its
+Grace-Period Guarantee, which states that a critical section can never
+span a full grace period.  In more detail, the Guarantee says:
+
+	For any critical section C and any grace period G, at least
+	one of the following statements must hold:
+
+(1)	C ends before G does, and in addition, every store that
+	propagates to C's CPU before the end of C must propagate to
+	every CPU before G ends.
+
+(2)	G starts before C does, and in addition, every store that
+	propagates to G's CPU before the start of G must propagate
+	to every CPU before C starts.
+
+In particular, it is not possible for a critical section to both start
+before and end after a grace period.
+
+Here is a simple example of RCU in action:
+
+	int x, y;
+
+	P0()
+	{
+		rcu_read_lock();
+		WRITE_ONCE(x, 1);
+		WRITE_ONCE(y, 1);
+		rcu_read_unlock();
+	}
+
+	P1()
+	{
+		int r1, r2;
+
+		r1 = READ_ONCE(x);
+		synchronize_rcu();
+		r2 = READ_ONCE(y);
+	}
+
+The Grace Period Guarantee tells us that when this code runs, it will
+never end with r1 = 1 and r2 = 0.  The reasoning is as follows.  r1 = 1
+means that P0's store to x propagated to P1 before P1 called
+synchronize_rcu(), so P0's critical section must have started before
+P1's grace period, contrary to part (2) of the Guarantee.  On the
+other hand, r2 = 0 means that P0's store to y, which occurs before the
+end of the critical section, did not propagate to P1 before the end of
+the grace period, contrary to part (1).  Together the results violate
+the Guarantee.
+
+In the kernel's implementations of RCU, the requirements for stores
+to propagate to every CPU are fulfilled by placing strong fences at
+suitable places in the RCU-related code.  Thus, if a critical section
+starts before a grace period does then the critical section's CPU will
+execute an smp_mb() fence after the end of the critical section and
+some time before the grace period's synchronize_rcu() call returns.
+And if a critical section ends after a grace period does then the
+synchronize_rcu() routine will execute an smp_mb() fence at its start
+and some time before the critical section's opening rcu_read_lock()
+executes.
+
+What exactly do we mean by saying that a critical section "starts
+before" or "ends after" a grace period?  Some aspects of the meaning
+are pretty obvious, as in the example above, but the details aren't
+entirely clear.  The LKMM formalizes this notion by means of the
+rcu-link relation.  rcu-link encompasses a very general notion of
+"before": If E and F are RCU fence events (i.e., rcu_read_lock(),
+rcu_read_unlock(), or synchronize_rcu()) then among other things,
+E ->rcu-link F includes cases where E is po-before some memory-access
+event X, F is po-after some memory-access event Y, and we have any of
+X ->rfe Y, X ->co Y, or X ->fr Y.
+
+The formal definition of the rcu-link relation is more than a little
+obscure, and we won't give it here.  It is closely related to the pb
+relation, and the details don't matter unless you want to comb through
+a somewhat lengthy formal proof.  Pretty much all you need to know
+about rcu-link is the information in the preceding paragraph.
+
+The LKMM also defines the rcu-gp and rcu-rscsi relations.  They bring
+grace periods and read-side critical sections into the picture, in the
+following way:
+
+	E ->rcu-gp F means that E and F are in fact the same event,
+	and that event is a synchronize_rcu() fence (i.e., a grace
+	period).
+
+	E ->rcu-rscsi F means that E and F are the rcu_read_unlock()
+	and rcu_read_lock() fence events delimiting some read-side
+	critical section.  (The 'i' at the end of the name emphasizes
+	that this relation is "inverted": It links the end of the
+	critical section to the start.)
+
+If we think of the rcu-link relation as standing for an extended
+"before", then X ->rcu-gp Y ->rcu-link Z roughly says that X is a
+grace period which ends before Z begins.  (In fact it covers more than
+this, because it also includes cases where some store propagates to
+Z's CPU before Z begins but doesn't propagate to some other CPU until
+after X ends.)  Similarly, X ->rcu-rscsi Y ->rcu-link Z says that X is
+the end of a critical section which starts before Z begins.
+
+The LKMM goes on to define the rcu-fence relation as a sequence of
+rcu-gp and rcu-rscsi links separated by rcu-link links, in which the
+number of rcu-gp links is >= the number of rcu-rscsi links.  For
+example:
+
+	X ->rcu-gp Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V
+
+would imply that X ->rcu-fence V, because this sequence contains two
+rcu-gp links and one rcu-rscsi link.  (It also implies that
+X ->rcu-fence T and Z ->rcu-fence V.)  On the other hand:
+
+	X ->rcu-rscsi Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V
+
+does not imply X ->rcu-fence V, because the sequence contains only
+one rcu-gp link but two rcu-rscsi links.
+
+The rcu-fence relation is important because the Grace Period Guarantee
+means that rcu-fence acts kind of like a strong fence.  In particular,
+E ->rcu-fence F implies not only that E begins before F ends, but also
+that any write po-before E will propagate to every CPU before any
+instruction po-after F can execute.  (However, it does not imply that
+E must execute before F; in fact, each synchronize_rcu() fence event
+is linked to itself by rcu-fence as a degenerate case.)
+
+To prove this in full generality requires some intellectual effort.
+We'll consider just a very simple case:
+
+	G ->rcu-gp W ->rcu-link Z ->rcu-rscsi F.
+
+This formula means that G and W are the same event (a grace period),
+and there are events X, Y and a read-side critical section C such that:
+
+	1. G = W is po-before or equal to X;
+
+	2. X comes "before" Y in some sense (including rfe, co and fr);
+
+	3. Y is po-before Z;
+
+	4. Z is the rcu_read_unlock() event marking the end of C;
+
+	5. F is the rcu_read_lock() event marking the start of C.
+
+From 1 - 4 we deduce that the grace period G ends before the critical
+section C.  Then part (2) of the Grace Period Guarantee says not only
+that G starts before C does, but also that any write which executes on
+G's CPU before G starts must propagate to every CPU before C starts.
+In particular, the write propagates to every CPU before F finishes
+executing and hence before any instruction po-after F can execute.
+This sort of reasoning can be extended to handle all the situations
+covered by rcu-fence.
+
+Finally, the LKMM defines the RCU-before (rb) relation in terms of
+rcu-fence.  This is done in essentially the same way as the pb
+relation was defined in terms of strong-fence.  We will omit the
+details; the end result is that E ->rb F implies E must execute
+before F, just as E ->pb F does (and for much the same reasons).
+
+Putting this all together, the LKMM expresses the Grace Period
+Guarantee by requiring that the rb relation does not contain a cycle.
+Equivalently, this "rcu" axiom requires that there are no events E
+and F with E ->rcu-link F ->rcu-fence E.  Or to put it a third way,
+the axiom requires that there are no cycles consisting of rcu-gp and
+rcu-rscsi alternating with rcu-link, where the number of rcu-gp links
+is >= the number of rcu-rscsi links.
+
+Justifying the axiom isn't easy, but it is in fact a valid
+formalization of the Grace Period Guarantee.  We won't attempt to go
+through the detailed argument, but the following analysis gives a
+taste of what is involved.  Suppose both parts of the Guarantee are
+violated: A critical section starts before a grace period, and some
+store propagates to the critical section's CPU before the end of the
+critical section but doesn't propagate to some other CPU until after
+the end of the grace period.
+
+Putting symbols to these ideas, let L and U be the rcu_read_lock() and
+rcu_read_unlock() fence events delimiting the critical section in
+question, and let S be the synchronize_rcu() fence event for the grace
+period.  Saying that the critical section starts before S means there
+are events Q and R where Q is po-after L (which marks the start of the
+critical section), Q is "before" R in the sense used by the rcu-link
+relation, and R is po-before the grace period S.  Thus we have:
+
+	L ->rcu-link S.
+
+Let W be the store mentioned above, let Y come before the end of the
+critical section and witness that W propagates to the critical
+section's CPU by reading from W, and let Z on some arbitrary CPU be a
+witness that W has not propagated to that CPU, where Z happens after
+some event X which is po-after S.  Symbolically, this amounts to:
+
+	S ->po X ->hb* Z ->fr W ->rf Y ->po U.
+
+The fr link from Z to W indicates that W has not propagated to Z's CPU
+at the time that Z executes.  From this, it can be shown (see the
+discussion of the rcu-link relation earlier) that S and U are related
+by rcu-link:
+
+	S ->rcu-link U.
+
+Since S is a grace period we have S ->rcu-gp S, and since L and U are
+the start and end of the critical section C we have U ->rcu-rscsi L.
+From this we obtain:
+
+	S ->rcu-gp S ->rcu-link U ->rcu-rscsi L ->rcu-link S,
+
+a forbidden cycle.  Thus the "rcu" axiom rules out this violation of
+the Grace Period Guarantee.
+
+For something a little more down-to-earth, let's see how the axiom
+works out in practice.  Consider the RCU code example from above, this
+time with statement labels added:
+
+	int x, y;
+
+	P0()
+	{
+		L: rcu_read_lock();
+		X: WRITE_ONCE(x, 1);
+		Y: WRITE_ONCE(y, 1);
+		U: rcu_read_unlock();
+	}
+
+	P1()
+	{
+		int r1, r2;
+
+		Z: r1 = READ_ONCE(x);
+		S: synchronize_rcu();
+		W: r2 = READ_ONCE(y);
+	}
+
+If r2 = 0 at the end then P0's store at Y overwrites the value that
+P1's load at W reads from, so we have W ->fre Y.  Since S ->po W and
+also Y ->po U, we get S ->rcu-link U.  In addition, S ->rcu-gp S
+because S is a grace period.
+
+If r1 = 1 at the end then P1's load at Z reads from P0's store at X,
+so we have X ->rfe Z.  Together with L ->po X and Z ->po S, this
+yields L ->rcu-link S.  And since L and U are the start and end of a
+critical section, we have U ->rcu-rscsi L.
+
+Then U ->rcu-rscsi L ->rcu-link S ->rcu-gp S ->rcu-link U is a
+forbidden cycle, violating the "rcu" axiom.  Hence the outcome is not
+allowed by the LKMM, as we would expect.
+
+For contrast, let's see what can happen in a more complicated example:
+
+	int x, y, z;
+
+	P0()
+	{
+		int r0;
+
+		L0: rcu_read_lock();
+		    r0 = READ_ONCE(x);
+		    WRITE_ONCE(y, 1);
+		U0: rcu_read_unlock();
+	}
+
+	P1()
+	{
+		int r1;
+
+		    r1 = READ_ONCE(y);
+		S1: synchronize_rcu();
+		    WRITE_ONCE(z, 1);
+	}
+
+	P2()
+	{
+		int r2;
+
+		L2: rcu_read_lock();
+		    r2 = READ_ONCE(z);
+		    WRITE_ONCE(x, 1);
+		U2: rcu_read_unlock();
+	}
+
+If r0 = r1 = r2 = 1 at the end, then similar reasoning to before shows
+that U0 ->rcu-rscsi L0 ->rcu-link S1 ->rcu-gp S1 ->rcu-link U2 ->rcu-rscsi
+L2 ->rcu-link U0.  However this cycle is not forbidden, because the
+sequence of relations contains fewer instances of rcu-gp (one) than of
+rcu-rscsi (two).  Consequently the outcome is allowed by the LKMM.
+The following instruction timing diagram shows how it might actually
+occur:
+
+P0			P1			P2
+--------------------	--------------------	--------------------
+rcu_read_lock()
+WRITE_ONCE(y, 1)
+			r1 = READ_ONCE(y)
+			synchronize_rcu() starts
+			.			rcu_read_lock()
+			.			WRITE_ONCE(x, 1)
+r0 = READ_ONCE(x)	.
+rcu_read_unlock()	.
+			synchronize_rcu() ends
+			WRITE_ONCE(z, 1)
+						r2 = READ_ONCE(z)
+						rcu_read_unlock()
+
+This requires P0 and P2 to execute their loads and stores out of
+program order, but of course they are allowed to do so.  And as you
+can see, the Grace Period Guarantee is not violated: The critical
+section in P0 both starts before P1's grace period does and ends
+before it does, and the critical section in P2 both starts after P1's
+grace period does and ends after it does.
+
+Addendum: The LKMM now supports SRCU (Sleepable Read-Copy-Update) in
+addition to normal RCU.  The ideas involved are much the same as
+above, with new relations srcu-gp and srcu-rscsi added to represent
+SRCU grace periods and read-side critical sections.  There is a
+restriction on the srcu-gp and srcu-rscsi links that can appear in an
+rcu-fence sequence (the srcu-rscsi links must be paired with srcu-gp
+links having the same SRCU domain with proper nesting); the details
+are relatively unimportant.
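+
+Purely for illustration, here is a hedged SRCU analog of the first RCU
+example above; the srcu_struct ss is a hypothetical addition.  Because
+both processes use the same SRCU domain, the r1 = 1, r2 = 0 outcome is
+forbidden just as it is in the RCU version:
+
+	int x, y;
+	struct srcu_struct ss;
+
+	P0()
+	{
+		int idx;
+
+		idx = srcu_read_lock(&ss);
+		WRITE_ONCE(x, 1);
+		WRITE_ONCE(y, 1);
+		srcu_read_unlock(&ss, idx);
+	}
+
+	P1()
+	{
+		int r1, r2;
+
+		r1 = READ_ONCE(x);
+		synchronize_srcu(&ss);
+		r2 = READ_ONCE(y);
+	}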
+
+
+LOCKING
+-------
+
+The LKMM includes locking.  In fact, there is special code for locking
+in the formal model, added in order to make tools run faster.
+However, this special code is intended to be more or less equivalent
+to concepts we have already covered.  A spinlock_t variable is treated
+the same as an int, and spin_lock(&s) is treated almost the same as:
+
+	while (cmpxchg_acquire(&s, 0, 1) != 0)
+		cpu_relax();
+
+This waits until s is equal to 0 and then atomically sets it to 1,
+and the read part of the cmpxchg operation acts as an acquire fence.
+An alternate way to express the same thing would be:
+
+	r = xchg_acquire(&s, 1);
+
+along with a requirement that at the end, r = 0.  Similarly,
+spin_trylock(&s) is treated almost the same as:
+
+	return !cmpxchg_acquire(&s, 0, 1);
+
+which atomically sets s to 1 if it is currently equal to 0 and returns
+true if it succeeds (the read part of the cmpxchg operation acts as an
+acquire fence only if the operation is successful).  spin_unlock(&s)
+is treated almost the same as:
+
+	smp_store_release(&s, 0);
+
+The "almost" qualifiers above need some explanation.  In the LKMM, the
+store-release in a spin_unlock() and the load-acquire which forms the
+first half of the atomic rmw update in a spin_lock() or a successful
+spin_trylock() -- we can call these things lock-releases and
+lock-acquires -- have two properties beyond those of ordinary releases
+and acquires.
+
+First, when a lock-acquire reads from a lock-release, the LKMM
+requires that every instruction po-before the lock-release must
+execute before any instruction po-after the lock-acquire.  This would
+naturally hold if the release and acquire operations were on different
+CPUs, but the LKMM says it holds even when they are on the same CPU.
+For example:
+
+	int x, y;
+	spinlock_t s;
+
+	P0()
+	{
+		int r1, r2;
+
+		spin_lock(&s);
+		r1 = READ_ONCE(x);
+		spin_unlock(&s);
+		spin_lock(&s);
+		r2 = READ_ONCE(y);
+		spin_unlock(&s);
+	}
+
+	P1()
+	{
+		WRITE_ONCE(y, 1);
+		smp_wmb();
+		WRITE_ONCE(x, 1);
+	}
+
+Here the second spin_lock() reads from the first spin_unlock(), and
+therefore the load of x must execute before the load of y.  Thus we
+cannot have r1 = 1 and r2 = 0 at the end (this is an instance of the
+MP pattern).
+
+This requirement does not apply to ordinary release and acquire
+fences, only to lock-related operations.  For instance, suppose P0()
+in the example had been written as:
+
+	P0()
+	{
+		int r1, r2, r3;
+
+		r1 = READ_ONCE(x);
+		smp_store_release(&s, 1);
+		r3 = smp_load_acquire(&s);
+		r2 = READ_ONCE(y);
+	}
+
+Then the CPU would be allowed to forward the s = 1 value from the
+smp_store_release() to the smp_load_acquire(), executing the
+instructions in the following order:
+
+		r3 = smp_load_acquire(&s);	// Obtains r3 = 1
+		r2 = READ_ONCE(y);
+		r1 = READ_ONCE(x);
+		smp_store_release(&s, 1);	// Value is forwarded
+
+and thus it could load y before x, obtaining r2 = 0 and r1 = 1.
+
+Second, when a lock-acquire reads from a lock-release, and some other
+stores W and W' occur po-before the lock-release and po-after the
+lock-acquire respectively, the LKMM requires that W must propagate to
+each CPU before W' does.  For example, consider:
+
+	int x, y;
+	spinlock_t s;
+
+	P0()
+	{
+		spin_lock(&s);
+		WRITE_ONCE(x, 1);
+		spin_unlock(&s);
+	}
+
+	P1()
+	{
+		int r1;
+
+		spin_lock(&s);
+		r1 = READ_ONCE(x);
+		WRITE_ONCE(y, 1);
+		spin_unlock(&s);
+	}
+
+	P2()
+	{
+		int r2, r3;
+
+		r2 = READ_ONCE(y);
+		smp_rmb();
+		r3 = READ_ONCE(x);
+	}
+
+If r1 = 1 at the end then the spin_lock() in P1 must have read from
+the spin_unlock() in P0.  Hence the store to x must propagate to P2
+before the store to y does, so we cannot have r2 = 1 and r3 = 0.
+
+These two special requirements for lock-release and lock-acquire do
+not arise from the operational model.  Nevertheless, kernel developers
+have come to expect and rely on them because they do hold on all
+architectures supported by the Linux kernel, albeit for various
+differing reasons.
+
+
+ODDS AND ENDS
+-------------
+
+This section covers material that didn't quite fit anywhere in the
+earlier sections.
+
+The descriptions in this document don't always match the formal
+version of the LKMM exactly.  For example, the actual formal
+definition of the prop relation makes the initial coe or fre part
+optional, and it doesn't require the events linked by the relation to
+be on the same CPU.  These differences are very unimportant; indeed,
+instances where the coe/fre part of prop is missing are of no interest
+because all the other parts (fences and rfe) are already included in
+hb anyway, and where the formal model adds prop into hb, it includes
+an explicit requirement that the events being linked are on the same
+CPU.
+
+Another minor difference has to do with events that are both memory
+accesses and fences, such as those corresponding to smp_load_acquire()
+calls.  In the formal model, these events aren't actually both reads
+and fences; rather, they are read events with an annotation marking
+them as acquires.  (Or write events annotated as releases, in the case
+smp_store_release().)  The final effect is the same.
+
+Although we didn't mention it above, the instruction execution
+ordering provided by the smp_rmb() fence doesn't apply to read events
+that are part of a non-value-returning atomic update.  For instance,
+given:
+
+	atomic_inc(&x);
+	smp_rmb();
+	r1 = READ_ONCE(y);
+
+it is not guaranteed that the load from y will execute after the
+update to x.  This is because the ARMv8 architecture allows
+non-value-returning atomic operations effectively to be executed off
+the CPU.  Basically, the CPU tells the memory subsystem to increment
+x, and then the increment is carried out by the memory hardware with
+no further involvement from the CPU.  Since the CPU doesn't ever read
+the value of x, there is nothing for the smp_rmb() fence to act on.
+
+The LKMM defines a few extra synchronization operations in terms of
+things we have already covered.  In particular, rcu_dereference() is
+treated as READ_ONCE() and rcu_assign_pointer() is treated as
+smp_store_release() -- which is basically how the Linux kernel treats
+them.
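+
+For instance, as far as the memory model is concerned, the two
+statements in each of the following pairs are treated identically
+(a sketch; p, q, and x stand for arbitrary variables):
+
+	rcu_assign_pointer(p, &x);	/* modeled as ... */
+	smp_store_release(&p, &x);	/* ... this store-release */
+
+	q = rcu_dereference(p);		/* modeled as ... */
+	q = READ_ONCE(p);		/* ... this marked load */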
+
+There are a few oddball fences which need special treatment:
+smp_mb__before_atomic(), smp_mb__after_atomic(), and
+smp_mb__after_spinlock().  The LKMM uses fence events with special
+annotations for them; they act as strong fences just like smp_mb()
+except for the sets of events that they order.  Instead of ordering
+all po-earlier events against all po-later events, as smp_mb() does,
+they behave as follows:
+
+	smp_mb__before_atomic() orders all po-earlier events against
+	po-later atomic updates and the events following them;
+
+	smp_mb__after_atomic() orders po-earlier atomic updates and
+	the events preceding them against all po-later events;
+
+	smp_mb__after_spinlock() orders po-earlier lock acquisition
+	events and the events preceding them against all po-later
+	events.
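+
+For instance, returning to the atomic_inc() fragment shown earlier,
+one way (a sketch) to ensure that the load from y executes after the
+update to x is to use smp_mb__after_atomic() in place of the
+ineffective smp_rmb():
+
+	atomic_inc(&x);
+	smp_mb__after_atomic();
+	r1 = READ_ONCE(y);
+
+Here the fence orders the po-earlier atomic update, and everything
+preceding it, against all po-later events, including the load from y.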
+
+Interestingly, RCU and locking each introduce the possibility of
+deadlock.  When faced with code sequences such as:
+
+	spin_lock(&s);
+	spin_lock(&s);
+	spin_unlock(&s);
+	spin_unlock(&s);
+
+or:
+
+	rcu_read_lock();
+	synchronize_rcu();
+	rcu_read_unlock();
+
+what does the LKMM have to say?  Answer: It says there are no allowed
+executions at all, which makes sense.  But this can also lead to
+misleading results, because if a piece of code has multiple possible
+executions, some of which deadlock, the model will report only on the
+non-deadlocking executions.  For example:
+
+	int x, y;
+
+	P0()
+	{
+		int r0;
+
+		WRITE_ONCE(x, 1);
+		r0 = READ_ONCE(y);
+	}
+
+	P1()
+	{
+		rcu_read_lock();
+		if (READ_ONCE(x) > 0) {
+			WRITE_ONCE(y, 36);
+			synchronize_rcu();
+		}
+		rcu_read_unlock();
+	}
+
+Is it possible to end up with r0 = 36 at the end?  The LKMM will tell
+you it is not, but the model won't mention that this is because P1
+will self-deadlock in the executions where it stores 36 in y.
diff --git a/marvell/linux/tools/memory-model/Documentation/recipes.txt b/marvell/linux/tools/memory-model/Documentation/recipes.txt
new file mode 100644
index 0000000..7fe8d7a
--- /dev/null
+++ b/marvell/linux/tools/memory-model/Documentation/recipes.txt
@@ -0,0 +1,570 @@
+This document provides "recipes", that is, litmus tests for commonly
+occurring situations, as well as a few that illustrate subtly broken but
+attractive nuisances.  Many of these recipes include example code from
+v4.13 of the Linux kernel.
+
+The first section covers simple special cases, the second section
+takes off the training wheels to cover more involved examples,
+and the third section provides a few rules of thumb.
+
+
+Simple special cases
+====================
+
+This section presents two simple special cases, the first being where
+there is only one CPU or only one memory location is accessed, and the
+second being use of that old concurrency workhorse, locking.
+
+
+Single CPU or single memory location
+------------------------------------
+
+If there is only one CPU on the one hand or only one variable
+on the other, the code will execute in order.  There are (as
+usual) some things to be careful of:
+
+1.	Some aspects of the C language are unordered.  For example,
+	in the expression "f(x) + g(y)", the order in which f and g are
+	called is not defined; the object code is allowed to use either
+	order or even to interleave the computations.
+
+2.	Compilers are permitted to use the "as-if" rule.  That is, a
+	compiler can emit whatever code it likes for normal accesses,
+	as long as the results of a single-threaded execution appear
+	just as if the compiler had followed all the relevant rules.
+	To see this, compile with a high level of optimization and run
+	the debugger on the resulting binary.
+
+3.	If there is only one variable but multiple CPUs, that variable
+	must be properly aligned and all accesses to that variable must
+	be full sized.	Variables that straddle cachelines or pages void
+	your full-ordering warranty, as do undersized accesses that load
+	from or store to only part of the variable.
+
+4.	If there are multiple CPUs, accesses to shared variables should
+	use READ_ONCE() and WRITE_ONCE() or stronger to prevent load/store
+	tearing, load/store fusing, and invented loads and stores.
+	There are exceptions to this rule, including:
+
+	i.	When there is no possibility of a given shared variable
+		being updated by some other CPU, for example, while
+		holding the update-side lock, reads from that variable
+		need not use READ_ONCE().
+
+	ii.	When there is no possibility of a given shared variable
+		being either read or updated by other CPUs, for example,
+		when running during early boot, reads from that variable
+		need not use READ_ONCE() and writes to that variable
+		need not use WRITE_ONCE().
+
+
+Locking
+-------
+
+Locking is well-known and straightforward, at least if you don't think
+about it too hard.  And the basic rule is indeed quite simple: Any CPU that
+has acquired a given lock sees any changes previously seen or made by any
+CPU before it released that same lock.  Note that this statement is a bit
+stronger than "Any CPU holding a given lock sees all changes made by any
+CPU during the time that CPU was holding this same lock".  For example,
+consider the following pair of code fragments:
+
+	/* See MP+polocks.litmus. */
+	void CPU0(void)
+	{
+		WRITE_ONCE(x, 1);
+		spin_lock(&mylock);
+		WRITE_ONCE(y, 1);
+		spin_unlock(&mylock);
+	}
+
+	void CPU1(void)
+	{
+		spin_lock(&mylock);
+		r0 = READ_ONCE(y);
+		spin_unlock(&mylock);
+		r1 = READ_ONCE(x);
+	}
+
+The basic rule guarantees that if CPU0() acquires mylock before CPU1(),
+then both r0 and r1 must be set to the value 1.  This also has the
+consequence that if the final value of r0 is equal to 1, then the final
+value of r1 must also be equal to 1.  In contrast, the weaker rule would
+say nothing about the final value of r1.
+
+The converse to the basic rule also holds, as illustrated by the
+following litmus test:
+
+	/* See MP+porevlocks.litmus. */
+	void CPU0(void)
+	{
+		r0 = READ_ONCE(y);
+		spin_lock(&mylock);
+		r1 = READ_ONCE(x);
+		spin_unlock(&mylock);
+	}
+
+	void CPU1(void)
+	{
+		spin_lock(&mylock);
+		WRITE_ONCE(x, 1);
+		spin_unlock(&mylock);
+		WRITE_ONCE(y, 1);
+	}
+
+This converse to the basic rule guarantees that if CPU0() acquires
+mylock before CPU1(), then both r0 and r1 must be set to the value 0.
+This also has the consequence that if the final value of r1 is equal
+to 0, then the final value of r0 must also be equal to 0.  In contrast,
+the weaker rule would say nothing about the final value of r0.
+
+These examples show only a single pair of CPUs, but the effects of the
+locking basic rule extend across multiple acquisitions of a given lock
+across multiple CPUs.
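+
+As an illustrative sketch (in the style of the examples above, not a
+litmus test from the kernel tree), suppose three CPUs take the same
+lock in turn:
+
+	void CPU0(void)
+	{
+		WRITE_ONCE(x, 1);
+		spin_lock(&mylock);
+		WRITE_ONCE(y, 1);
+		spin_unlock(&mylock);
+	}
+
+	void CPU1(void)
+	{
+		spin_lock(&mylock);
+		r0 = READ_ONCE(y);
+		WRITE_ONCE(z, 1);
+		spin_unlock(&mylock);
+	}
+
+	void CPU2(void)
+	{
+		spin_lock(&mylock);
+		r1 = READ_ONCE(z);
+		spin_unlock(&mylock);
+		r2 = READ_ONCE(x);
+	}
+
+If CPU0() acquires mylock first, CPU1() second, and CPU2() third, the
+basic rule requires r0, r1, and r2 all to be 1 at the end: each later
+lock holder sees everything seen or made by the earlier holders.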
+
+However, it is not necessarily the case that accesses ordered by
+locking will be seen as ordered by CPUs not holding that lock.
+Consider this example:
+
+	/* See Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus. */
+	void CPU0(void)
+	{
+		spin_lock(&mylock);
+		WRITE_ONCE(x, 1);
+		WRITE_ONCE(y, 1);
+		spin_unlock(&mylock);
+	}
+
+	void CPU1(void)
+	{
+		spin_lock(&mylock);
+		r0 = READ_ONCE(y);
+		WRITE_ONCE(z, 1);
+		spin_unlock(&mylock);
+	}
+
+	void CPU2(void)
+	{
+		WRITE_ONCE(z, 2);
+		smp_mb();
+		r1 = READ_ONCE(x);
+	}
+
+Counter-intuitive though it might be, it is quite possible to have
+the final value of r0 be 1, the final value of z be 2, and the final
+value of r1 be 0.  The reason for this surprising outcome is that
+CPU2() never acquired the lock, and thus did not benefit from the
+lock's ordering properties.
+
+Ordering can be extended to CPUs not holding the lock by careful use
+of smp_mb__after_spinlock():
+
+	/* See Z6.0+pooncelock+poonceLock+pombonce.litmus. */
+	void CPU0(void)
+	{
+		spin_lock(&mylock);
+		WRITE_ONCE(x, 1);
+		WRITE_ONCE(y, 1);
+		spin_unlock(&mylock);
+	}
+
+	void CPU1(void)
+	{
+		spin_lock(&mylock);
+		smp_mb__after_spinlock();
+		r0 = READ_ONCE(y);
+		WRITE_ONCE(z, 1);
+		spin_unlock(&mylock);
+	}
+
+	void CPU2(void)
+	{
+		WRITE_ONCE(z, 2);
+		smp_mb();
+		r1 = READ_ONCE(x);
+	}
+
+This addition of smp_mb__after_spinlock() strengthens the lock acquisition
+sufficiently to rule out the counter-intuitive outcome.
+
+
+Taking off the training wheels
+==============================
+
+This section looks at more complex examples, including message passing,
+load buffering, release-acquire chains, and store buffering.
+Many classes of litmus tests have abbreviated names, which may be found
+here: https://www.cl.cam.ac.uk/~pes20/ppc-supplemental/test6.pdf
+
+
+Message passing (MP)
+--------------------
+
+The MP pattern has one CPU execute a pair of stores to a pair of variables
+and another CPU execute a pair of loads from this same pair of variables,
+but in the opposite order.  The goal is to avoid the counter-intuitive
+outcome in which the first load sees the value written by the second store
+but the second load does not see the value written by the first store.
+In the absence of any ordering, this goal may not be met, as can be seen
+in the MP+poonceonces.litmus litmus test.  This section therefore looks at
+a number of ways of meeting this goal.
+
+
+Release and acquire
+~~~~~~~~~~~~~~~~~~~
+
+Use of smp_store_release() and smp_load_acquire() is one way to force
+the desired MP ordering.  The general approach is shown below:
+
+	/* See MP+pooncerelease+poacquireonce.litmus. */
+	void CPU0(void)
+	{
+		WRITE_ONCE(x, 1);
+		smp_store_release(&y, 1);
+	}
+
+	void CPU1(void)
+	{
+		r0 = smp_load_acquire(&y);
+		r1 = READ_ONCE(x);
+	}
+
+The smp_store_release() macro orders any prior accesses against the
+store, while the smp_load_acquire macro orders the load against any
+subsequent accesses.  Therefore, if the final value of r0 is the value 1,
+the final value of r1 must also be the value 1.
+
+The init_stack_slab() function in lib/stackdepot.c uses release-acquire
+in this way to safely initialize a slab of the stack depot.  Working out
+the mutual-exclusion design is left as an exercise for the reader.
+
+
+Assign and dereference
+~~~~~~~~~~~~~~~~~~~~~~
+
+Use of rcu_assign_pointer() and rcu_dereference() is quite similar to the
+use of smp_store_release() and smp_load_acquire(), except that both
+rcu_assign_pointer() and rcu_dereference() operate on RCU-protected
+pointers.  The general approach is shown below:
+
+	/* See MP+onceassign+derefonce.litmus. */
+	int z;
+	int *y = &z;
+	int x;
+
+	void CPU0(void)
+	{
+		WRITE_ONCE(x, 1);
+		rcu_assign_pointer(y, &x);
+	}
+
+	void CPU1(void)
+	{
+		rcu_read_lock();
+		r0 = rcu_dereference(y);
+		r1 = READ_ONCE(*r0);
+		rcu_read_unlock();
+	}
+
+In this example, if the final value of r0 is &x then the final value of
+r1 must be 1.
+
+The rcu_assign_pointer() macro has the same ordering properties as does
+smp_store_release(), but the rcu_dereference() macro orders the load only
+against later accesses that depend on the value loaded.  A dependency
+is present if the value loaded determines the address of a later access
+(address dependency, as shown above), the value written by a later store
+(data dependency), or whether or not a later store is executed in the
+first place (control dependency).  Note that the term "data dependency"
+is sometimes casually used to cover both address and data dependencies.
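+
+To make the three kinds of dependency concrete, here is a sketch of an
+expanded CPU1(); the variables p and b are hypothetical additions, not
+part of the litmus test above:
+
+	void CPU1(void)
+	{
+		rcu_read_lock();
+		r0 = rcu_dereference(y);
+		r1 = READ_ONCE(*r0);	/* address dependency: r0 supplies the address */
+		WRITE_ONCE(p, r0);	/* data dependency: r0 supplies the value stored */
+		if (r1)			/* control dependency: r1 determines whether */
+			WRITE_ONCE(b, 1);	/* this store executes at all */
+		rcu_read_unlock();
+	}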
+
+In lib/prime_numbers.c, the expand_to_next_prime() function invokes
+rcu_assign_pointer(), and the next_prime_number() function invokes
+rcu_dereference().  This combination mediates access to a bit vector
+that is expanded as additional primes are needed.
+
+
+Write and read memory barriers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is usually better to use smp_store_release() instead of smp_wmb()
+and to use smp_load_acquire() instead of smp_rmb().  However, the older
+smp_wmb() and smp_rmb() APIs are still heavily used, so it is important
+to understand their use cases.  The general approach is shown below:
+
+	/* See MP+fencewmbonceonce+fencermbonceonce.litmus. */
+	void CPU0(void)
+	{
+		WRITE_ONCE(x, 1);
+		smp_wmb();
+		WRITE_ONCE(y, 1);
+	}
+
+	void CPU1(void)
+	{
+		r0 = READ_ONCE(y);
+		smp_rmb();
+		r1 = READ_ONCE(x);
+	}
+
+The smp_wmb() macro orders prior stores against later stores, and the
+smp_rmb() macro orders prior loads against later loads.  Therefore, if
+the final value of r0 is 1, the final value of r1 must also be 1.
+
+The xlog_state_switch_iclogs() function in fs/xfs/xfs_log.c contains
+the following write-side code fragment:
+
+	log->l_curr_block -= log->l_logBBsize;
+	ASSERT(log->l_curr_block >= 0);
+	smp_wmb();
+	log->l_curr_cycle++;
+
+And the xlog_valid_lsn() function in fs/xfs/xfs_log_priv.h contains
+the corresponding read-side code fragment:
+
+	cur_cycle = READ_ONCE(log->l_curr_cycle);
+	smp_rmb();
+	cur_block = READ_ONCE(log->l_curr_block);
+
+Alternatively, consider the following comment in function
+perf_output_put_handle() in kernel/events/ring_buffer.c:
+
+	 *   kernel				user
+	 *
+	 *   if (LOAD ->data_tail) {		LOAD ->data_head
+	 *			(A)		smp_rmb()	(C)
+	 *	STORE $data			LOAD $data
+	 *	smp_wmb()	(B)		smp_mb()	(D)
+	 *	STORE ->data_head		STORE ->data_tail
+	 *   }
+
+The B/C pairing is an example of the MP pattern using smp_wmb() on the
+write side and smp_rmb() on the read side.
+
+Of course, given that smp_mb() is strictly stronger than either smp_wmb()
+or smp_rmb(), any code fragment that would work with smp_rmb() and
+smp_wmb() would also work with smp_mb() replacing either or both of the
+weaker barriers.
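+
+For instance, here is a sketch of the MP fragment above with both of
+the weaker barriers replaced; it remains correct, though needlessly
+heavyweight:
+
+	void CPU0(void)
+	{
+		WRITE_ONCE(x, 1);
+		smp_mb();
+		WRITE_ONCE(y, 1);
+	}
+
+	void CPU1(void)
+	{
+		r0 = READ_ONCE(y);
+		smp_mb();
+		r1 = READ_ONCE(x);
+	}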
+
+
+Load buffering (LB)
+-------------------
+
+The LB pattern has one CPU load from one variable and then store to a
+second, while another CPU loads from the second variable and then stores
+to the first.  The goal is to avoid the counter-intuitive situation where
+each load reads the value written by the other CPU's store.  In the
+absence of any ordering it is quite possible that this may happen, as
+can be seen in the LB+poonceonces.litmus litmus test.
+
+One way of avoiding the counter-intuitive outcome is through the use of a
+control dependency paired with a full memory barrier:
+
+	/* See LB+fencembonceonce+ctrlonceonce.litmus. */
+	void CPU0(void)
+	{
+		r0 = READ_ONCE(x);
+		if (r0)
+			WRITE_ONCE(y, 1);
+	}
+
+	void CPU1(void)
+	{
+		r1 = READ_ONCE(y);
+		smp_mb();
+		WRITE_ONCE(x, 1);
+	}
+
+This pairing of a control dependency in CPU0() with a full memory
+barrier in CPU1() prevents r0 and r1 from both ending up equal to 1.
+
+The A/D pairing from the ring-buffer use case shown earlier also
+illustrates LB.  Here is a repeat of the comment in
+perf_output_put_handle() in kernel/events/ring_buffer.c, showing a
+control dependency on the kernel side and a full memory barrier on
+the user side:
+
+	 *   kernel				user
+	 *
+	 *   if (LOAD ->data_tail) {		LOAD ->data_head
+	 *			(A)		smp_rmb()	(C)
+	 *	STORE $data			LOAD $data
+	 *	smp_wmb()	(B)		smp_mb()	(D)
+	 *	STORE ->data_head		STORE ->data_tail
+	 *   }
+	 *
+	 * Where A pairs with D, and B pairs with C.
+
+The kernel's control dependency between the load from ->data_tail
+and the store to data combined with the user's full memory barrier
+between the load from data and the store to ->data_tail prevents
+the counter-intuitive outcome where the kernel overwrites the data
+before the user gets done loading it.
+
+
+Release-acquire chains
+----------------------
+
+Release-acquire chains are a low-overhead, flexible, and easy-to-use
+method of maintaining order.  However, they do have some limitations that
+need to be fully understood.  Here is an example that maintains order:
+
+	/* See ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus. */
+	void CPU0(void)
+	{
+		WRITE_ONCE(x, 1);
+		smp_store_release(&y, 1);
+	}
+
+	void CPU1(void)
+	{
+		r0 = smp_load_acquire(&y);
+		smp_store_release(&z, 1);
+	}
+
+	void CPU2(void)
+	{
+		r1 = smp_load_acquire(&z);
+		r2 = READ_ONCE(x);
+	}
+
+In this case, if r0 and r1 both have final values of 1, then r2 must
+also have a final value of 1.
+
+The ordering in this example is stronger than it needs to be.  For
+example, ordering would still be preserved if CPU1()'s smp_load_acquire()
+invocation was replaced with READ_ONCE().
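+
+That weakened version of CPU1() would look like this (a sketch; the
+other two processes are unchanged):
+
+	void CPU1(void)
+	{
+		r0 = READ_ONCE(y);
+		smp_store_release(&z, 1);
+	}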
+
+It is tempting to assume that CPU0()'s store to x is globally ordered
+before CPU1()'s store to z, but this is not the case:
+
+	/* See Z6.0+pooncerelease+poacquirerelease+mbonceonce.litmus. */
+	void CPU0(void)
+	{
+		WRITE_ONCE(x, 1);
+		smp_store_release(&y, 1);
+	}
+
+	void CPU1(void)
+	{
+		r0 = smp_load_acquire(&y);
+		smp_store_release(&z, 1);
+	}
+
+	void CPU2(void)
+	{
+		WRITE_ONCE(z, 2);
+		smp_mb();
+		r1 = READ_ONCE(x);
+	}
+
+One might hope that if the final value of r0 is 1 and the final value
+of z is 2, then the final value of r1 must also be 1, but it really is
+possible for r1 to have the final value of 0.  The reason, of course,
+is that in this version, CPU2() is not part of the release-acquire chain.
+This situation is accounted for in the rules of thumb below.
+
+Despite this limitation, release-acquire chains are low-overhead as
+well as simple and powerful, at least as memory-ordering mechanisms go.
+
+
+Store buffering
+---------------
+
+Store buffering can be thought of as upside-down load buffering, so
+that one CPU first stores to one variable and then loads from a second,
+while another CPU stores to the second variable and then loads from the
+first.  Preserving order requires nothing less than full barriers:
+
+	/* See SB+fencembonceonces.litmus. */
+	void CPU0(void)
+	{
+		WRITE_ONCE(x, 1);
+		smp_mb();
+		r0 = READ_ONCE(y);
+	}
+
+	void CPU1(void)
+	{
+		WRITE_ONCE(y, 1);
+		smp_mb();
+		r1 = READ_ONCE(x);
+	}
+
+Omitting either smp_mb() will allow both r0 and r1 to have final
+values of 0, but providing both full barriers as shown above prevents
+this counter-intuitive outcome.
+
+This pattern most famously appears as part of Dekker's locking
+algorithm, but it has a much more practical use within the Linux kernel
+of ordering wakeups.  The following comment taken from waitqueue_active()
+in include/linux/wait.h shows the canonical pattern:
+
+ *      CPU0 - waker                    CPU1 - waiter
+ *
+ *                                      for (;;) {
+ *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
+ *      smp_mb();                         // smp_mb() from set_current_state()
+ *      if (waitqueue_active(wq_head))         if (@cond)
+ *        wake_up(wq_head);                      break;
+ *                                        schedule();
+ *                                      }
+ *                                      finish_wait(&wq_head, &wait);
+
+On CPU0, the store is to @cond and the load is in waitqueue_active().
+On CPU1, prepare_to_wait() contains both a store to wq_head and a call
+to set_current_state(), which contains an smp_mb() barrier; the load is
+"if (@cond)".  The full barriers prevent the undesirable outcome where
+CPU1 puts the waiting task to sleep and CPU0 fails to wake it up.
+
+Note that use of locking can greatly simplify this pattern.
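+
+For example, if the waker's store to @cond and the waiter's check of
+@cond both run under a common lock, the rules from the Locking section
+above supply the needed ordering and the explicit full barriers can be
+dispensed with.  A hedged sketch (mylock, cond, and waiter_queued are
+hypothetical):
+
+	void CPU0(void)				/* waker */
+	{
+		spin_lock(&mylock);
+		WRITE_ONCE(cond, 1);
+		r0 = READ_ONCE(waiter_queued);	/* wake the task if queued */
+		spin_unlock(&mylock);
+	}
+
+	void CPU1(void)				/* waiter */
+	{
+		spin_lock(&mylock);
+		WRITE_ONCE(waiter_queued, 1);
+		r1 = READ_ONCE(cond);		/* sleep only if this is 0 */
+		spin_unlock(&mylock);
+	}
+
+Whichever CPU acquires mylock second is guaranteed to see the other's
+store, so the lost-wakeup outcome r0 = 0 and r1 = 0 cannot occur.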
+
+
+Rules of thumb
+==============
+
+There might seem to be no pattern governing what ordering primitives are
+needed in which situations, but this is not the case.  There is a pattern
+based on the relation between the accesses linking successive CPUs in a
+given litmus test.  There are three types of linkage:
+
+1.	Write-to-read, where the next CPU reads the value that the
+	previous CPU wrote.  The LB litmus-test patterns contain only
+	this type of relation.	In formal memory-modeling texts, this
+	relation is called "reads-from" and is usually abbreviated "rf".
+
+2.	Read-to-write, where the next CPU overwrites the value that the
+	previous CPU read.  The SB litmus test contains only this type
+	of relation.  In formal memory-modeling texts, this relation is
+	often called "from-reads" and is sometimes abbreviated "fr".
+
+3.	Write-to-write, where the next CPU overwrites the value written
+	by the previous CPU.  The Z6.0 litmus test pattern contains a
+	write-to-write relation between the last access of CPU1() and
+	the first access of CPU2().  In formal memory-modeling texts,
+	this relation is often called "coherence order" and is sometimes
+	abbreviated "co".  In the C++ standard, it is instead called
+	"modification order" and often abbreviated "mo".
+
+The strength of memory ordering required for a given litmus test to
+avoid a counter-intuitive outcome depends on the types of relations
+linking the memory accesses for the outcome in question:
+
+o	If all links are write-to-read links, then the weakest
+	possible ordering within each CPU suffices.  For example, in
+	the LB litmus test, a control dependency was enough to do the
+	job.
+
+o	If all but one of the links are write-to-read links, then a
+	release-acquire chain suffices.  Both the MP and the ISA2
+	litmus tests illustrate this case.
+
+o	If more than one of the links are something other than
+	write-to-read links, then a full memory barrier is required
+	between each successive pair of non-write-to-read links.  This
+	case is illustrated by the Z6.0 litmus tests, both in the
+	locking and in the release-acquire sections.
+
+However, if you find yourself having to stretch these rules of thumb
+to fit your situation, you should consider creating a litmus test and
+running it on the model.
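+
+If you do, the litmus test will be a small C-like file patterned on
+those in the litmus-tests directory.  Here is a sketch of what such a
+file might look like (the name and the "exists" clause are purely
+illustrative):
+
+	C MP+myrelease+myacquire
+
+	{}
+
+	P0(int *buf, int *flag)
+	{
+		WRITE_ONCE(*buf, 1);
+		smp_store_release(flag, 1);
+	}
+
+	P1(int *buf, int *flag)
+	{
+		int r0;
+		int r1;
+
+		r0 = smp_load_acquire(flag);
+		r1 = READ_ONCE(*buf);
+	}
+
+	exists (1:r0=1 /\ 1:r1=0)
+
+Running it through herd7 as described in this directory's README file
+will then show whether the "exists" clause can be satisfied.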
diff --git a/marvell/linux/tools/memory-model/Documentation/references.txt b/marvell/linux/tools/memory-model/Documentation/references.txt
new file mode 100644
index 0000000..b177f3e
--- /dev/null
+++ b/marvell/linux/tools/memory-model/Documentation/references.txt
@@ -0,0 +1,114 @@
+This document provides background reading for memory models and related
+tools.  These documents are aimed at kernel hackers who are interested
+in memory models.
+
+
+Hardware manuals and models
+===========================
+
+o	SPARC International Inc. (Ed.). 1994. "The SPARC Architecture
+	Reference Manual Version 9". SPARC International Inc.
+
+o	Compaq Computer Corporation (Ed.). 2002. "Alpha Architecture
+	Reference Manual".  Compaq Computer Corporation.
+
+o	Intel Corporation (Ed.). 2002. "A Formal Specification of Intel
+	Itanium Processor Family Memory Ordering". Intel Corporation.
+
+o	Intel Corporation (Ed.). 2002. "Intel 64 and IA-32 Architectures
+	Software Developer’s Manual". Intel Corporation.
+
+o	Peter Sewell, Susmit Sarkar, Scott Owens, Francesco Zappa Nardelli,
+	and Magnus O. Myreen. 2010. "x86-TSO: A Rigorous and Usable
+	Programmer's Model for x86 Multiprocessors". Commun. ACM 53, 7
+	(July, 2010), 89-97. http://doi.acm.org/10.1145/1785414.1785443
+
+o	IBM Corporation (Ed.). 2009. "Power ISA Version 2.06". IBM
+	Corporation.
+
+o	ARM Ltd. (Ed.). 2009. "ARM Barrier Litmus Tests and Cookbook".
+	ARM Ltd.
+
+o	Susmit Sarkar, Peter Sewell, Jade Alglave, Luc Maranget, and
+	Derek Williams.  2011. "Understanding POWER Multiprocessors". In
+	Proceedings of the 32Nd ACM SIGPLAN Conference on Programming
+	Language Design and Implementation (PLDI ’11). ACM, New York,
+	NY, USA, 175–186.
+
+o	Susmit Sarkar, Kayvan Memarian, Scott Owens, Mark Batty,
+	Peter Sewell, Luc Maranget, Jade Alglave, and Derek Williams.
+	2012. "Synchronising C/C++ and POWER". In Proceedings of the 33rd
+	ACM SIGPLAN Conference on Programming Language Design and
+	Implementation (PLDI '12). ACM, New York, NY, USA, 311-322.
+
+o	ARM Ltd. (Ed.). 2014. "ARM Architecture Reference Manual (ARMv8,
+	for ARMv8-A architecture profile)". ARM Ltd.
+
+o	Imagination Technologies, LTD. 2015. "MIPS(R) Architecture
+	For Programmers, Volume II-A: The MIPS64(R) Instruction,
+	Set Reference Manual". Imagination Technologies,
+	LTD. https://imgtec.com/?do-download=4302.
+
+o	Shaked Flur, Kathryn E. Gray, Christopher Pulte, Susmit
+	Sarkar, Ali Sezgin, Luc Maranget, Will Deacon, and Peter
+	Sewell. 2016. "Modelling the ARMv8 Architecture, Operationally:
+	Concurrency and ISA". In Proceedings of the 43rd Annual ACM
+	SIGPLAN-SIGACT Symposium on Principles of Programming Languages
+	(POPL ’16). ACM, New York, NY, USA, 608–621.
+
+o	Shaked Flur, Susmit Sarkar, Christopher Pulte, Kyndylan Nienhuis,
+	Luc Maranget, Kathryn E. Gray, Ali Sezgin, Mark Batty, and Peter
+	Sewell. 2017. "Mixed-size Concurrency: ARM, POWER, C/C++11,
+	and SC". In Proceedings of the 44th ACM SIGPLAN Symposium on
+	Principles of Programming Languages (POPL 2017). ACM, New York,
+	NY, USA, 429–442.
+
+o	Christopher Pulte, Shaked Flur, Will Deacon, Jon French,
+	Susmit Sarkar, and Peter Sewell. 2018. "Simplifying ARM concurrency:
+	multicopy-atomic axiomatic and operational models for ARMv8". In
+	Proceedings of the ACM on Programming Languages, Volume 2, Issue
+	POPL, Article No. 19. ACM, New York, NY, USA.
+
+
+Linux-kernel memory model
+=========================
+
+o	Jade Alglave, Luc Maranget, Paul E. McKenney, Andrea Parri, and
+	Alan Stern.  2018. "Frightening small children and disconcerting
+	grown-ups: Concurrency in the Linux kernel". In Proceedings of
+	the 23rd International Conference on Architectural Support for
+	Programming Languages and Operating Systems (ASPLOS 2018). ACM,
+	New York, NY, USA, 405-418.  Webpage: http://diy.inria.fr/linux/.
+
+o	Jade Alglave, Luc Maranget, Paul E. McKenney, Andrea Parri, and
+	Alan Stern.  2017.  "A formal kernel memory-ordering model (part 1)"
+	Linux Weekly News.  https://lwn.net/Articles/718628/
+
+o	Jade Alglave, Luc Maranget, Paul E. McKenney, Andrea Parri, and
+	Alan Stern.  2017.  "A formal kernel memory-ordering model (part 2)"
+	Linux Weekly News.  https://lwn.net/Articles/720550/
+
+
+Memory-model tooling
+====================
+
+o	Daniel Jackson. 2002. "Alloy: A Lightweight Object Modelling
+	Notation". ACM Trans. Softw. Eng. Methodol. 11, 2 (April 2002),
+	256–290. http://doi.acm.org/10.1145/505145.505149
+
+o	Jade Alglave, Luc Maranget, and Michael Tautschnig. 2014. "Herding
+	Cats: Modelling, Simulation, Testing, and Data Mining for Weak
+	Memory". ACM Trans. Program. Lang. Syst. 36, 2, Article 7 (July
+	2014), 7:1–7:74 pages.
+
+o	Jade Alglave, Patrick Cousot, and Luc Maranget. 2016. "Syntax and
+	semantics of the weak consistency model specification language
+	cat". CoRR abs/1608.07531 (2016). http://arxiv.org/abs/1608.07531
+
+
+Memory-model comparisons
+========================
+
+o	Paul E. McKenney, Ulrich Weigand, Andrea Parri, and Boqun
+	Feng. 2016. "Linux-Kernel Memory Model". (6 June 2016).
+	http://open-std.org/JTC1/SC22/WG21/docs/papers/2016/p0124r2.html.
diff --git a/marvell/linux/tools/memory-model/README b/marvell/linux/tools/memory-model/README
new file mode 100644
index 0000000..fc07b52
--- /dev/null
+++ b/marvell/linux/tools/memory-model/README
@@ -0,0 +1,272 @@
+		=====================================
+		LINUX KERNEL MEMORY CONSISTENCY MODEL
+		=====================================
+
+============
+INTRODUCTION
+============
+
+This directory contains the memory consistency model (memory model, for
+short) of the Linux kernel, written in the "cat" language and executable
+by the externally provided "herd7" simulator, which exhaustively explores
+the state space of small litmus tests.
+
+In addition, the "klitmus7" tool (also externally provided) may be used
+to convert a litmus test to a Linux kernel module, which in turn allows
+that litmus test to be exercised within the Linux kernel.
+
+
+============
+REQUIREMENTS
+============
+
+Version 7.52 or higher of the "herd7" and "klitmus7" tools must be
+downloaded separately:
+
+  https://github.com/herd/herdtools7
+
+See "herdtools7/INSTALL.md" for installation instructions.
+
+Note that although these tools usually provide backwards compatibility,
+this is not absolutely guaranteed.  Therefore, if a later version does
+not work, please try using the exact version called out above.
+
+
+==================
+BASIC USAGE: HERD7
+==================
+
+The memory model is used, in conjunction with "herd7", to exhaustively
+explore the state space of small litmus tests.
+
+For example, to run SB+fencembonceonces.litmus against the memory model:
+
+  $ herd7 -conf linux-kernel.cfg litmus-tests/SB+fencembonceonces.litmus
+
+Here is the corresponding output:
+
+  Test SB+fencembonceonces Allowed
+  States 3
+  0:r0=0; 1:r0=1;
+  0:r0=1; 1:r0=0;
+  0:r0=1; 1:r0=1;
+  No
+  Witnesses
+  Positive: 0 Negative: 3
+  Condition exists (0:r0=0 /\ 1:r0=0)
+  Observation SB+fencembonceonces Never 0 3
+  Time SB+fencembonceonces 0.01
+  Hash=d66d99523e2cac6b06e66f4c995ebb48
+
+The "Positive: 0 Negative: 3" and the "Never 0 3" each indicate that
+this litmus test's "exists" clause can not be satisfied.
+
+See "herd7 -help" or "herdtools7/doc/" for more information.
+
+
+=====================
+BASIC USAGE: KLITMUS7
+=====================
+
+The "klitmus7" tool converts a litmus test into a Linux kernel module,
+which may then be loaded and run.
+
+For example, to run SB+fencembonceonces.litmus against hardware:
+
+  $ mkdir mymodules
+  $ klitmus7 -o mymodules litmus-tests/SB+fencembonceonces.litmus
+  $ cd mymodules ; make
+  $ sudo sh run.sh
+
+The corresponding output includes:
+
+  Test SB+fencembonceonces Allowed
+  Histogram (3 states)
+  644580  :>0:r0=1; 1:r0=0;
+  644328  :>0:r0=0; 1:r0=1;
+  711092  :>0:r0=1; 1:r0=1;
+  No
+  Witnesses
+  Positive: 0, Negative: 2000000
+  Condition exists (0:r0=0 /\ 1:r0=0) is NOT validated
+  Hash=d66d99523e2cac6b06e66f4c995ebb48
+  Observation SB+fencembonceonces Never 0 2000000
+  Time SB+fencembonceonces 0.16
+
+The "Positive: 0 Negative: 2000000" and the "Never 0 2000000" indicate
+that during two million trials, the state specified in this litmus
+test's "exists" clause was not reached.
+
+And, as with "herd7", please see "klitmus7 -help" or "herdtools7/doc/"
+for more information.
+
+
+====================
+DESCRIPTION OF FILES
+====================
+
+Documentation/cheatsheet.txt
+	Quick-reference guide to the Linux-kernel memory model.
+
+Documentation/explanation.txt
+	Describes the memory model in detail.
+
+Documentation/recipes.txt
+	Lists common memory-ordering patterns.
+
+Documentation/references.txt
+	Provides background reading.
+
+linux-kernel.bell
+	Categorizes the relevant instructions, including memory
+	references, memory barriers, atomic read-modify-write operations,
+	lock acquisition/release, and RCU operations.
+
+	More formally, this file (1) lists the subtypes of the various
+	event types used by the memory model and (2) performs RCU
+	read-side critical section nesting analysis.
+
+linux-kernel.cat
+	Specifies what reorderings are forbidden by memory references,
+	memory barriers, atomic read-modify-write operations, and RCU.
+
+	More formally, this file specifies what executions are forbidden
+	by the memory model.  Allowed executions are those which
+	satisfy the model's "coherence", "atomic", "happens-before",
+	"propagation", and "rcu" axioms, which are defined in the file.
+
+linux-kernel.cfg
+	Convenience file that gathers the common-case herd7 command-line
+	arguments.
+
+linux-kernel.def
+	Maps from C-like syntax to herd7's internal litmus-test
+	instruction-set architecture.
+
+litmus-tests
+	Directory containing a few representative litmus tests, which
+	are listed in litmus-tests/README.  Many more litmus tests are
+	available at https://github.com/paulmckrcu/litmus.
+
+lock.cat
+	Provides a front-end analysis of lock acquisition and release,
+	for example, associating a lock acquisition with the preceding
+	and following releases and checking for self-deadlock.
+
+	More formally, this file defines a performance-enhanced scheme
+	for generation of the possible reads-from and coherence order
+	relations on the locking primitives.
+
+README
+	This file.
+
+scripts	Various scripts, see scripts/README.
+
+
+===========
+LIMITATIONS
+===========
+
+The Linux-kernel memory model (LKMM) has the following limitations:
+
+1.	Compiler optimizations are not accurately modeled.  Of course,
+	the use of READ_ONCE() and WRITE_ONCE() limits the compiler's
+	ability to optimize, but under some circumstances it is possible
+	for the compiler to undermine the memory model.  For more
+	information, see Documentation/explanation.txt (in particular,
+	the "THE PROGRAM ORDER RELATION: po AND po-loc" and "A WARNING"
+	sections).
+
+	Note that this limitation in turn limits LKMM's ability to
+	accurately model address, control, and data dependencies.
+	For example, if the compiler can deduce the value of some variable
+	carrying a dependency, then the compiler can break that dependency
+	by substituting a constant of that value.
+
+2.	Multiple access sizes for a single variable are not supported,
+	and neither are misaligned or partially overlapping accesses.
+
+3.	Exceptions and interrupts are not modeled.  In some cases,
+	this limitation can be overcome by modeling the interrupt or
+	exception with an additional process.
+
+4.	I/O such as MMIO or DMA is not supported.
+
+5.	Self-modifying code (such as that found in the kernel's
+	alternatives mechanism, function tracer, Berkeley Packet Filter
+	JIT compiler, and module loader) is not supported.
+
+6.	Complete modeling of all variants of atomic read-modify-write
+	operations, locking primitives, and RCU is not provided.
+	For example, call_rcu() and rcu_barrier() are not supported.
+	However, a substantial amount of support is provided for these
+	operations, as shown in the linux-kernel.def file.
+
+	a.	When rcu_assign_pointer() is passed NULL, the Linux
+		kernel provides no ordering, but LKMM models this
+		case as a store release.
+
+	b.	The "unless" RMW operations are not currently modeled:
+		atomic_long_add_unless(), atomic_add_unless(),
+		atomic_inc_unless_negative(), and
+		atomic_dec_unless_positive().  These can be emulated
+		in litmus tests, for example, by using atomic_cmpxchg().
+
+	c.	The call_rcu() function is not modeled.  It can be
+		emulated in litmus tests by adding another process that
+		invokes synchronize_rcu() and the body of the callback
+		function, with (for example) a release-acquire from
+		the site of the emulated call_rcu() to the beginning
+		of the additional process.
+
+	d.	The rcu_barrier() function is not modeled.  It can be
+		emulated in litmus tests emulating call_rcu() via
+		(for example) a release-acquire from the end of each
+		additional call_rcu() process to the site of the
+		emulated rcu_barrier().
+
+	e.	Although sleepable RCU (SRCU) is now modeled, there
+		are some subtle differences between its semantics and
+		those in the Linux kernel.  For example, the kernel
+		might interpret the following sequence as two partially
+		overlapping SRCU read-side critical sections:
+
+			 1  r1 = srcu_read_lock(&my_srcu);
+			 2  do_something_1();
+			 3  r2 = srcu_read_lock(&my_srcu);
+			 4  do_something_2();
+			 5  srcu_read_unlock(&my_srcu, r1);
+			 6  do_something_3();
+			 7  srcu_read_unlock(&my_srcu, r2);
+
+		In contrast, LKMM will interpret this as a nested pair of
+		SRCU read-side critical sections, with the outer critical
+		section spanning lines 1-7 and the inner critical section
+		spanning lines 3-5.
+
+		This difference would be more of a concern had anyone
+		identified a reasonable use case for partially overlapping
+		SRCU read-side critical sections.  For more information,
+		please see: https://paulmck.livejournal.com/40593.html
+
+	f.	Reader-writer locking is not modeled.  It can be
+		emulated in litmus tests using atomic read-modify-write
+		operations.
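+
+		For example (a sketch only), a writer can emulate
+		write_lock() and write_unlock() as shown below, with the
+		"exists" clause restricted to executions in which the
+		cmpxchg() succeeded, that is, in which r0 is 0:
+
+			r0 = cmpxchg(lock, 0, 1);	/* emulated write_lock() */
+			WRITE_ONCE(*x, 1);		/* critical section */
+			smp_store_release(lock, 0);	/* emulated write_unlock() */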
+
+The "herd7" tool has some additional limitations of its own, apart from
+the memory model:
+
+1.	Non-trivial data structures such as arrays or structures are
+	not supported.  However, pointers are supported, allowing trivial
+	linked lists to be constructed.
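+
+	For example, MP+onceassign+derefonce.litmus in this directory
+	builds a one-hop pointer chain entirely out of statically
+	allocated variables by initializing y to point to z:
+
+		{
+		y=z;
+		z=0;
+		}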
+
+2.	Dynamic memory allocation is not supported, although this can
+	be worked around in some cases by supplying multiple statically
+	allocated variables.
+
+Some of these limitations may be overcome in the future, but others are
+more likely to be addressed by incorporating the Linux-kernel memory model
+into other tools.
+
+Finally, please note that LKMM is subject to change as hardware, use cases,
+and compilers evolve.
diff --git a/marvell/linux/tools/memory-model/linux-kernel.bell b/marvell/linux/tools/memory-model/linux-kernel.bell
new file mode 100644
index 0000000..5be86b1
--- /dev/null
+++ b/marvell/linux/tools/memory-model/linux-kernel.bell
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0+
+(*
+ * Copyright (C) 2015 Jade Alglave <j.alglave@ucl.ac.uk>,
+ * Copyright (C) 2016 Luc Maranget <luc.maranget@inria.fr> for Inria
+ * Copyright (C) 2017 Alan Stern <stern@rowland.harvard.edu>,
+ *                    Andrea Parri <parri.andrea@gmail.com>
+ *
+ * An earlier version of this file appeared in the companion webpage for
+ * "Frightening small children and disconcerting grown-ups: Concurrency
+ * in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
+ * which appeared in ASPLOS 2018.
+ *)
+
+"Linux-kernel memory consistency model"
+
+enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
+		'release (*smp_store_release*) ||
+		'acquire (*smp_load_acquire*) ||
+		'noreturn (* R of non-return RMW *)
+instructions R[{'once,'acquire,'noreturn}]
+instructions W[{'once,'release}]
+instructions RMW[{'once,'acquire,'release}]
+
+enum Barriers = 'wmb (*smp_wmb*) ||
+		'rmb (*smp_rmb*) ||
+		'mb (*smp_mb*) ||
+		'barrier (*barrier*) ||
+		'rcu-lock (*rcu_read_lock*)  ||
+		'rcu-unlock (*rcu_read_unlock*) ||
+		'sync-rcu (*synchronize_rcu*) ||
+		'before-atomic (*smp_mb__before_atomic*) ||
+		'after-atomic (*smp_mb__after_atomic*) ||
+		'after-spinlock (*smp_mb__after_spinlock*) ||
+		'after-unlock-lock (*smp_mb__after_unlock_lock*)
+instructions F[Barriers]
+
+(* SRCU *)
+enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
+instructions SRCU[SRCU]
+(* All srcu events *)
+let Srcu = Srcu-lock | Srcu-unlock | Sync-srcu
+
+(* Compute matching pairs of nested Rcu-lock and Rcu-unlock *)
+let rcu-rscs = let rec
+	    unmatched-locks = Rcu-lock \ domain(matched)
+	and unmatched-unlocks = Rcu-unlock \ range(matched)
+	and unmatched = unmatched-locks | unmatched-unlocks
+	and unmatched-po = [unmatched] ; po ; [unmatched]
+	and unmatched-locks-to-unlocks =
+		[unmatched-locks] ; po ; [unmatched-unlocks]
+	and matched = matched | (unmatched-locks-to-unlocks \
+		(unmatched-po ; unmatched-po))
+	in matched
+
+(* Validate nesting *)
+flag ~empty Rcu-lock \ domain(rcu-rscs) as unbalanced-rcu-locking
+flag ~empty Rcu-unlock \ range(rcu-rscs) as unbalanced-rcu-locking
+
+(* Compute matching pairs of nested Srcu-lock and Srcu-unlock *)
+let srcu-rscs = let rec
+	    unmatched-locks = Srcu-lock \ domain(matched)
+	and unmatched-unlocks = Srcu-unlock \ range(matched)
+	and unmatched = unmatched-locks | unmatched-unlocks
+	and unmatched-po = ([unmatched] ; po ; [unmatched]) & loc
+	and unmatched-locks-to-unlocks =
+		([unmatched-locks] ; po ; [unmatched-unlocks]) & loc
+	and matched = matched | (unmatched-locks-to-unlocks \
+		(unmatched-po ; unmatched-po))
+	in matched
+
+(* Validate nesting *)
+flag ~empty Srcu-lock \ domain(srcu-rscs) as unbalanced-srcu-locking
+flag ~empty Srcu-unlock \ range(srcu-rscs) as unbalanced-srcu-locking
+
+(* Check for use of synchronize_srcu() inside an RCU critical section *)
+flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
+
+(* Validate SRCU dynamic match *)
+flag ~empty different-values(srcu-rscs) as srcu-bad-nesting
+
+(* Compute marked and plain memory accesses *)
+let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) |
+		LKR | LKW | UL | LF | RL | RU
+let Plain = M \ Marked
diff --git a/marvell/linux/tools/memory-model/linux-kernel.cat b/marvell/linux/tools/memory-model/linux-kernel.cat
new file mode 100644
index 0000000..2a9b4fe
--- /dev/null
+++ b/marvell/linux/tools/memory-model/linux-kernel.cat
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0+
+(*
+ * Copyright (C) 2015 Jade Alglave <j.alglave@ucl.ac.uk>,
+ * Copyright (C) 2016 Luc Maranget <luc.maranget@inria.fr> for Inria
+ * Copyright (C) 2017 Alan Stern <stern@rowland.harvard.edu>,
+ *                    Andrea Parri <parri.andrea@gmail.com>
+ *
+ * An earlier version of this file appeared in the companion webpage for
+ * "Frightening small children and disconcerting grown-ups: Concurrency
+ * in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
+ * which appeared in ASPLOS 2018.
+ *)
+
+"Linux-kernel memory consistency model"
+
+(*
+ * File "lock.cat" handles locks and is experimental.
+ * It can be replaced by include "cos.cat" for tests that do not use locks.
+ *)
+
+include "lock.cat"
+
+(*******************)
+(* Basic relations *)
+(*******************)
+
+(* Release Acquire *)
+let acq-po = [Acquire] ; po ; [M]
+let po-rel = [M] ; po ; [Release]
+let po-unlock-rf-lock-po = po ; [UL] ; rf ; [LKR] ; po
+
+(* Fences *)
+let R4rmb = R \ Noreturn	(* Reads for which rmb works *)
+let rmb = [R4rmb] ; fencerel(Rmb) ; [R4rmb]
+let wmb = [W] ; fencerel(Wmb) ; [W]
+let mb = ([M] ; fencerel(Mb) ; [M]) |
+	([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
+	([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
+	([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
+	([M] ; po ; [UL] ; (co | po) ; [LKW] ;
+		fencerel(After-unlock-lock) ; [M])
+let gp = po ; [Sync-rcu | Sync-srcu] ; po?
+let strong-fence = mb | gp
+
+let nonrw-fence = strong-fence | po-rel | acq-po
+let fence = nonrw-fence | wmb | rmb
+let barrier = fencerel(Barrier | Rmb | Wmb | Mb | Sync-rcu | Sync-srcu |
+		Before-atomic | After-atomic | Acquire | Release |
+		Rcu-lock | Rcu-unlock | Srcu-lock | Srcu-unlock) |
+	(po ; [Release]) | ([Acquire] ; po)
+
+(**********************************)
+(* Fundamental coherence ordering *)
+(**********************************)
+
+(* Sequential Consistency Per Variable *)
+let com = rf | co | fr
+acyclic po-loc | com as coherence
+
+(* Atomic Read-Modify-Write *)
+empty rmw & (fre ; coe) as atomic
+
+(**********************************)
+(* Instruction execution ordering *)
+(**********************************)
+
+(* Preserved Program Order *)
+let dep = addr | data
+let rwdep = (dep | ctrl) ; [W]
+let overwrite = co | fr
+let to-w = rwdep | (overwrite & int) | (addr ; [Plain] ; wmb)
+let to-r = addr | (dep ; [Marked] ; rfi)
+let ppo = to-r | to-w | fence | (po-unlock-rf-lock-po & int)
+
+(* Propagation: Ordering from release operations and strong fences. *)
+let A-cumul(r) = (rfe ; [Marked])? ; r
+let cumul-fence = [Marked] ; (A-cumul(strong-fence | po-rel) | wmb |
+	po-unlock-rf-lock-po) ; [Marked]
+let prop = [Marked] ; (overwrite & ext)? ; cumul-fence* ;
+	[Marked] ; rfe? ; [Marked]
+
+(*
+ * Happens Before: Ordering from the passage of time.
+ * No fences needed here for prop because relation confined to one process.
+ *)
+let hb = [Marked] ; (ppo | rfe | ((prop \ id) & int)) ; [Marked]
+acyclic hb as happens-before
+
+(****************************************)
+(* Write and fence propagation ordering *)
+(****************************************)
+
+(* Propagation: Each non-rf link needs a strong fence. *)
+let pb = prop ; strong-fence ; hb* ; [Marked]
+acyclic pb as propagation
+
+(*******)
+(* RCU *)
+(*******)
+
+(*
+ * Effects of read-side critical sections proceed from the rcu_read_unlock()
+ * or srcu_read_unlock() backwards on the one hand, and from the
+ * rcu_read_lock() or srcu_read_lock() forwards on the other hand.
+ *
+ * In the definition of rcu-fence below, the po term at the left-hand side
+ * of each disjunct and the po? term at the right-hand end have been factored
+ * out.  They have been moved into the definitions of rcu-link and rb.
+ * This was necessary in order to apply the "& loc" tests correctly.
+ *)
+let rcu-gp = [Sync-rcu]		(* Compare with gp *)
+let srcu-gp = [Sync-srcu]
+let rcu-rscsi = rcu-rscs^-1
+let srcu-rscsi = srcu-rscs^-1
+
+(*
+ * The synchronize_rcu() strong fence is special in that it can order not
+ * one but two non-rf relations, but only in conjunction with an RCU
+ * read-side critical section.
+ *)
+let rcu-link = po? ; hb* ; pb* ; prop ; po
+
+(*
+ * Any sequence containing at least as many grace periods as RCU read-side
+ * critical sections (joined by rcu-link) induces order like a generalized
+ * inter-CPU strong fence.
+ * Likewise for SRCU grace periods and read-side critical sections, provided
+ * the synchronize_srcu() and srcu_read_[un]lock() calls refer to the same
+ * struct srcu_struct location.
+ *)
+let rec rcu-order = rcu-gp | srcu-gp |
+	(rcu-gp ; rcu-link ; rcu-rscsi) |
+	((srcu-gp ; rcu-link ; srcu-rscsi) & loc) |
+	(rcu-rscsi ; rcu-link ; rcu-gp) |
+	((srcu-rscsi ; rcu-link ; srcu-gp) & loc) |
+	(rcu-gp ; rcu-link ; rcu-order ; rcu-link ; rcu-rscsi) |
+	((srcu-gp ; rcu-link ; rcu-order ; rcu-link ; srcu-rscsi) & loc) |
+	(rcu-rscsi ; rcu-link ; rcu-order ; rcu-link ; rcu-gp) |
+	((srcu-rscsi ; rcu-link ; rcu-order ; rcu-link ; srcu-gp) & loc) |
+	(rcu-order ; rcu-link ; rcu-order)
+let rcu-fence = po ; rcu-order ; po?
+let fence = fence | rcu-fence
+let strong-fence = strong-fence | rcu-fence
+
+(* rb orders instructions just as pb does *)
+let rb = prop ; rcu-fence ; hb* ; pb* ; [Marked]
+
+irreflexive rb as rcu
+
+(*
+ * The happens-before, propagation, and rcu constraints are all
+ * expressions of temporal ordering.  They could be replaced by
+ * a single constraint on an "executes-before" relation, xb:
+ *
+ * let xb = hb | pb | rb
+ * acyclic xb as executes-before
+ *)
+
+(*********************************)
+(* Plain accesses and data races *)
+(*********************************)
+
+(* Warn about plain writes and marked accesses in the same region *)
+let mixed-accesses = ([Plain & W] ; (po-loc \ barrier) ; [Marked]) |
+	([Marked] ; (po-loc \ barrier) ; [Plain & W])
+flag ~empty mixed-accesses as mixed-accesses
+
+(* Executes-before and visibility *)
+let xbstar = (hb | pb | rb)*
+let vis = cumul-fence* ; rfe? ; [Marked] ;
+	((strong-fence ; [Marked] ; xbstar) | (xbstar & int))
+
+(* Boundaries for lifetimes of plain accesses *)
+let w-pre-bounded = [Marked] ; (addr | fence)?
+let r-pre-bounded = [Marked] ; (addr | nonrw-fence |
+	([R4rmb] ; fencerel(Rmb) ; [~Noreturn]))?
+let w-post-bounded = fence? ; [Marked]
+let r-post-bounded = (nonrw-fence | ([~Noreturn] ; fencerel(Rmb) ; [R4rmb]))? ;
+	[Marked]
+
+(* Visibility and executes-before for plain accesses *)
+let ww-vis = fence | (strong-fence ; xbstar ; w-pre-bounded) |
+	(w-post-bounded ; vis ; w-pre-bounded)
+let wr-vis = fence | (strong-fence ; xbstar ; r-pre-bounded) |
+	(w-post-bounded ; vis ; r-pre-bounded)
+let rw-xbstar = fence | (r-post-bounded ; xbstar ; w-pre-bounded)
+
+(* Potential races *)
+let pre-race = ext & ((Plain * M) | ((M \ IW) * Plain))
+
+(* Coherence requirements for plain accesses *)
+let wr-incoh = pre-race & rf & rw-xbstar^-1
+let rw-incoh = pre-race & fr & wr-vis^-1
+let ww-incoh = pre-race & co & ww-vis^-1
+empty (wr-incoh | rw-incoh | ww-incoh) as plain-coherence
+
+(* Actual races *)
+let ww-nonrace = ww-vis & ((Marked * W) | rw-xbstar) & ((W * Marked) | wr-vis)
+let ww-race = (pre-race & co) \ ww-nonrace
+let wr-race = (pre-race & (co? ; rf)) \ wr-vis \ rw-xbstar^-1
+let rw-race = (pre-race & fr) \ rw-xbstar
+
+flag ~empty (ww-race | wr-race | rw-race) as data-race
diff --git a/marvell/linux/tools/memory-model/linux-kernel.cfg b/marvell/linux/tools/memory-model/linux-kernel.cfg
new file mode 100644
index 0000000..3c8098e
--- /dev/null
+++ b/marvell/linux/tools/memory-model/linux-kernel.cfg
@@ -0,0 +1,21 @@
+macros linux-kernel.def
+bell linux-kernel.bell
+model linux-kernel.cat
+graph columns
+squished true
+showevents noregs
+movelabel true
+fontsize 8
+xscale 2.0
+yscale 1.5
+arrowsize 0.8
+showinitrf false
+showfinalrf false
+showinitwrites false
+splines spline
+pad 0.1
+edgeattr hb,color,indigo
+edgeattr co,color,blue
+edgeattr mb,color,darkgreen
+edgeattr wmb,color,darkgreen
+edgeattr rmb,color,darkgreen
diff --git a/marvell/linux/tools/memory-model/linux-kernel.def b/marvell/linux/tools/memory-model/linux-kernel.def
new file mode 100644
index 0000000..ef0f3c1
--- /dev/null
+++ b/marvell/linux/tools/memory-model/linux-kernel.def
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// An earlier version of this file appeared in the companion webpage for
+// "Frightening small children and disconcerting grown-ups: Concurrency
+// in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
+// which appeared in ASPLOS 2018.
+
+// ONCE
+READ_ONCE(X) __load{once}(X)
+WRITE_ONCE(X,V) { __store{once}(X,V); }
+
+// Release Acquire and friends
+smp_store_release(X,V) { __store{release}(*X,V); }
+smp_load_acquire(X) __load{acquire}(*X)
+rcu_assign_pointer(X,V) { __store{release}(X,V); }
+rcu_dereference(X) __load{once}(X)
+smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }
+
+// Fences
+smp_mb() { __fence{mb}; }
+smp_rmb() { __fence{rmb}; }
+smp_wmb() { __fence{wmb}; }
+smp_mb__before_atomic() { __fence{before-atomic}; }
+smp_mb__after_atomic() { __fence{after-atomic}; }
+smp_mb__after_spinlock() { __fence{after-spinlock}; }
+smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
+barrier() { __fence{barrier}; }
+
+// Exchange
+xchg(X,V)  __xchg{mb}(X,V)
+xchg_relaxed(X,V) __xchg{once}(X,V)
+xchg_release(X,V) __xchg{release}(X,V)
+xchg_acquire(X,V) __xchg{acquire}(X,V)
+cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
+cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
+cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
+cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
+
+// Spinlocks
+spin_lock(X) { __lock(X); }
+spin_unlock(X) { __unlock(X); }
+spin_trylock(X) __trylock(X)
+spin_is_locked(X) __islocked(X)
+
+// RCU
+rcu_read_lock() { __fence{rcu-lock}; }
+rcu_read_unlock() { __fence{rcu-unlock}; }
+synchronize_rcu() { __fence{sync-rcu}; }
+synchronize_rcu_expedited() { __fence{sync-rcu}; }
+
+// SRCU
+srcu_read_lock(X)  __srcu{srcu-lock}(X)
+srcu_read_unlock(X,Y) { __srcu{srcu-unlock}(X,Y); }
+synchronize_srcu(X)  { __srcu{sync-srcu}(X); }
+synchronize_srcu_expedited(X)  { __srcu{sync-srcu}(X); }
+
+// Atomic
+atomic_read(X) READ_ONCE(*X)
+atomic_set(X,V) { WRITE_ONCE(*X,V); }
+atomic_read_acquire(X) smp_load_acquire(X)
+atomic_set_release(X,V) { smp_store_release(X,V); }
+
+atomic_add(V,X) { __atomic_op(X,+,V); }
+atomic_sub(V,X) { __atomic_op(X,-,V); }
+atomic_inc(X)   { __atomic_op(X,+,1); }
+atomic_dec(X)   { __atomic_op(X,-,1); }
+
+atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
+atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
+atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
+atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
+atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
+atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
+atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
+atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)
+
+atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
+atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
+atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
+atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
+atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
+atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
+atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
+atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)
+
+atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
+atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
+atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
+atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
+atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
+atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
+atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
+atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)
+
+atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
+atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
+atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
+atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
+atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
+atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
+atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
+atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)
+
+atomic_xchg(X,V) __xchg{mb}(X,V)
+atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
+atomic_xchg_release(X,V) __xchg{release}(X,V)
+atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
+atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
+atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
+atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
+atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
+
+atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
+atomic_dec_and_test(X)  __atomic_op_return{mb}(X,-,1) == 0
+atomic_inc_and_test(X)  __atomic_op_return{mb}(X,+,1) == 0
+atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
diff --git a/marvell/linux/tools/memory-model/litmus-tests/.gitignore b/marvell/linux/tools/memory-model/litmus-tests/.gitignore
new file mode 100644
index 0000000..6e2ddc5
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/.gitignore
@@ -0,0 +1 @@
+*.litmus.out
diff --git a/marvell/linux/tools/memory-model/litmus-tests/CoRR+poonceonce+Once.litmus b/marvell/linux/tools/memory-model/litmus-tests/CoRR+poonceonce+Once.litmus
new file mode 100644
index 0000000..967f9f2
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/CoRR+poonceonce+Once.litmus
@@ -0,0 +1,26 @@
+C CoRR+poonceonce+Once
+
+(*
+ * Result: Never
+ *
+ * Test of read-read coherence, that is, whether or not two successive
+ * reads from the same variable are ordered.
+ *)
+
+{}
+
+P0(int *x)
+{
+	WRITE_ONCE(*x, 1);
+}
+
+P1(int *x)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*x);
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 1:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/CoRW+poonceonce+Once.litmus b/marvell/linux/tools/memory-model/litmus-tests/CoRW+poonceonce+Once.litmus
new file mode 100644
index 0000000..4635739
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/CoRW+poonceonce+Once.litmus
@@ -0,0 +1,25 @@
+C CoRW+poonceonce+Once
+
+(*
+ * Result: Never
+ *
+ * Test of read-write coherence, that is, whether or not a read from
+ * a given variable and a later write to that same variable are ordered.
+ *)
+
+{}
+
+P0(int *x)
+{
+	int r0;
+
+	r0 = READ_ONCE(*x);
+	WRITE_ONCE(*x, 1);
+}
+
+P1(int *x)
+{
+	WRITE_ONCE(*x, 2);
+}
+
+exists (x=2 /\ 0:r0=2)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/CoWR+poonceonce+Once.litmus b/marvell/linux/tools/memory-model/litmus-tests/CoWR+poonceonce+Once.litmus
new file mode 100644
index 0000000..bb068c9
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/CoWR+poonceonce+Once.litmus
@@ -0,0 +1,25 @@
+C CoWR+poonceonce+Once
+
+(*
+ * Result: Never
+ *
+ * Test of write-read coherence, that is, whether or not a write to a
+ * given variable and a later read from that same variable are ordered.
+ *)
+
+{}
+
+P0(int *x)
+{
+	int r0;
+
+	WRITE_ONCE(*x, 1);
+	r0 = READ_ONCE(*x);
+}
+
+P1(int *x)
+{
+	WRITE_ONCE(*x, 2);
+}
+
+exists (x=1 /\ 0:r0=2)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/CoWW+poonceonce.litmus b/marvell/linux/tools/memory-model/litmus-tests/CoWW+poonceonce.litmus
new file mode 100644
index 0000000..0d9f0a9
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/CoWW+poonceonce.litmus
@@ -0,0 +1,18 @@
+C CoWW+poonceonce
+
+(*
+ * Result: Never
+ *
+ * Test of write-write coherence, that is, whether or not two successive
+ * writes to the same variable are ordered.
+ *)
+
+{}
+
+P0(int *x)
+{
+	WRITE_ONCE(*x, 1);
+	WRITE_ONCE(*x, 2);
+}
+
+exists (x=1)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus b/marvell/linux/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus
new file mode 100644
index 0000000..e729d27
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/IRIW+fencembonceonces+OnceOnce.litmus
@@ -0,0 +1,45 @@
+C IRIW+fencembonceonces+OnceOnce
+
+(*
+ * Result: Never
+ *
+ * Test of independent reads from independent writes with smp_mb()
+ * between each pair of reads.  In other words, is smp_mb() sufficient to
+ * cause two different reading processes to agree on the order of a pair
+ * of writes, where each write is to a different variable by a different
+ * process?  This litmus test exercises LKMM's "propagation" rule.
+ *)
+
+{}
+
+P0(int *x)
+{
+	WRITE_ONCE(*x, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*x);
+	smp_mb();
+	r1 = READ_ONCE(*y);
+}
+
+P2(int *y)
+{
+	WRITE_ONCE(*y, 1);
+}
+
+P3(int *x, int *y)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*y);
+	smp_mb();
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 1:r1=0 /\ 3:r0=1 /\ 3:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/IRIW+poonceonces+OnceOnce.litmus b/marvell/linux/tools/memory-model/litmus-tests/IRIW+poonceonces+OnceOnce.litmus
new file mode 100644
index 0000000..4b54dd6
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/IRIW+poonceonces+OnceOnce.litmus
@@ -0,0 +1,43 @@
+C IRIW+poonceonces+OnceOnce
+
+(*
+ * Result: Sometimes
+ *
+ * Test of independent reads from independent writes with nothing
+ * between each pair of reads.  In other words, is anything at all
+ * needed to cause two different reading processes to agree on the order
+ * of a pair of writes, where each write is to a different variable by a
+ * different process?
+ *)
+
+{}
+
+P0(int *x)
+{
+	WRITE_ONCE(*x, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*x);
+	r1 = READ_ONCE(*y);
+}
+
+P2(int *y)
+{
+	WRITE_ONCE(*y, 1);
+}
+
+P3(int *x, int *y)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*y);
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 1:r1=0 /\ 3:r0=1 /\ 3:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus b/marvell/linux/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
new file mode 100644
index 0000000..094d58d
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/ISA2+pooncelock+pooncelock+pombonce.litmus
@@ -0,0 +1,40 @@
+C ISA2+pooncelock+pooncelock+pombonce
+
+(*
+ * Result: Never
+ *
+ * This test shows that write-write ordering provided by locks
+ * (in P0() and P1()) is visible to external process P2().
+ *)
+
+{}
+
+P0(int *x, int *y, spinlock_t *mylock)
+{
+	spin_lock(mylock);
+	WRITE_ONCE(*x, 1);
+	WRITE_ONCE(*y, 1);
+	spin_unlock(mylock);
+}
+
+P1(int *y, int *z, spinlock_t *mylock)
+{
+	int r0;
+
+	spin_lock(mylock);
+	r0 = READ_ONCE(*y);
+	WRITE_ONCE(*z, 1);
+	spin_unlock(mylock);
+}
+
+P2(int *x, int *z)
+{
+	int r1;
+	int r2;
+
+	r2 = READ_ONCE(*z);
+	smp_mb();
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 2:r2=1 /\ 2:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/ISA2+poonceonces.litmus b/marvell/linux/tools/memory-model/litmus-tests/ISA2+poonceonces.litmus
new file mode 100644
index 0000000..b321aa6
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/ISA2+poonceonces.litmus
@@ -0,0 +1,37 @@
+C ISA2+poonceonces
+
+(*
+ * Result: Sometimes
+ *
+ * Given a release-acquire chain ordering the first process's store
+ * against the last process's load, is ordering preserved if all of the
+ * smp_store_release() invocations are replaced by WRITE_ONCE() and all
+ * of the smp_load_acquire() invocations are replaced by READ_ONCE()?
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	WRITE_ONCE(*x, 1);
+	WRITE_ONCE(*y, 1);
+}
+
+P1(int *y, int *z)
+{
+	int r0;
+
+	r0 = READ_ONCE(*y);
+	WRITE_ONCE(*z, 1);
+}
+
+P2(int *x, int *z)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*z);
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus b/marvell/linux/tools/memory-model/litmus-tests/ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus
new file mode 100644
index 0000000..025b046
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus
@@ -0,0 +1,39 @@
+C ISA2+pooncerelease+poacquirerelease+poacquireonce
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that a release-acquire chain suffices
+ * to order P0()'s initial write against P2()'s final read.  The reason
+ * that the release-acquire chain suffices is because in all but one
+ * case (P2() to P0()), each process reads from the preceding process's
+ * write.  In memory-model-speak, there is only one non-reads-from
+ * (AKA non-rf) link, so release-acquire is all that is needed.
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	WRITE_ONCE(*x, 1);
+	smp_store_release(y, 1);
+}
+
+P1(int *y, int *z)
+{
+	int r0;
+
+	r0 = smp_load_acquire(y);
+	smp_store_release(z, 1);
+}
+
+P2(int *x, int *z)
+{
+	int r0;
+	int r1;
+
+	r0 = smp_load_acquire(z);
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus b/marvell/linux/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus
new file mode 100644
index 0000000..4727f5a
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/LB+fencembonceonce+ctrlonceonce.litmus
@@ -0,0 +1,34 @@
+C LB+fencembonceonce+ctrlonceonce
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that lightweight ordering suffices for
+ * the load-buffering pattern, in other words, preventing all processes
+ * from reading from the preceding process's write.  In this example, the
+ * combination of a control dependency and a full memory barrier is enough
+ * to do the trick.  (But the full memory barrier could be replaced with
+ * another control dependency and order would still be maintained.)
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	int r0;
+
+	r0 = READ_ONCE(*x);
+	if (r0)
+		WRITE_ONCE(*y, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+
+	r0 = READ_ONCE(*y);
+	smp_mb();
+	WRITE_ONCE(*x, 1);
+}
+
+exists (0:r0=1 /\ 1:r0=1)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/LB+poacquireonce+pooncerelease.litmus b/marvell/linux/tools/memory-model/litmus-tests/LB+poacquireonce+pooncerelease.litmus
new file mode 100644
index 0000000..07b9904
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/LB+poacquireonce+pooncerelease.litmus
@@ -0,0 +1,29 @@
+C LB+poacquireonce+pooncerelease
+
+(*
+ * Result: Never
+ *
+ * Does a release-acquire pair suffice for the load-buffering litmus
+ * test, where each process reads from one of two variables then writes
+ * to the other?
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	int r0;
+
+	r0 = READ_ONCE(*x);
+	smp_store_release(y, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+
+	r0 = smp_load_acquire(y);
+	WRITE_ONCE(*x, 1);
+}
+
+exists (0:r0=1 /\ 1:r0=1)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/LB+poonceonces.litmus b/marvell/linux/tools/memory-model/litmus-tests/LB+poonceonces.litmus
new file mode 100644
index 0000000..74c49cb
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/LB+poonceonces.litmus
@@ -0,0 +1,28 @@
+C LB+poonceonces
+
+(*
+ * Result: Sometimes
+ *
+ * Can the counter-intuitive outcome for the load-buffering pattern
+ * be prevented even with no explicit ordering?
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	int r0;
+
+	r0 = READ_ONCE(*x);
+	WRITE_ONCE(*y, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+
+	r0 = READ_ONCE(*y);
+	WRITE_ONCE(*x, 1);
+}
+
+exists (0:r0=1 /\ 1:r0=1)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus b/marvell/linux/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus
new file mode 100644
index 0000000..a273da9
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus
@@ -0,0 +1,30 @@
+C MP+fencewmbonceonce+fencermbonceonce
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that smp_wmb() and smp_rmb() provide
+ * sufficient ordering for the message-passing pattern.  However, it
+ * is usually better to use smp_store_release() and smp_load_acquire().
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	WRITE_ONCE(*x, 1);
+	smp_wmb();
+	WRITE_ONCE(*y, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*y);
+	smp_rmb();
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 1:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus b/marvell/linux/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus
new file mode 100644
index 0000000..97731b4
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus
@@ -0,0 +1,34 @@
+C MP+onceassign+derefonce
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that rcu_assign_pointer() and
+ * rcu_dereference() suffice to ensure that an RCU reader will not see
+ * pre-initialization garbage when it traverses an RCU-protected data
+ * structure containing a newly inserted element.
+ *)
+
+{
+y=z;
+z=0;
+}
+
+P0(int *x, int **y)
+{
+	WRITE_ONCE(*x, 1);
+	rcu_assign_pointer(*y, x);
+}
+
+P1(int *x, int **y)
+{
+	int *r0;
+	int r1;
+
+	rcu_read_lock();
+	r0 = rcu_dereference(*y);
+	r1 = READ_ONCE(*r0);
+	rcu_read_unlock();
+}
+
+exists (1:r0=x /\ 1:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus b/marvell/linux/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus
new file mode 100644
index 0000000..50f4d62
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus
@@ -0,0 +1,35 @@
+C MP+polockmbonce+poacquiresilsil
+
+(*
+ * Result: Never
+ *
+ * Do spinlocks combined with smp_mb__after_spinlock() provide order
+ * to outside observers using spin_is_locked() to sense the lock-held
+ * state, ordered by acquire?  Note that when the first spin_is_locked()
+ * returns false and the second true, we know that the smp_load_acquire()
+ * executed before the lock was acquired (loosely speaking).
+ *)
+
+{
+}
+
+P0(spinlock_t *lo, int *x)
+{
+	spin_lock(lo);
+	smp_mb__after_spinlock();
+	WRITE_ONCE(*x, 1);
+	spin_unlock(lo);
+}
+
+P1(spinlock_t *lo, int *x)
+{
+	int r1;
+	int r2;
+	int r3;
+
+	r1 = smp_load_acquire(x);
+	r2 = spin_is_locked(lo);
+	r3 = spin_is_locked(lo);
+}
+
+exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus b/marvell/linux/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus
new file mode 100644
index 0000000..abf81e7
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus
@@ -0,0 +1,34 @@
+C MP+polockonce+poacquiresilsil
+
+(*
+ * Result: Sometimes
+ *
+ * Do spinlocks provide order to outside observers using spin_is_locked()
+ * to sense the lock-held state, ordered by acquire?  Note that when the
+ * first spin_is_locked() returns false and the second true, we know that
+ * the smp_load_acquire() executed before the lock was acquired (loosely
+ * speaking).
+ *)
+
+{
+}
+
+P0(spinlock_t *lo, int *x)
+{
+	spin_lock(lo);
+	WRITE_ONCE(*x, 1);
+	spin_unlock(lo);
+}
+
+P1(spinlock_t *lo, int *x)
+{
+	int r1;
+	int r2;
+	int r3;
+
+	r1 = smp_load_acquire(x);
+	r2 = spin_is_locked(lo);
+	r3 = spin_is_locked(lo);
+}
+
+exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/MP+polocks.litmus b/marvell/linux/tools/memory-model/litmus-tests/MP+polocks.litmus
new file mode 100644
index 0000000..712a4fc
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/MP+polocks.litmus
@@ -0,0 +1,35 @@
+C MP+polocks
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates how lock acquisitions and releases can
+ * stand in for smp_load_acquire() and smp_store_release(), respectively.
+ * In other words, when holding a given lock (or indeed after releasing a
+ * given lock), a CPU is not only guaranteed to see the accesses that other
+ * CPUs made while previously holding that lock, it is also guaranteed
+ * to see all prior accesses by those other CPUs.
+ *)
+
+{}
+
+P0(int *x, int *y, spinlock_t *mylock)
+{
+	WRITE_ONCE(*x, 1);
+	spin_lock(mylock);
+	WRITE_ONCE(*y, 1);
+	spin_unlock(mylock);
+}
+
+P1(int *x, int *y, spinlock_t *mylock)
+{
+	int r0;
+	int r1;
+
+	spin_lock(mylock);
+	r0 = READ_ONCE(*y);
+	spin_unlock(mylock);
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 1:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/MP+poonceonces.litmus b/marvell/linux/tools/memory-model/litmus-tests/MP+poonceonces.litmus
new file mode 100644
index 0000000..172f014
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/MP+poonceonces.litmus
@@ -0,0 +1,27 @@
+C MP+poonceonces
+
+(*
+ * Result: Sometimes
+ *
+ * Can the counter-intuitive message-passing outcome be prevented with
+ * no ordering at all?
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	WRITE_ONCE(*x, 1);
+	WRITE_ONCE(*y, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*y);
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 1:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus b/marvell/linux/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus
new file mode 100644
index 0000000..d52c684
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus
@@ -0,0 +1,28 @@
+C MP+pooncerelease+poacquireonce
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that smp_store_release() and
+ * smp_load_acquire() provide sufficient ordering for the message-passing
+ * pattern.
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	WRITE_ONCE(*x, 1);
+	smp_store_release(y, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+	int r1;
+
+	r0 = smp_load_acquire(y);
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 1:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/MP+porevlocks.litmus b/marvell/linux/tools/memory-model/litmus-tests/MP+porevlocks.litmus
new file mode 100644
index 0000000..72c9276
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/MP+porevlocks.litmus
@@ -0,0 +1,35 @@
+C MP+porevlocks
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates how lock acquisitions and releases can
+ * stand in for smp_load_acquire() and smp_store_release(), respectively.
+ * In other words, when holding a given lock (or indeed after releasing a
+ * given lock), a CPU is not only guaranteed to see the accesses that other
+ * CPUs made while previously holding that lock, it is also guaranteed to
+ * see all prior accesses by those other CPUs.
+ *)
+
+{}
+
+P0(int *x, int *y, spinlock_t *mylock)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*y);
+	spin_lock(mylock);
+	r1 = READ_ONCE(*x);
+	spin_unlock(mylock);
+}
+
+P1(int *x, int *y, spinlock_t *mylock)
+{
+	spin_lock(mylock);
+	WRITE_ONCE(*x, 1);
+	spin_unlock(mylock);
+	WRITE_ONCE(*y, 1);
+}
+
+exists (0:r0=1 /\ 0:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/R+fencembonceonces.litmus b/marvell/linux/tools/memory-model/litmus-tests/R+fencembonceonces.litmus
new file mode 100644
index 0000000..222a0b8
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/R+fencembonceonces.litmus
@@ -0,0 +1,30 @@
+C R+fencembonceonces
+
+(*
+ * Result: Never
+ *
+ * This is the fully ordered (via smp_mb()) version of one of the classic
+ * counterintuitive litmus tests that illustrates the effects of store
+ * propagation delays.  Note that weakening either of the barriers would
+ * cause the resulting test to be allowed.
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	WRITE_ONCE(*x, 1);
+	smp_mb();
+	WRITE_ONCE(*y, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+
+	WRITE_ONCE(*y, 2);
+	smp_mb();
+	r0 = READ_ONCE(*x);
+}
+
+exists (y=2 /\ 1:r0=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/R+poonceonces.litmus b/marvell/linux/tools/memory-model/litmus-tests/R+poonceonces.litmus
new file mode 100644
index 0000000..5386f12
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/R+poonceonces.litmus
@@ -0,0 +1,27 @@
+C R+poonceonces
+
+(*
+ * Result: Sometimes
+ *
+ * This is the unordered (thus lacking smp_mb()) version of one of the
+ * classic counterintuitive litmus tests that illustrates the effects of
+ * store propagation delays.
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	WRITE_ONCE(*x, 1);
+	WRITE_ONCE(*y, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+
+	WRITE_ONCE(*y, 2);
+	r0 = READ_ONCE(*x);
+}
+
+exists (y=2 /\ 1:r0=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/README b/marvell/linux/tools/memory-model/litmus-tests/README
new file mode 100644
index 0000000..681f906
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/README
@@ -0,0 +1,253 @@
+============
+LITMUS TESTS
+============
+
+CoRR+poonceonce+Once.litmus
+	Test of read-read coherence, that is, whether or not two
+	successive reads from the same variable are ordered.
+
+CoRW+poonceonce+Once.litmus
+	Test of read-write coherence, that is, whether or not a read
+	from a given variable followed by a write to that same variable
+	are ordered.
+
+CoWR+poonceonce+Once.litmus
+	Test of write-read coherence, that is, whether or not a write
+	to a given variable followed by a read from that same variable
+	are ordered.
+
+CoWW+poonceonce.litmus
+	Test of write-write coherence, that is, whether or not two
+	successive writes to the same variable are ordered.
+
+IRIW+fencembonceonces+OnceOnce.litmus
+	Test of independent reads from independent writes with smp_mb()
+	between each pair of reads.  In other words, is smp_mb()
+	sufficient to cause two different reading processes to agree on
+	the order of a pair of writes, where each write is to a different
+	variable by a different process?  This litmus test is forbidden
+	by LKMM's propagation rule.
+
+IRIW+poonceonces+OnceOnce.litmus
+	Test of independent reads from independent writes with nothing
+	between each pair of reads.  In other words, is anything at all
+	needed to cause two different reading processes to agree on the
+	order of a pair of writes, where each write is to a different
+	variable by a different process?
+
+ISA2+pooncelock+pooncelock+pombonce.litmus
+	Tests whether the ordering provided by a lock-protected S
+	litmus test is visible to an external process whose accesses are
+	separated by smp_mb().  This addition of an external process to
+	S is otherwise known as ISA2.
+
+ISA2+poonceonces.litmus
+	As below, but with store-release replaced with WRITE_ONCE()
+	and load-acquire replaced with READ_ONCE().
+
+ISA2+pooncerelease+poacquirerelease+poacquireonce.litmus
+	Can a release-acquire chain order a prior store against
+	a later load?
+
+LB+fencembonceonce+ctrlonceonce.litmus
+	Do a control dependency and an smp_mb() suffice for the
+	load-buffering litmus test, where each process reads from one
+	of two variables then writes to the other?
+
+LB+poacquireonce+pooncerelease.litmus
+	Does a release-acquire pair suffice for the load-buffering
+	litmus test, where each process reads from one of two variables then
+	writes to the other?
+
+LB+poonceonces.litmus
+	As above, but with store-release replaced with WRITE_ONCE()
+	and load-acquire replaced with READ_ONCE().
+
+MP+onceassign+derefonce.litmus
+	As below, but with rcu_assign_pointer() and an rcu_dereference().
+
+MP+polockmbonce+poacquiresilsil.litmus
+	Protect the access with a lock and an smp_mb__after_spinlock()
+	in one process, and use an acquire load followed by a pair of
+	spin_is_locked() calls in the other process.
+
+MP+polockonce+poacquiresilsil.litmus
+	Protect the access with a lock in one process, and use an
+	acquire load followed by a pair of spin_is_locked() calls
+	in the other process.
+
+MP+polocks.litmus
+	As below, but with the second access of the writer process
+	and the first access of the reader process protected by a lock.
+
+MP+poonceonces.litmus
+	As below, but without the smp_rmb() and smp_wmb().
+
+MP+pooncerelease+poacquireonce.litmus
+	As below, but with a release-acquire chain.
+
+MP+porevlocks.litmus
+	As below, but with the first access of the writer process
+	and the second access of the reader process protected by a lock.
+
+MP+fencewmbonceonce+fencermbonceonce.litmus
+	Do an smp_wmb() (between the stores) and an smp_rmb() (between
+	the loads) suffice for the message-passing litmus test, where one
+	process writes data and then a flag, and the other process reads
+	the flag and then the data?  (This is similar to the ISA2 tests,
+	but with two processes instead of three.)
+
+R+fencembonceonces.litmus
+	This is the fully ordered (via smp_mb()) version of one of
+	the classic counterintuitive litmus tests that illustrates the
+	effects of store propagation delays.
+
+R+poonceonces.litmus
+	As above, but without the smp_mb() invocations.
+
+SB+fencembonceonces.litmus
+	This is the fully ordered (again, via smp_mb()) version of store
+	buffering, which forms the core of Dekker's mutual-exclusion
+	algorithm.
+
+SB+poonceonces.litmus
+	As above, but without the smp_mb() invocations.
+
+SB+rfionceonce-poonceonces.litmus
+	This litmus test demonstrates that LKMM is not fully multicopy
+	atomic.  (Neither is it other-multicopy atomic.)  This litmus test
+	also demonstrates the "locations" debugging aid, which designates
+	additional registers and locations to be printed out in the dump
+	of final states in the herd7 output.  Without the "locations"
+	statement, only those registers and locations mentioned in the
+	"exists" clause will be printed.
+
+S+poonceonces.litmus
+	As below, but without the smp_wmb() and acquire load.
+
+S+fencewmbonceonce+poacquireonce.litmus
+	Can a smp_wmb(), instead of a release, and an acquire order
+	a prior store against a subsequent store?
+
+WRC+poonceonces+Once.litmus
+WRC+pooncerelease+fencermbonceonce+Once.litmus
+	These two are members of an extension of the MP litmus-test
+	class in which the first write is moved to a separate process.
+	The second is forbidden because smp_store_release() is
+	A-cumulative in LKMM.
+
+Z6.0+pooncelock+pooncelock+pombonce.litmus
+	Is the ordering provided by a spin_unlock() and a subsequent
+	spin_lock() sufficient to make ordering apparent to accesses
+	by a process not holding the lock?
+
+Z6.0+pooncelock+poonceLock+pombonce.litmus
+	As above, but with smp_mb__after_spinlock() immediately
+	following the spin_lock().
+
+Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
+	Is the ordering provided by a release-acquire chain sufficient
+	to make ordering apparent to accesses by a process that does
+	not participate in that release-acquire chain?
+
+A great many more litmus tests are available here:
+
+	https://github.com/paulmckrcu/litmus
+
+==================
+LITMUS TEST NAMING
+==================
+
+Litmus tests are usually named based on their contents, which means that
+looking at the name tells you what the litmus test does.  The naming
+scheme covers litmus tests having a single cycle that passes through
+each process exactly once, so litmus tests not fitting this description
+are named on an ad-hoc basis.
+
+The structure of a litmus-test name is the litmus-test class, a plus
+sign ("+"), and one string for each process, separated by plus signs.
+The end of the name is ".litmus".
+
+The litmus-test classes may be found in the infamous test6.pdf:
+https://www.cl.cam.ac.uk/~pes20/ppc-supplemental/test6.pdf
+Each class defines the pattern of accesses and of the variables accessed.
+For example, if one process writes to a pair of variables, and
+the other process reads from these same variables, the corresponding
+litmus-test class is "MP" (message passing), which may be found on the
+left-hand end of the second row of tests on page one of test6.pdf.
+
+The strings used to identify the actions carried out by each process are
+complex due to a desire to have short(er) names.  Thus, there is a tool to
+generate these strings from a given litmus test's actions.  For example,
+consider the processes from SB+rfionceonce-poonceonces.litmus:
+
+	P0(int *x, int *y)
+	{
+		int r1;
+		int r2;
+
+		WRITE_ONCE(*x, 1);
+		r1 = READ_ONCE(*x);
+		r2 = READ_ONCE(*y);
+	}
+
+	P1(int *x, int *y)
+	{
+		int r3;
+		int r4;
+
+		WRITE_ONCE(*y, 1);
+		r3 = READ_ONCE(*y);
+		r4 = READ_ONCE(*x);
+	}
+
+The next step is to construct a space-separated list of descriptors,
+interleaving descriptions of the relation between a pair of consecutive
+accesses with descriptions of the second access in the pair.
+
+P0()'s WRITE_ONCE() is read by its first READ_ONCE(), which is a
+reads-from link (rf) and internal to the P0() process.  This is
+"rfi", which is an abbreviation for "reads-from internal".  Because
+some of the tools string these abbreviations together with space
+characters separating processes, the first character is capitalized,
+resulting in "Rfi".
+
+P0()'s second access is a READ_ONCE(), as opposed to (for example)
+smp_load_acquire(), so next is "Once".  Thus far, we have "Rfi Once".
+
+P0()'s third access is also a READ_ONCE(), but to y rather than x.
+This is related to P0()'s second access by program order ("po"),
+to a different variable ("d"), and both accesses are reads ("RR").
+The resulting descriptor is "PodRR".  Because P0()'s third access is
+READ_ONCE(), we add another "Once" descriptor.
+
+A from-read ("fre") relation links P0()'s third to P1()'s first
+access, and the resulting descriptor is "Fre".  P1()'s first access is
+WRITE_ONCE(), which as before gives the descriptor "Once".  The string
+thus far is "Rfi Once PodRR Once Fre Once".
+
+The remainder of P1() is similar to P0(), which means we add
+"Rfi Once PodRR Once".  Another fre links P1()'s last access to
+P0()'s first access, which is WRITE_ONCE(), so we add "Fre Once".
+The full string is thus:
+
+	Rfi Once PodRR Once Fre Once Rfi Once PodRR Once Fre Once
+
+This string can be given to the "norm7" and "classify7" tools to
+produce the name:
+
+	$ norm7 -bell linux-kernel.bell \
+		Rfi Once PodRR Once Fre Once Rfi Once PodRR Once Fre Once | \
+	  sed -e 's/:.*//g'
+	SB+rfionceonce-poonceonces
+
+Adding the ".litmus" suffix: SB+rfionceonce-poonceonces.litmus
+
+The descriptors that describe connections between consecutive accesses
+within the cycle through a given litmus test can be provided by the herd7
+tool (Rfi, Po, Fre, and so on) or by the linux-kernel.bell file (Once,
+Release, Acquire, and so on).
+
+To see the full list of descriptors, execute the following command:
+
+	$ diyone7 -bell linux-kernel.bell -show edges
diff --git a/marvell/linux/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus b/marvell/linux/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus
new file mode 100644
index 0000000..1847982
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/S+fencewmbonceonce+poacquireonce.litmus
@@ -0,0 +1,27 @@
+C S+fencewmbonceonce+poacquireonce
+
+(*
+ * Result: Never
+ *
+ * Can a smp_wmb(), instead of a release, and an acquire order a prior
+ * store against a subsequent store?
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	WRITE_ONCE(*x, 2);
+	smp_wmb();
+	WRITE_ONCE(*y, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+
+	r0 = smp_load_acquire(y);
+	WRITE_ONCE(*x, 1);
+}
+
+exists (x=2 /\ 1:r0=1)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/S+poonceonces.litmus b/marvell/linux/tools/memory-model/litmus-tests/S+poonceonces.litmus
new file mode 100644
index 0000000..8c9c2f8
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/S+poonceonces.litmus
@@ -0,0 +1,28 @@
+C S+poonceonces
+
+(*
+ * Result: Sometimes
+ *
+ * Starting with a two-process release-acquire chain ordering P0()'s
+ * first store against P1()'s final load, if the smp_store_release()
+ * is replaced by WRITE_ONCE() and the smp_load_acquire() replaced by
+ * READ_ONCE(), is ordering preserved?
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	WRITE_ONCE(*x, 2);
+	WRITE_ONCE(*y, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+
+	r0 = READ_ONCE(*y);
+	WRITE_ONCE(*x, 1);
+}
+
+exists (x=2 /\ 1:r0=1)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus b/marvell/linux/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus
new file mode 100644
index 0000000..ed5fff1
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/SB+fencembonceonces.litmus
@@ -0,0 +1,32 @@
+C SB+fencembonceonces
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that full memory barriers suffice to
+ * order the store-buffering pattern, where each process writes to the
+ * variable that the preceding process reads.  (Locking and RCU can also
+ * suffice, but not much else.)
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	int r0;
+
+	WRITE_ONCE(*x, 1);
+	smp_mb();
+	r0 = READ_ONCE(*y);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+
+	WRITE_ONCE(*y, 1);
+	smp_mb();
+	r0 = READ_ONCE(*x);
+}
+
+exists (0:r0=0 /\ 1:r0=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/SB+poonceonces.litmus b/marvell/linux/tools/memory-model/litmus-tests/SB+poonceonces.litmus
new file mode 100644
index 0000000..10d5507
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/SB+poonceonces.litmus
@@ -0,0 +1,29 @@
+C SB+poonceonces
+
+(*
+ * Result: Sometimes
+ *
+ * This litmus test demonstrates that at least some ordering is required
+ * to order the store-buffering pattern, where each process writes to the
+ * variable that the preceding process reads.
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	int r0;
+
+	WRITE_ONCE(*x, 1);
+	r0 = READ_ONCE(*y);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+
+	WRITE_ONCE(*y, 1);
+	r0 = READ_ONCE(*x);
+}
+
+exists (0:r0=0 /\ 1:r0=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus b/marvell/linux/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus
new file mode 100644
index 0000000..04a1660
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus
@@ -0,0 +1,32 @@
+C SB+rfionceonce-poonceonces
+
+(*
+ * Result: Sometimes
+ *
+ * This litmus test demonstrates that LKMM is not fully multicopy atomic.
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	int r1;
+	int r2;
+
+	WRITE_ONCE(*x, 1);
+	r1 = READ_ONCE(*x);
+	r2 = READ_ONCE(*y);
+}
+
+P1(int *x, int *y)
+{
+	int r3;
+	int r4;
+
+	WRITE_ONCE(*y, 1);
+	r3 = READ_ONCE(*y);
+	r4 = READ_ONCE(*x);
+}
+
+locations [0:r1; 1:r3; x; y] (* Debug aid: Print things not in "exists". *)
+exists (0:r2=0 /\ 1:r4=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/WRC+poonceonces+Once.litmus b/marvell/linux/tools/memory-model/litmus-tests/WRC+poonceonces+Once.litmus
new file mode 100644
index 0000000..6a2bc12
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/WRC+poonceonces+Once.litmus
@@ -0,0 +1,35 @@
+C WRC+poonceonces+Once
+
+(*
+ * Result: Sometimes
+ *
+ * This litmus test is an extension of the message-passing pattern,
+ * where the first write is moved to a separate process.  Note that this
+ * test has no ordering at all.
+ *)
+
+{}
+
+P0(int *x)
+{
+	WRITE_ONCE(*x, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+
+	r0 = READ_ONCE(*x);
+	WRITE_ONCE(*y, 1);
+}
+
+P2(int *x, int *y)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*y);
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus b/marvell/linux/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus
new file mode 100644
index 0000000..e994725
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/WRC+pooncerelease+fencermbonceonce+Once.litmus
@@ -0,0 +1,38 @@
+C WRC+pooncerelease+fencermbonceonce+Once
+
+(*
+ * Result: Never
+ *
+ * This litmus test is an extension of the message-passing pattern, where
+ * the first write is moved to a separate process.  Because it features
+ * a release and a read memory barrier, it should be forbidden.  More
+ * specifically, this litmus test is forbidden because smp_store_release()
+ * is A-cumulative in LKMM.
+ *)
+
+{}
+
+P0(int *x)
+{
+	WRITE_ONCE(*x, 1);
+}
+
+P1(int *x, int *y)
+{
+	int r0;
+
+	r0 = READ_ONCE(*x);
+	smp_store_release(y, 1);
+}
+
+P2(int *x, int *y)
+{
+	int r0;
+	int r1;
+
+	r0 = READ_ONCE(*y);
+	smp_rmb();
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ 2:r0=1 /\ 2:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/Z6.0+pooncelock+poonceLock+pombonce.litmus b/marvell/linux/tools/memory-model/litmus-tests/Z6.0+pooncelock+poonceLock+pombonce.litmus
new file mode 100644
index 0000000..415248f
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/Z6.0+pooncelock+poonceLock+pombonce.litmus
@@ -0,0 +1,42 @@
+C Z6.0+pooncelock+poonceLock+pombonce
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates how smp_mb__after_spinlock() may be
+ * used to ensure that accesses in different critical sections for a
+ * given lock running on different CPUs are nevertheless seen in order
+ * by CPUs not holding that lock.
+ *)
+
+{}
+
+P0(int *x, int *y, spinlock_t *mylock)
+{
+	spin_lock(mylock);
+	WRITE_ONCE(*x, 1);
+	WRITE_ONCE(*y, 1);
+	spin_unlock(mylock);
+}
+
+P1(int *y, int *z, spinlock_t *mylock)
+{
+	int r0;
+
+	spin_lock(mylock);
+	smp_mb__after_spinlock();
+	r0 = READ_ONCE(*y);
+	WRITE_ONCE(*z, 1);
+	spin_unlock(mylock);
+}
+
+P2(int *x, int *z)
+{
+	int r1;
+
+	WRITE_ONCE(*z, 2);
+	smp_mb();
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ z=2 /\ 2:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/Z6.0+pooncelock+pooncelock+pombonce.litmus b/marvell/linux/tools/memory-model/litmus-tests/Z6.0+pooncelock+pooncelock+pombonce.litmus
new file mode 100644
index 0000000..10a2aa0
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/Z6.0+pooncelock+pooncelock+pombonce.litmus
@@ -0,0 +1,40 @@
+C Z6.0+pooncelock+pooncelock+pombonce
+
+(*
+ * Result: Sometimes
+ *
+ * This example demonstrates that a pair of accesses made by different
+ * processes each while holding a given lock will not necessarily be
+ * seen as ordered by a third process not holding that lock.
+ *)
+
+{}
+
+P0(int *x, int *y, spinlock_t *mylock)
+{
+	spin_lock(mylock);
+	WRITE_ONCE(*x, 1);
+	WRITE_ONCE(*y, 1);
+	spin_unlock(mylock);
+}
+
+P1(int *y, int *z, spinlock_t *mylock)
+{
+	int r0;
+
+	spin_lock(mylock);
+	r0 = READ_ONCE(*y);
+	WRITE_ONCE(*z, 1);
+	spin_unlock(mylock);
+}
+
+P2(int *x, int *z)
+{
+	int r1;
+
+	WRITE_ONCE(*z, 2);
+	smp_mb();
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ z=2 /\ 2:r1=0)
diff --git a/marvell/linux/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus b/marvell/linux/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
new file mode 100644
index 0000000..88e70b8
--- /dev/null
+++ b/marvell/linux/tools/memory-model/litmus-tests/Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus
@@ -0,0 +1,42 @@
+C Z6.0+pooncerelease+poacquirerelease+fencembonceonce
+
+(*
+ * Result: Sometimes
+ *
+ * This litmus test shows that a release-acquire chain, while sufficient
+ * when there is but one non-reads-from (AKA non-rf) link, does not suffice
+ * if there is more than one.  Of the three processes, only P1() reads from
+ * P0's write, which means that there are two non-rf links: P1() to P2()
+ * is a write-to-write link (AKA a "coherence" or just "co" link) and P2()
+ * to P0() is a read-to-write link (AKA a "from-reads" or just "fr" link).
+ * When there are two or more non-rf links, you typically will need one
+ * full barrier for each non-rf link.  (Exceptions include some cases
+ * involving locking.)
+ *)
+
+{}
+
+P0(int *x, int *y)
+{
+	WRITE_ONCE(*x, 1);
+	smp_store_release(y, 1);
+}
+
+P1(int *y, int *z)
+{
+	int r0;
+
+	r0 = smp_load_acquire(y);
+	smp_store_release(z, 1);
+}
+
+P2(int *x, int *z)
+{
+	int r1;
+
+	WRITE_ONCE(*z, 2);
+	smp_mb();
+	r1 = READ_ONCE(*x);
+}
+
+exists (1:r0=1 /\ z=2 /\ 2:r1=0)
diff --git a/marvell/linux/tools/memory-model/lock.cat b/marvell/linux/tools/memory-model/lock.cat
new file mode 100644
index 0000000..9f3b5b3
--- /dev/null
+++ b/marvell/linux/tools/memory-model/lock.cat
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0+
+(*
+ * Copyright (C) 2016 Luc Maranget <luc.maranget@inria.fr> for Inria
+ * Copyright (C) 2017 Alan Stern <stern@rowland.harvard.edu>
+ *)
+
+(*
+ * Generate coherence orders and handle lock operations
+ *)
+
+include "cross.cat"
+
+(*
+ * The lock-related events generated by herd7 are as follows:
+ *
+ * LKR		Lock-Read: the read part of a spin_lock() or successful
+ *			spin_trylock() read-modify-write event pair
+ * LKW		Lock-Write: the write part of a spin_lock() or successful
+ *			spin_trylock() RMW event pair
+ * UL		Unlock: a spin_unlock() event
+ * LF		Lock-Fail: a failed spin_trylock() event
+ * RL		Read-Locked: a spin_is_locked() event which returns True
+ * RU		Read-Unlocked: a spin_is_locked() event which returns False
+ *
+ * LKR and LKW events always come paired, like all RMW event sequences.
+ *
+ * LKR, LF, RL, and RU are read events; LKR has Acquire ordering.
+ * LKW and UL are write events; UL has Release ordering.
+ * LKW, LF, RL, and RU have no ordering properties.
+ *)
+
+(* Backward compatibility *)
+let RL = try RL with emptyset
+let RU = try RU with emptyset
+
+(* Treat RL as a kind of LF: a read with no ordering properties *)
+let LF = LF | RL
+
+(* There should be no ordinary R or W accesses to spinlocks *)
+let ALL-LOCKS = LKR | LKW | UL | LF | RU
+flag ~empty [M \ IW] ; loc ; [ALL-LOCKS] as mixed-lock-accesses
+
+(* Link Lock-Reads to their RMW-partner Lock-Writes *)
+let lk-rmw = ([LKR] ; po-loc ; [LKW]) \ (po ; po)
+let rmw = rmw | lk-rmw
+
+(* The litmus test is invalid if an LKR/LKW event is not part of an RMW pair *)
+flag ~empty LKW \ range(lk-rmw) as unpaired-LKW
+flag ~empty LKR \ domain(lk-rmw) as unpaired-LKR
+
+(*
+ * An LKR must always see an unlocked value; spin_lock() calls nested
+ * inside a critical section (for the same lock) always deadlock.
+ *)
+empty ([LKW] ; po-loc ; [LKR]) \ (po-loc ; [UL] ; po-loc) as lock-nest
+
+(* The final value of a spinlock should not be tested *)
+flag ~empty [FW] ; loc ; [ALL-LOCKS] as lock-final
+
+(*
+ * Put lock operations in their appropriate classes, but leave UL out of W
+ * until after the co relation has been generated.
+ *)
+let R = R | LKR | LF | RU
+let W = W | LKW
+
+let Release = Release | UL
+let Acquire = Acquire | LKR
+
+(* Match LKW events to their corresponding UL events *)
+let critical = ([LKW] ; po-loc ; [UL]) \ (po-loc ; [LKW | UL] ; po-loc)
+
+flag ~empty UL \ range(critical) as unmatched-unlock
+
+(* Allow up to one unmatched LKW per location; more must deadlock *)
+let UNMATCHED-LKW = LKW \ domain(critical)
+empty ([UNMATCHED-LKW] ; loc ; [UNMATCHED-LKW]) \ id as unmatched-locks
+
+(* rfi for LF events: link each LKW to the LF events in its critical section *)
+let rfi-lf = ([LKW] ; po-loc ; [LF]) \ ([LKW] ; po-loc ; [UL] ; po-loc)
+
+(* rfe for LF events *)
+let all-possible-rfe-lf =
+	(*
+	 * Given an LF event r, compute the possible rfe edges for that event
+	 * (all those starting from LKW events in other threads),
+	 * and then convert that relation to a set of single-edge relations.
+	 *)
+	let possible-rfe-lf r =
+		let pair-to-relation p = p ++ 0
+		in map pair-to-relation ((LKW * {r}) & loc & ext)
+	(* Do this for each LF event r that isn't in rfi-lf *)
+	in map possible-rfe-lf (LF \ range(rfi-lf))
+
+(* Generate all rf relations for LF events *)
+with rfe-lf from cross(all-possible-rfe-lf)
+let rf-lf = rfe-lf | rfi-lf
+
+(*
+ * RU, i.e., spin_is_locked() returning False, is slightly different.
+ * We rely on the memory model to rule out cases where spin_is_locked()
+ * within one of the lock's critical sections returns False.
+ *)
+
+(*
+ * rf for RU events: an RU may read from an external UL or the initial write,
+ * or from the last po-previous UL
+ *)
+let all-possible-rf-ru =
+	let possible-rf-ru r =
+		let pair-to-relation p = p ++ 0
+		in map pair-to-relation ((((UL | IW) * {r}) & loc & ext) |
+			(((UL * {r}) & po-loc) \ ([UL] ; po-loc ; [LKW] ; po-loc)))
+	in map possible-rf-ru RU
+
+(* Generate all rf relations for RU events *)
+with rf-ru from cross(all-possible-rf-ru)
+
+(* Final rf relation *)
+let rf = rf | rf-lf | rf-ru
+
+(* Generate all co relations, including LKW events but not UL *)
+let co0 = co0 | ([IW] ; loc ; [LKW]) |
+	(([LKW] ; loc ; [UNMATCHED-LKW]) \ [UNMATCHED-LKW])
+include "cos-opt.cat"
+let W = W | UL
+let M = R | W
+
+(* Merge UL events into co *)
+let co = (co | critical | (critical^-1 ; co))+
+let coe = co & ext
+let coi = co & int
+
+(* Merge LKR events into rf *)
+let rf = rf | ([IW | UL] ; singlestep(co) ; lk-rmw^-1)
+let rfe = rf & ext
+let rfi = rf & int
+
+let fr = rf^-1 ; co
+let fre = fr & ext
+let fri = fr & int
+
+show co,rf,fr
diff --git a/marvell/linux/tools/memory-model/scripts/README b/marvell/linux/tools/memory-model/scripts/README
new file mode 100644
index 0000000..095c7eb
--- /dev/null
+++ b/marvell/linux/tools/memory-model/scripts/README
@@ -0,0 +1,70 @@
+			============
+			LKMM SCRIPTS
+			============
+
+
+These scripts are run from the tools/memory-model directory.
+
+checkalllitmus.sh
+
+	Run all litmus tests in the litmus-tests directory, checking
+	the results against the expected results recorded in the
+	"Result:" comment lines.
+
+checkghlitmus.sh
+
+	Run all litmus tests in the https://github.com/paulmckrcu/litmus
+	archive that are C-language and that have "Result:" comment lines
+	documenting expected results, comparing the actual results to
+	those expected.
+
+checklitmushist.sh
+
+	Run all litmus tests having .litmus.out files from previous
+	initlitmushist.sh or newlitmushist.sh runs, comparing the
+	herd7 output to that of the original runs.
+
+checklitmus.sh
+
+	Check a single litmus test against its "Result:" expected result.
+
+cmplitmushist.sh
+
+	Compare output from two different runs of the same litmus tests,
+	with the absolute pathnames of the tests to compare provided one
+	name per line on standard input.  Not normally run manually,
+	provided instead for use by other scripts.
+
+initlitmushist.sh
+
+	Run all litmus tests having no more than the specified number
+	of processes given a specified timeout, recording the results
+	in .litmus.out files.
+
+judgelitmus.sh
+
+	Given a .litmus file and its .litmus.out herd7 output, check the
+	.litmus.out file against the .litmus file's "Result:" comment to
+	judge whether the test ran correctly.  Not normally run manually,
+	provided instead for use by other scripts.
+
+newlitmushist.sh
+
+	For all new or updated litmus tests having no more than the
+	specified number of processes given a specified timeout, run
+	and record the results in .litmus.out files.
+
+parseargs.sh
+
+	Parse command-line arguments.  Not normally run manually,
+	provided instead for use by other scripts.
+
+runlitmushist.sh
+
+	Run the litmus tests whose absolute pathnames are provided one
+	name per line on standard input.  Not normally run manually,
+	provided instead for use by other scripts.
+
+README
+
+	This file
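A typical history-checking workflow with these scripts might look as follows;
this is only a sketch, and the --procs and --timeout values are illustrative
(see scripts/parseargs.sh for the defaults):

	cd tools/memory-model
	# First run: clone the github litmus archive and record baseline results.
	scripts/initlitmushist.sh --procs 4 --timeout 1m
	# After updating herd7 or the memory model, run any new or changed tests
	# and then compare the fresh results against the recorded baseline.
	scripts/newlitmushist.sh --procs 4 --timeout 1m
	scripts/checklitmushist.sh --procs 4 --timeout 1m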
diff --git a/marvell/linux/tools/memory-model/scripts/checkalllitmus.sh b/marvell/linux/tools/memory-model/scripts/checkalllitmus.sh
new file mode 100755
index 0000000..3c0c7fb
--- /dev/null
+++ b/marvell/linux/tools/memory-model/scripts/checkalllitmus.sh
@@ -0,0 +1,66 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Run herd7 tests on all .litmus files in the litmus-tests directory
+# and check each file's result against a "Result:" comment within that
+# litmus test.  If the verification result does not match that specified
+# in the litmus test, this script prints an error message prefixed with
+# "^^^".  It also outputs verification results to a file whose name is
+# that of the specified litmus test, but with ".out" appended.
+#
+# Usage:
+#	checkalllitmus.sh
+#
+# Run this in the directory containing the memory model.
+#
+# This script makes no attempt to run the litmus tests concurrently.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+. scripts/parseargs.sh
+
+litmusdir=litmus-tests
+if test -d "$litmusdir" -a -r "$litmusdir" -a -x "$litmusdir"
+then
+	:
+else
+	echo ' --- ' error: $litmusdir is not an accessible directory
+	exit 255
+fi
+
+# Mirror the litmus-tests directory hierarchy under --destdir, creating
+# any directories that do not already exist there.
+if test "$LKMM_DESTDIR" != "."
+then
+	find $litmusdir -type d -print |
+	( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Find the checklitmus script.  If it is not where we expect it, then
+# assume that the caller has the PATH environment variable set
+# appropriately.
+if test -x scripts/checklitmus.sh
+then
+	clscript=scripts/checklitmus.sh
+else
+	clscript=checklitmus.sh
+fi
+
+# Run the script on all the litmus tests in the specified directory
+ret=0
+for i in $litmusdir/*.litmus
+do
+	if ! $clscript $i
+	then
+		ret=1
+	fi
+done
+if test "$ret" -ne 0
+then
+	echo " ^^^ VERIFICATION MISMATCHES" 1>&2
+else
+	echo All litmus tests verified as was expected. 1>&2
+fi
+exit $ret
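For example, to check all in-tree tests while keeping the generated .out files
out of the source tree, one might run the following from tools/memory-model
(the destination path is only an illustration):

	scripts/checkalllitmus.sh --destdir /tmp/litmus-results

Any test whose herd7 verdict disagrees with its "Result:" comment is flagged
with "!!!" by judgelitmus.sh and summarized by the " ^^^ VERIFICATION
MISMATCHES" message above.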
diff --git a/marvell/linux/tools/memory-model/scripts/checkghlitmus.sh b/marvell/linux/tools/memory-model/scripts/checkghlitmus.sh
new file mode 100755
index 0000000..6589fbb
--- /dev/null
+++ b/marvell/linux/tools/memory-model/scripts/checkghlitmus.sh
@@ -0,0 +1,65 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests from the github litmus archive that
+# have no more than the specified number of processes (--procs, which
+# defaults to 3; see scripts/parseargs.sh).
+#
+# sh checkghlitmus.sh
+#
+# Run from the Linux kernel tools/memory-model directory.  See
+# scripts/parseargs.sh for the list of arguments.
+
+. scripts/parseargs.sh
+
+T=/tmp/checkghlitmus.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+# Clone the repository if it is not already present.
+if test -d litmus
+then
+	:
+else
+	git clone https://github.com/paulmckrcu/litmus
+	( cd litmus; git checkout origin/master )
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+	find litmus -type d -print |
+	( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests previously run.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+	sed -e 's/\.out$//' |
+	xargs -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' |
+	xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
+
+# Create a list of C-language litmus tests with "Result:" comments and
+# no more than the specified number of processes.
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
+xargs < $T/list-C -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' > $T/list-C-result
+xargs < $T/list-C-result -r grep -L "^P${LKMM_PROCS}" > $T/list-C-result-short
+
+# Form list of tests without corresponding .litmus.out files
+sort $T/list-C-already $T/list-C-result-short | uniq -u > $T/list-C-needed
+
+# Run any needed tests.
+if scripts/runlitmushist.sh < $T/list-C-needed > $T/run.stdout 2> $T/run.stderr
+then
+	errs=
+else
+	errs=1
+fi
+
+sed < $T/list-C-result-short -e 's,^,scripts/judgelitmus.sh ,' |
+	sh > $T/judge.stdout 2> $T/judge.stderr
+
+if test -n "$errs"
+then
+	cat $T/run.stderr 1>&2
+fi
+grep '!!!' $T/judge.stdout
diff --git a/marvell/linux/tools/memory-model/scripts/checklitmus.sh b/marvell/linux/tools/memory-model/scripts/checklitmus.sh
new file mode 100755
index 0000000..11461ed
--- /dev/null
+++ b/marvell/linux/tools/memory-model/scripts/checklitmus.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs a single herd7 test and invokes judgelitmus.sh to check the result
+# against a "Result:" comment within the litmus test.  It also outputs
+# verification results to a file whose name is that of the specified
+# litmus test, but with ".out" appended.
+#
+# Usage:
+#	checklitmus.sh file.litmus
+#
+# Run this in the directory containing the memory model, specifying the
+# pathname of the litmus test to check.  The caller is expected to have
+# properly set up the LKMM environment variables.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+litmus=$1
+herdoptions=${LKMM_HERD_OPTIONS--conf linux-kernel.cfg}
+
+if test -f "$litmus" -a -r "$litmus"
+then
+	:
+else
+	echo ' --- ' error: \"$litmus\" is not a readable file
+	exit 255
+fi
+
+echo Herd options: $herdoptions > $LKMM_DESTDIR/$litmus.out
+/usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $litmus >> $LKMM_DESTDIR/$litmus.out 2>&1
+
+scripts/judgelitmus.sh $litmus
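When invoked through checkalllitmus.sh with the defaults from
scripts/parseargs.sh, the herd7 invocation above expands to roughly the
following, where FOO.litmus stands for whichever test was passed in (a sketch
shown only to illustrate where the output lands):

	/usr/bin/time timeout 1m herd7 -conf linux-kernel.cfg litmus-tests/FOO.litmus \
		>> ./litmus-tests/FOO.litmus.out 2>&1

The first line of the .out file records the herd7 options used, and
judgelitmus.sh then checks the appended herd7 output against the test's
"Result:" comment.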
diff --git a/marvell/linux/tools/memory-model/scripts/checklitmushist.sh b/marvell/linux/tools/memory-model/scripts/checklitmushist.sh
new file mode 100755
index 0000000..1d210ff
--- /dev/null
+++ b/marvell/linux/tools/memory-model/scripts/checklitmushist.sh
@@ -0,0 +1,60 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Reruns the C-language litmus tests previously run that match the
+# specified criteria, and compares the result to that of the previous
+# runs from initlitmushist.sh and/or newlitmushist.sh.
+#
+# sh checklitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+. scripts/parseargs.sh
+
+T=/tmp/checklitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+	:
+else
+	echo Run scripts/initlitmushist.sh first, need litmus repo.
+	exit 1
+fi
+
+# Create the results directory and populate it with subdirectories.
+# The initial output is created here to avoid clobbering the output
+# generated earlier.
+mkdir $T/results
+find litmus -type d -print | ( cd $T/results; sed -e 's/^/mkdir -p /' | sh )
+
+# Create the list of litmus tests already run, then remove those that
+# are excluded by this run's --procs argument.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+	sed -e 's/\.out$//' |
+	xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
+xargs < $T/list-C-already -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+# Redirect output, run tests, then restore destination directory.
+destdir="$LKMM_DESTDIR"
+LKMM_DESTDIR=$T/results; export LKMM_DESTDIR
+scripts/runlitmushist.sh < $T/list-C-short > $T/runlitmushist.sh.out 2>&1
+LKMM_DESTDIR="$destdir"; export LKMM_DESTDIR
+
+# Move the newly generated .litmus.out files to .litmus.out.new files
+# in the destination directory.
+cdir=`pwd`
+ddir=`awk -v c="$cdir" -v d="$LKMM_DESTDIR" \
+	'END { if (d ~ /^\//) print d; else print c "/" d; }' < /dev/null`
+( cd $T/results; find litmus -type f -name '*.litmus.out' -print |
+  sed -e 's,^.*$,cp & '"$ddir"'/&.new,' | sh )
+
+sed < $T/list-C-short -e 's,^,'"$LKMM_DESTDIR/"',' |
+	sh scripts/cmplitmushist.sh
+exit $?
diff --git a/marvell/linux/tools/memory-model/scripts/cmplitmushist.sh b/marvell/linux/tools/memory-model/scripts/cmplitmushist.sh
new file mode 100755
index 0000000..0f498ae
--- /dev/null
+++ b/marvell/linux/tools/memory-model/scripts/cmplitmushist.sh
@@ -0,0 +1,87 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Compares .out and .out.new files for each name on standard input,
+# one full pathname per line.  Outputs comparison results followed by
+# a summary.
+#
+# sh cmplitmushist.sh
+
+T=/tmp/cmplitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+# comparetest oldpath newpath
+perfect=0
+obsline=0
+noobsline=0
+obsresult=0
+badcompare=0
+comparetest () {
+	grep -v 'maxresident)k\|minor)pagefaults\|^Time' $1 > $T/oldout
+	grep -v 'maxresident)k\|minor)pagefaults\|^Time' $2 > $T/newout
+	if cmp -s $T/oldout $T/newout && grep -q '^Observation' $1
+	then
+		echo Exact output match: $2
+		perfect=`expr "$perfect" + 1`
+		return 0
+	fi
+
+	grep '^Observation' $1 > $T/oldout
+	grep '^Observation' $2 > $T/newout
+	if test -s $T/oldout -o -s $T/newout
+	then
+		if cmp -s $T/oldout $T/newout
+		then
+			echo Matching Observation result and counts: $2
+			obsline=`expr "$obsline" + 1`
+			return 0
+		fi
+	else
+		echo Missing Observation line "(e.g., herd7 timeout)": $2
+		noobsline=`expr "$noobsline" + 1`
+		return 0
+	fi
+
+	grep '^Observation' $1 | awk '{ print $3 }' > $T/oldout
+	grep '^Observation' $2 | awk '{ print $3 }' > $T/newout
+	if cmp -s $T/oldout $T/newout
+	then
+		echo Matching Observation Always/Sometimes/Never result: $2
+		obsresult=`expr "$obsresult" + 1`
+		return 0
+	fi
+	echo ' !!!' Result changed: $2
+	badcompare=`expr "$badcompare" + 1`
+	return 1
+}
+
+sed -e 's/^.*$/comparetest &.out &.out.new/' > $T/cmpscript
+. $T/cmpscript > $T/cmpscript.out
+cat $T/cmpscript.out
+
+echo ' ---' Summary: 1>&2
+grep '!!!' $T/cmpscript.out 1>&2
+if test "$perfect" -ne 0
+then
+	echo Exact output matches: $perfect 1>&2
+fi
+if test "$obsline" -ne 0
+then
+	echo Matching Observation result and counts: $obsline 1>&2
+fi
+if test "$noobsline" -ne 0
+then
+	echo Missing Observation line "(e.g., herd7 timeout)": $noobsline 1>&2
+fi
+if test "$obsresult" -ne 0
+then
+	echo Matching Observation Always/Sometimes/Never result: $obsresult 1>&2
+fi
+if test "$badcompare" -ne 0
+then
+	echo "!!!" Result changed: $badcompare 1>&2
+	exit 1
+fi
+
+exit 0
diff --git a/marvell/linux/tools/memory-model/scripts/initlitmushist.sh b/marvell/linux/tools/memory-model/scripts/initlitmushist.sh
new file mode 100755
index 0000000..956b695
--- /dev/null
+++ b/marvell/linux/tools/memory-model/scripts/initlitmushist.sh
@@ -0,0 +1,68 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests matching the specified criteria.
+# Generates the output for each .litmus file into a corresponding
+# .litmus.out file, and does not judge the result.
+#
+# sh initlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# This script can consume significant wallclock time and CPU, especially as
+# the value of --procs rises.  On a four-core (eight hardware threads)
+# 2.5GHz x86 with a one-minute per-run timeout:
+#
+# --procs wallclock CPU		timeouts	tests
+#	1 0m11.241s 0m1.086s           0	   19
+#	2 1m12.598s 2m8.459s           2	  393
+#	3 1m30.007s 6m2.479s           4	 2291
+#	4 3m26.042s 18m5.139s	       9	 3217
+#	5 4m26.661s 23m54.128s	      13	 3784
+#	6 4m41.900s 26m4.721s         13	 4352
+#	7 5m51.463s 35m50.868s        13	 4626
+#	8 10m5.235s 68m43.672s        34	 5117
+#	9 15m57.80s 105m58.101s       69	 5156
+#      10 16m14.13s 103m35.009s       69         5165
+#      20 27m48.55s 198m3.286s       156         5269
+#
+# Increasing the timeout on the 20-process run to five minutes increases
+# the runtime to about 90 minutes with the CPU time rising to about
+# 10 hours.  On the other hand, it decreases the number of timeouts to 101.
+#
+# Note that there are historical tests for which herd7 will fail
+# completely, for example, litmus/manual/atomic/C-unlock-wait-00.litmus
+# contains a call to spin_unlock_wait(), which no longer exists in either
+# the kernel or LKMM.
+
+. scripts/parseargs.sh
+
+T=/tmp/initlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+	:
+else
+	git clone https://github.com/paulmckrcu/litmus
+	( cd litmus; git checkout origin/master )
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+	find litmus -type d -print |
+	( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests with no more than the
+# specified number of processes (per the --procs argument).
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
+xargs < $T/list-C -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+scripts/runlitmushist.sh < $T/list-C-short
+
+exit 0
diff --git a/marvell/linux/tools/memory-model/scripts/judgelitmus.sh b/marvell/linux/tools/memory-model/scripts/judgelitmus.sh
new file mode 100755
index 0000000..0cc6387
--- /dev/null
+++ b/marvell/linux/tools/memory-model/scripts/judgelitmus.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Given a .litmus test and the corresponding .litmus.out file, check
+# the .litmus.out file against the "Result:" comment to judge whether
+# the test ran correctly.
+#
+# Usage:
+#	judgelitmus.sh file.litmus
+#
+# Run this in the directory containing the memory model, specifying the
+# pathname of the litmus test to check.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+litmus=$1
+
+if test -f "$litmus" -a -r "$litmus"
+then
+	:
+else
+	echo ' --- ' error: \"$litmus\" is not a readable file
+	exit 255
+fi
+if test -f "$LKMM_DESTDIR/$litmus".out -a -r "$LKMM_DESTDIR/$litmus".out
+then
+	:
+else
+	echo ' --- ' error: \"$LKMM_DESTDIR/$litmus\".out is not a readable file
+	exit 255
+fi
+if grep -q '^ \* Result: ' $litmus
+then
+	outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'`
+else
+	outcome=specified
+fi
+
+grep '^Observation' $LKMM_DESTDIR/$litmus.out
+if grep -q '^Observation' $LKMM_DESTDIR/$litmus.out
+then
+	:
+else
+	echo ' !!! Verification error' $litmus
+	if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+	then
+		echo ' !!! Verification error' >> $LKMM_DESTDIR/$litmus.out 2>&1
+	fi
+	exit 255
+fi
+if test "$outcome" = DEADLOCK
+then
+	if grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q 'Never 0 0$'
+	then
+		ret=0
+	else
+		echo " !!! Unexpected non-$outcome verification" $litmus
+		if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+		then
+			echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1
+		fi
+		ret=1
+	fi
+elif grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q $outcome || test "$outcome" = Maybe
+then
+	ret=0
+else
+	echo " !!! Unexpected non-$outcome verification" $litmus
+	if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+	then
+		echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1
+	fi
+	ret=1
+fi
+tail -2 $LKMM_DESTDIR/$litmus.out | head -1
+exit $ret
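Concretely, the script compares the third field of the test's "Result:" line
against the herd7 Observation line in the .litmus.out file; a sketch of the
data being matched, with made-up counts, looks like this:

	 * Result: Never                                          (from the .litmus file)
	Observation WRC+pooncerelease+fencermbonceonce+Once Never 0 7    (from the .litmus.out file)

A DEADLOCK result is special-cased: it is judged correct only when the
Observation line ends in "Never 0 0", meaning that no execution ran to
completion because they all deadlocked.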
diff --git a/marvell/linux/tools/memory-model/scripts/newlitmushist.sh b/marvell/linux/tools/memory-model/scripts/newlitmushist.sh
new file mode 100755
index 0000000..991f8f8
--- /dev/null
+++ b/marvell/linux/tools/memory-model/scripts/newlitmushist.sh
@@ -0,0 +1,61 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests matching the specified criteria
+# that do not already have a corresponding .litmus.out file, and does
+# not judge the result.
+#
+# sh newlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# See scripts/parseargs.sh for list of arguments.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+. scripts/parseargs.sh
+
+T=/tmp/newlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+	:
+else
+	echo Run scripts/initlitmushist.sh first, need litmus repo.
+	exit 1
+fi
+
+# Create any new directories that have appeared in the github litmus
+# repo since the last run.
+if test "$LKMM_DESTDIR" != "."
+then
+	find litmus -type d -print |
+	( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
+fi
+
+# Create a list of the C-language litmus tests previously run.
+( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
+	sed -e 's/\.out$//' |
+	xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
+
+# Form full list of litmus tests with no more than the specified
+# number of processes (per the --procs argument).
+find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C-all
+xargs < $T/list-C-all -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
+
+# Form list of new tests.  Note: This does not handle litmus-test deletion!
+sort $T/list-C-already $T/list-C-short | uniq -u > $T/list-C-new
+
+# Form list of litmus tests that have changed since the last run.
+sed < $T/list-C-short -e 's,^.*$,if test & -nt '"$LKMM_DESTDIR"'/&.out; then echo &; fi,' > $T/list-C-script
+sh $T/list-C-script > $T/list-C-newer
+
+# Merge the list of new and of updated litmus tests: These must be (re)run.
+sort -u $T/list-C-new $T/list-C-newer > $T/list-C-needed
+
+scripts/runlitmushist.sh < $T/list-C-needed
+
+exit 0
diff --git a/marvell/linux/tools/memory-model/scripts/parseargs.sh b/marvell/linux/tools/memory-model/scripts/parseargs.sh
new file mode 100755
index 0000000..40f5208
--- /dev/null
+++ b/marvell/linux/tools/memory-model/scripts/parseargs.sh
@@ -0,0 +1,136 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Parses the command-line arguments common to the memory-model scripts
+# and exports the corresponding LKMM_* environment variables.
+#
+# . scripts/parseargs.sh
+#
+# Include into other Linux kernel tools/memory-model scripts.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/parseargs.sh.$$
+mkdir $T
+
+# Initialize one parameter: initparam name default
+initparam () {
+	echo if test -z '"$'$1'"' > $T/s
+	echo then >> $T/s
+	echo	$1='"'$2'"' >> $T/s
+	echo	export $1 >> $T/s
+	echo fi >> $T/s
+	echo $1_DEF='$'$1  >> $T/s
+	. $T/s
+}
+
+initparam LKMM_DESTDIR "."
+initparam LKMM_HERD_OPTIONS "-conf linux-kernel.cfg"
+initparam LKMM_JOBS `getconf _NPROCESSORS_ONLN`
+initparam LKMM_PROCS "3"
+initparam LKMM_TIMEOUT "1m"
+
+scriptname=$0
+
+usagehelp () {
+	echo "Usage $scriptname [ arguments ]"
+	echo "      --destdir path (place for .litmus.out, default: alongside the .litmus file)"
+	echo "      --herdopts -conf linux-kernel.cfg ..."
+	echo "      --jobs N (number of jobs, default one per CPU)"
+	echo "      --procs N (litmus tests with at most this many processes)"
+	echo "      --timeout N (herd7 timeout, e.g., 10s, 1m, 2h, 1d, or '')"
+	echo "Defaults: --destdir '$LKMM_DESTDIR_DEF' --herdopts '$LKMM_HERD_OPTIONS_DEF' --jobs '$LKMM_JOBS_DEF' --procs '$LKMM_PROCS_DEF' --timeout '$LKMM_TIMEOUT_DEF'"
+	exit 1
+}
+
+usage () {
+	usagehelp 1>&2
+}
+
+# checkarg --argname argtype $# arg mustmatch cannotmatch
+checkarg () {
+	if test $3 -le 1
+	then
+		echo $1 needs argument $2 matching \"$5\"
+		usage
+	fi
+	if echo "$4" | grep -q -e "$5"
+	then
+		:
+	else
+		echo $1 $2 \"$4\" must match \"$5\"
+		usage
+	fi
+	if echo "$4" | grep -q -e "$6"
+	then
+		echo $1 $2 \"$4\" must not match \"$6\"
+		usage
+	fi
+}
+
+while test $# -gt 0
+do
+	case "$1" in
+	--destdir)
+		checkarg --destdir "(path to directory)" "$#" "$2" '.\+' '^--'
+		LKMM_DESTDIR="$2"
+		mkdir $LKMM_DESTDIR > /dev/null 2>&1
+		if ! test -e "$LKMM_DESTDIR"
+		then
+			echo "Cannot create directory --destdir '$LKMM_DESTDIR'"
+			usage
+		fi
+		if test -d "$LKMM_DESTDIR" -a -w "$LKMM_DESTDIR" -a -x "$LKMM_DESTDIR"
+		then
+			:
+		else
+			echo "Directory --destdir '$LKMM_DESTDIR' insufficient permissions to create files"
+			usage
+		fi
+		shift
+		;;
+	--herdopts|--herdopt)
+		checkarg --herdopts "(herd7 options)" "$#" "$2" '.*' '^--'
+		LKMM_HERD_OPTIONS="$2"
+		shift
+		;;
+	-j[1-9]*)
+		njobs="`echo $1 | sed -e 's/^-j//'`"
+		trailchars="`echo $njobs | sed -e 's/[0-9]\+\(.*\)$/\1/'`"
+		if test -n "$trailchars"
+		then
+			echo $1 trailing characters "'$trailchars'"
+			usagehelp
+		fi
+		LKMM_JOBS="`echo $njobs | sed -e 's/^\([0-9]\+\).*$/\1/'`"
+		;;
+	--jobs|--job|-j)
+		checkarg --jobs "(number)" "$#" "$2" '^[1-9][0-9]*$' '^--'
+		LKMM_JOBS="$2"
+		shift
+		;;
+	--procs|--proc)
+		checkarg --procs "(number)" "$#" "$2" '^[0-9]\+$' '^--'
+		LKMM_PROCS="$2"
+		shift
+		;;
+	--timeout)
+		checkarg --timeout "(timeout spec)" "$#" "$2" '^\([0-9]\+[smhd]\?\|\)$' '^--'
+		LKMM_TIMEOUT="$2"
+		shift
+		;;
+	*)
+		echo Unknown argument $1
+		usage
+		;;
+	esac
+	shift
+done
+if test -z "$LKMM_TIMEOUT"
+then
+	LKMM_TIMEOUT_CMD=""; export LKMM_TIMEOUT_CMD
+else
+	LKMM_TIMEOUT_CMD="timeout $LKMM_TIMEOUT"; export LKMM_TIMEOUT_CMD
+fi
+rm -rf $T
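The initparam helper near the top of this script gives each LKMM_* variable a
default unless the caller has already exported it, and records that default
for use in the usage message.  For instance, initparam LKMM_PROCS "3"
generates and sources a fragment equivalent to the following sketch:

	if test -z "$LKMM_PROCS"
	then
		LKMM_PROCS="3"
		export LKMM_PROCS
	fi
	LKMM_PROCS_DEF=$LKMM_PROCS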
diff --git a/marvell/linux/tools/memory-model/scripts/runlitmushist.sh b/marvell/linux/tools/memory-model/scripts/runlitmushist.sh
new file mode 100755
index 0000000..6ed376f
--- /dev/null
+++ b/marvell/linux/tools/memory-model/scripts/runlitmushist.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Runs the C-language litmus tests specified on standard input, using up
+# to the specified number of CPUs (defaulting to all of them) and placing
+# the results in the specified directory (defaulting to the same place
+# the litmus test came from).
+#
+# sh runlitmushist.sh
+#
+# Run from the Linux kernel tools/memory-model directory.
+# This script uses environment variables produced by parseargs.sh.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+T=/tmp/runlitmushist.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
+if test -d litmus
+then
+	:
+else
+	echo Directory \"litmus\" missing, aborting run.
+	exit 1
+fi
+
+# Prefixes for per-CPU scripts
+for ((i=0;i<$LKMM_JOBS;i++))
+do
+	echo dir="$LKMM_DESTDIR" > $T/$i.sh
+	echo T=$T >> $T/$i.sh
+	echo herdoptions=\"$LKMM_HERD_OPTIONS\" >> $T/$i.sh
+	cat << '___EOF___' >> $T/$i.sh
+	runtest () {
+		echo ' ... ' /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 '>' $dir/$1.out '2>&1'
+		if /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 > $dir/$1.out 2>&1
+		then
+			if ! grep -q '^Observation ' $dir/$1.out
+			then
+				echo ' !!! Herd failed, no Observation:' $1
+			fi
+		else
+			exitcode=$?
+			if test "$exitcode" -eq 124
+			then
+				exitmsg="timed out"
+			else
+				exitmsg="failed, exit code $exitcode"
+			fi
+			echo ' !!! Herd' ${exitmsg}: $1
+		fi
+	}
+___EOF___
+done
+
+awk -v q="'" -v b='\\' '
+{
+	print "echo `grep " q "^P[0-9]" b "+(" q " " $0 " | tail -1 | sed -e " q "s/^P" b "([0-9]" b "+" b ")(.*$/" b "1/" q "` " $0
+}' | bash |
+sort -k1n |
+awk -v ncpu=$LKMM_JOBS -v t=$T '
+{
+	print "runtest " $2 >> t "/" NR % ncpu ".sh";
+}
+
+END {
+	for (i = 0; i < ncpu; i++) {
+		print "sh " t "/" i ".sh > " t "/" i ".sh.out 2>&1 &";
+		close(t "/" i ".sh");
+	}
+	print "wait";
+}' | sh
+cat $T/*.sh.out
+if grep -q '!!!' $T/*.sh.out
+then
+	echo ' ---' Summary: 1>&2
+	grep '!!!' $T/*.sh.out 1>&2
+	nfail="`grep '!!!' $T/*.sh.out | wc -l`"
+	echo 'Number of failed herd7 runs (e.g., timeout): ' $nfail 1>&2
+	exit 1
+else
+	echo All runs completed successfully. 1>&2
+	exit 0
+fi
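The generator pipeline above is terse: the first awk stage emits, for each
input pathname, a shell command that prints the test's highest process number
followed by that pathname; after a numeric sort on that count, the second
stage appends "runtest" calls round-robin to one generated script per job and
launches all of those scripts in parallel.  With --jobs 2 and default options,
a generated per-job script would look roughly like the sketch below (the
temporary-directory suffix and the test pathname are only placeholders):

	dir="."
	T=/tmp/runlitmushist.sh.12345
	herdoptions="-conf linux-kernel.cfg"
	# ... the runtest () shell function from the here-document above ...
	runtest litmus/manual/example-test.litmus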