/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copied from the kernel sources:
*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*/
#ifndef _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
#define _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
/*
* Memory barrier.
* The sync instruction guarantees that all memory accesses initiated
* by this processor have been performed (with respect to all other
* mechanisms that access memory). The eieio instruction is a barrier
* providing an ordering (separately) for (a) cacheable stores and (b)
* loads and stores to non-cacheable memory (e.g. I/O devices).
*
* mb() prevents loads and stores being reordered across this point.
* rmb() prevents loads being reordered across this point.
* wmb() prevents stores being reordered across this point.
*
* *mb() variants without smp_ prefix must order all types of memory
* operations with one another. sync is the only instruction sufficient
* to do this.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
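/*
 * Illustrative sketch (not part of the copied kernel header): the classic
 * flag/data handoff these barriers support. The names shared_data and
 * data_ready are hypothetical.
 *
 *	// writer
 *	shared_data = compute();
 *	wmb();			// order the data store before the flag store
 *	data_ready = 1;
 *
 *	// reader
 *	while (!data_ready)
 *		;
 *	rmb();			// order the flag load before the data load
 *	use(shared_data);
 */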
#if defined(__powerpc64__)
#define smp_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory")
#define smp_store_release(p, v)			\
do {							\
	smp_lwsync();					\
	WRITE_ONCE(*p, v);				\
} while (0)

#define smp_load_acquire(p)				\
({							\
	typeof(*p) ___p1 = READ_ONCE(*p);		\
	smp_lwsync();					\
	___p1;						\
})
#endif /* defined(__powerpc64__) */
#endif /* _TOOLS_LINUX_ASM_POWERPC_BARRIER_H */
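/*
 * Illustrative sketch (not part of the copied kernel header): a
 * single-producer / single-consumer handoff built on the powerpc64
 * acquire/release pair defined above. The names buf, head, tail and mask
 * are hypothetical.
 *
 *	// producer: fill the slot, then publish the new head
 *	buf[head & mask] = item;
 *	smp_store_release(&head, head + 1);	// lwsync orders the data store
 *						// before the head store
 *
 *	// consumer: read the head, then the slots it covers
 *	unsigned int h = smp_load_acquire(&head);	// lwsync orders the head
 *							// load before data loads
 *	if (tail != h)
 *		item = buf[tail & mask];
 */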