/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H

#include <asm/asm-const.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only. We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores. Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")

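/*
 * Example (illustrative sketch only; 'data', 'ready' and 'r' are
 * placeholder names, and WRITE_ONCE()/READ_ONCE() come from
 * <linux/compiler.h>): the usual pairing is a write barrier on the
 * producer side against a read barrier on the consumer side:
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	WRITE_ONCE(data, 1);		while (!READ_ONCE(ready))
 *	wmb();					;
 *	WRITE_ONCE(ready, 1);		rmb();
 *					r = READ_ONCE(data);
 *
 * The wmb() keeps the store to 'data' ahead of the store to 'ready';
 * the rmb() keeps the load of 'ready' ahead of the load of 'data'.
 */
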
/* The sub-arch has lwsync */
#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

/* clang defines this macro for a builtin, which will not work with runtime patching */
#undef __lwsync
#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb()	__lwsync()
#define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")

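/*
 * Example (illustrative sketch; 'desc', 'DESC_READY', 'DESC_DONE' and
 * fill()/process() are made-up names): the dma_* barriers order CPU
 * accesses to coherent memory shared with a DMA-capable device, e.g. a
 * descriptor ring:
 *
 *	fill(desc);			// write the payload
 *	dma_wmb();			// payload visible before the flag
 *	desc->flags = DESC_READY;	// hand the descriptor to the device
 *
 *	if (desc->flags & DESC_DONE) {
 *		dma_rmb();		// flag read before the payload
 *		process(desc);
 *	}
 */
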
#define __smp_lwsync()	__lwsync()

#define __smp_mb()	mb()
#define __smp_rmb()	__lwsync()
#define __smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")

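/*
 * Example (illustrative sketch; 'x', 'y', 'r0', 'r1' are placeholders):
 * smp_mb() must be a full sync because store->load ordering is needed
 * for patterns like the one below, and lwsync does not provide it:
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(x, 1);		WRITE_ONCE(y, 1);
 *	smp_mb();			smp_mb();
 *	r0 = READ_ONCE(y);		r1 = READ_ONCE(x);
 *
 * With both barriers present, the outcome r0 == 0 && r1 == 0 is
 * forbidden.
 */
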
/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known. For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");

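/*
 * Example (illustrative only; 'owner' and 'lock' are hypothetical): the
 * loaded value is fed to the twi/isync pair, so nothing after the
 * barrier starts until the load has completed:
 *
 *	owner = READ_ONCE(lock->owner);
 *	data_barrier(owner);
 *	// code here only begins executing once 'owner' is known
 */
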
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	___p1;								\
})

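/*
 * Example (illustrative sketch; 'msg' and 'flag' are placeholders, and
 * the smp_store_release()/smp_load_acquire() wrappers come from
 * <asm-generic/barrier.h>): the lwsync before the releasing store keeps
 * the payload store ahead of the flag store, and the lwsync after the
 * acquiring load keeps the flag load ahead of later payload loads:
 *
 *	CPU 0					CPU 1
 *	WRITE_ONCE(msg, 42);			if (smp_load_acquire(&flag))
 *	smp_store_release(&flag, 1);			r = READ_ONCE(msg);
 */
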
#ifdef CONFIG_PPC_BOOK3S_64
#define NOSPEC_BARRIER_SLOT	nop
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#define NOSPEC_BARRIER_SLOT	nop; nop
#endif

#ifdef CONFIG_PPC_BARRIER_NOSPEC
/*
 * Prevent execution of subsequent instructions until preceding branches have
 * been fully resolved and are no longer executing speculatively.
 */
#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT

// This also acts as a compiler barrier due to the memory clobber.
#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")

#else /* !CONFIG_PPC_BARRIER_NOSPEC */
#define barrier_nospec_asm
#define barrier_nospec()
#endif /* CONFIG_PPC_BARRIER_NOSPEC */
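
/*
 * Example (illustrative sketch; 'idx', 'size' and 'array' are
 * placeholders): barrier_nospec() typically sits between a bounds check
 * and the dependent access, so the access cannot be performed
 * speculatively with an out-of-range index:
 *
 *	if (idx < size) {
 *		barrier_nospec();
 *		val = array[idx];
 *	}
 */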

#include <asm-generic/barrier.h>

#endif /* _ASM_POWERPC_BARRIER_H */