/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BARRIER "bcr 14,0\n"
#else
#define __ASM_BARRIER "bcr 15,0\n"
#endif

#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)

#define rmb()				barrier()
#define wmb()				barrier()
#define dma_rmb()			mb()
#define dma_wmb()			mb()
#define __smp_mb()			mb()
#define __smp_rmb()			rmb()
#define __smp_wmb()			wmb()

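/*
 * Illustrative sketch (not part of this header, kept out of the build
 * with "#if 0"): the kind of device interaction the comment at the top
 * of this file alludes to.  The descriptor layout, the DESC_OWNED_BY_HW
 * flag and publish_desc() are hypothetical; the point is that dma_wmb()
 * keeps the payload stores ordered before the store that hands the
 * descriptor to the device.
 */
#if 0
struct ring_desc {
	u64 addr;
	u32 len;
	u32 flags;		/* device polls this field for ownership */
};

static void publish_desc(struct ring_desc *desc, u64 dma_addr, u32 len)
{
	desc->addr = dma_addr;
	desc->len  = len;
	/* order the payload stores before the ownership flag */
	dma_wmb();
	desc->flags = DESC_OWNED_BY_HW;
}
#endif
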
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
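
/*
 * Illustrative sketch (not part of this header, kept out of the build
 * with "#if 0"): how the acquire/release pair defined above is meant to
 * be used via the generic smp_store_release()/smp_load_acquire()
 * wrappers from <asm-generic/barrier.h>.  The producer/consumer
 * functions and the payload/ready variables are hypothetical.
 */
#if 0
static int payload;
static int ready;

static void producer(int val)
{
	payload = val;			/* plain store */
	smp_store_release(&ready, 1);	/* orders the payload before the flag */
}

static int consumer(int *out)
{
	if (!smp_load_acquire(&ready))	/* pairs with the release above */
		return 0;
	*out = payload;			/* guaranteed to observe producer's value */
	return 1;
}
#endif
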

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

/**
 * array_index_mask_nospec - generate a mask for array_index_nospec() that is
 * ~0UL when the bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long index,
						    unsigned long size)
{
	unsigned long mask;

	/*
	 * Branch-free mask generation: CLGR leaves a borrow indication
	 * in the condition code and SLBGR of a register with itself
	 * turns it into either ~0UL or 0.
	 */
	if (__builtin_constant_p(size) && size > 0) {
		asm("	clgr	%2,%1\n"
		    "	slbgr	%0,%0\n"
		    :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
		return mask;
	}
	asm("	clgr	%1,%2\n"
	    "	slbgr	%0,%0\n"
	    :"=d" (mask) : "d" (size), "d" (index) :"cc");
	return ~mask;
}
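
/*
 * Illustrative sketch (not part of this header, kept out of the build
 * with "#if 0"): how the mask is applied, in the same spirit as the
 * generic array_index_nospec() helper in <linux/nospec.h>.  The lookup
 * table and lookup() function are hypothetical.
 */
#if 0
static int lookup(unsigned long idx)
{
	static const int table[16] = { 0, };

	if (idx >= ARRAY_SIZE(table))
		return -EINVAL;
	/* mask is ~0UL in bounds and 0 otherwise, clamping idx under speculation */
	idx &= array_index_mask_nospec(idx, ARRAY_SIZE(table));
	return table[idx];
}
#endif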

#include <asm-generic/barrier.h>

#endif /* __ASM_BARRIER_H */