/* Copyright (C) 2003-2016 Free Software Foundation, Inc.

   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
#include <stdint.h>
#include <sysdep.h>


typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#define __HAVE_64B_ATOMICS 0
#define USE_ATOMIC_COMPILER_BUILTINS 0

/* MicroBlaze has no byte or halfword forms of its exclusive load (lwx) and
   store (swx) instructions, so the 8- and 16-bit compare-and-exchange forms
   are stubbed out.  */
#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \
  (abort (), 0)

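/* Descriptive note (not in the upstream file): the 32-bit operations below
   build a retry loop from the MicroBlaze exclusive-access pair.  lwx loads a
   word and establishes a reservation; swx performs the store only while that
   reservation is still held and reports failure in the carry flag MSR[C].
   The leading addc clears the carry, addic copies it into a register, and
   bnei loops back until the store-exclusive succeeds.  */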
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __tmp; \
    __typeof (mem) __memp = (mem); \
    int test; \
    __asm __volatile ( \
      "   addc  r0, r0, r0;" \
      "1: lwx   %0, %3, r0;" \
      "   addic %1, r0, 0;" \
      "   bnei  %1, 1b;" \
      "   cmp   %1, %0, %4;" \
      "   bnei  %1, 2f;" \
      "   swx   %5, %3, r0;" \
      "   addic %1, r0, 0;" \
      "   bnei  %1, 1b;" \
      "2:" \
      : "=&r" (__tmp), \
        "=&r" (test), \
        "=m" (*__memp) \
      : "r" (__memp), \
        "r" (oldval), \
        "r" (newval) \
      : "cc", "memory"); \
    __tmp; \
  })

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \
    else \
      abort (); \
    __result; \
  })

#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \
    else \
      abort (); \
    __result; \
  })

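/* Descriptive note: atomic_exchange uses the same lwx/swx retry loop, but
   stores the new value unconditionally and hands back the previous contents
   of the word in __tmp.  */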
#define __arch_atomic_exchange_32_acq(mem, value) \
  ({ \
    __typeof (*(mem)) __tmp; \
    __typeof (mem) __memp = (mem); \
    int test; \
    __asm __volatile ( \
      "   addc  r0, r0, r0;" \
      "1: lwx   %0, %4, r0;" \
      "   addic %1, r0, 0;" \
      "   bnei  %1, 1b;" \
      "   swx   %3, %4, r0;" \
      "   addic %1, r0, 0;" \
      "   bnei  %1, 1b;" \
      : "=&r" (__tmp), \
        "=&r" (test), \
        "=m" (*__memp) \
      : "r" (value), \
        "r" (__memp) \
      : "cc", "memory"); \
    __tmp; \
  })

#define __arch_atomic_exchange_64_acq(mem, newval) \
  (abort (), (__typeof (*mem)) 0)

#define atomic_exchange_acq(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_atomic_exchange_32_acq (mem, value); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_atomic_exchange_64_acq (mem, value); \
    else \
      abort (); \
    __result; \
  })

#define atomic_exchange_rel(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_atomic_exchange_32_acq (mem, value); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_atomic_exchange_64_acq (mem, value); \
    else \
      abort (); \
    __result; \
  })

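/* Descriptive note: the exchange-and-add loop computes old + value in a
   scratch register before the swx, so the expression evaluates to the value
   the word held before the addition.  */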
#define __arch_atomic_exchange_and_add_32(mem, value) \
  ({ \
    __typeof (*(mem)) __tmp; \
    __typeof (mem) __memp = (mem); \
    int test; \
    __asm __volatile ( \
      "   addc  r0, r0, r0;" \
      "1: lwx   %0, %4, r0;" \
      "   addic %1, r0, 0;" \
      "   bnei  %1, 1b;" \
      "   add   %1, %3, %0;" \
      "   swx   %1, %4, r0;" \
      "   addic %1, r0, 0;" \
      "   bnei  %1, 1b;" \
      : "=&r" (__tmp), \
        "=&r" (test), \
        "=m" (*__memp) \
      : "r" (value), \
        "r" (__memp) \
      : "cc", "memory"); \
    __tmp; \
  })

#define __arch_atomic_exchange_and_add_64(mem, value) \
  (abort (), (__typeof (*mem)) 0)

#define atomic_exchange_and_add(mem, value) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*mem) == 4) \
      __result = __arch_atomic_exchange_and_add_32 (mem, value); \
    else if (sizeof (*mem) == 8) \
      __result = __arch_atomic_exchange_and_add_64 (mem, value); \
    else \
      abort (); \
    __result; \
  })

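/* Descriptive note: unlike exchange-and-add, the increment/decrement helpers
   below store the updated word from __val and therefore evaluate to the new
   value; atomic_increment and atomic_decrement discard that result.  */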
#define __arch_atomic_increment_val_32(mem) \
  ({ \
    __typeof (*(mem)) __val; \
    int test; \
    __asm __volatile ( \
      "   addc  r0, r0, r0;" \
      "1: lwx   %0, %3, r0;" \
      "   addic %1, r0, 0;" \
      "   bnei  %1, 1b;" \
      "   addi  %0, %0, 1;" \
      "   swx   %0, %3, r0;" \
      "   addic %1, r0, 0;" \
      "   bnei  %1, 1b;" \
      : "=&r" (__val), \
        "=&r" (test), \
        "=m" (*mem) \
      : "r" (mem), \
        "m" (*mem) \
      : "cc", "memory"); \
    __val; \
  })

#define __arch_atomic_increment_val_64(mem) \
  (abort (), (__typeof (*mem)) 0)

#define atomic_increment_val(mem) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = __arch_atomic_increment_val_32 (mem); \
    else if (sizeof (*(mem)) == 8) \
      __result = __arch_atomic_increment_val_64 (mem); \
    else \
      abort (); \
    __result; \
  })

#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })

#define __arch_atomic_decrement_val_32(mem) \
  ({ \
    __typeof (*(mem)) __val; \
    int test; \
    __asm __volatile ( \
      "   addc  r0, r0, r0;" \
      "1: lwx   %0, %3, r0;" \
      "   addic %1, r0, 0;" \
      "   bnei  %1, 1b;" \
      "   rsubi %0, %0, 1;" \
      "   swx   %0, %3, r0;" \
      "   addic %1, r0, 0;" \
      "   bnei  %1, 1b;" \
      : "=&r" (__val), \
        "=&r" (test), \
        "=m" (*mem) \
      : "r" (mem), \
        "m" (*mem) \
      : "cc", "memory"); \
    __val; \
  })

#define __arch_atomic_decrement_val_64(mem) \
  (abort (), (__typeof (*mem)) 0)

#define atomic_decrement_val(mem) \
  ({ \
    __typeof (*(mem)) __result; \
    if (sizeof (*(mem)) == 4) \
      __result = __arch_atomic_decrement_val_32 (mem); \
    else if (sizeof (*(mem)) == 8) \
      __result = __arch_atomic_decrement_val_64 (mem); \
    else \
      abort (); \
    __result; \
  })

#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })
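
/* Usage sketch (illustrative only, not part of the original header; assumes
   a 4-byte `int lock` object initialised to 0):

     while (atomic_compare_and_exchange_val_acq (&lock, 1, 0) != 0)
       ;                                  -- spin until we swap 0 -> 1
     ... critical section ...
     atomic_exchange_rel (&lock, 0);

   Only 4-byte objects take the lwx/swx path above; 8-byte accesses abort
   because __HAVE_64B_ATOMICS is 0 on this port.  */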