/* Atomic operations.  PowerPC Common version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <bits/wordsize.h>

#if __WORDSIZE == 64
/* Atomic operations.  PowerPC64 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* The 32-bit exchange_bool is different on powerpc64 because the subf
   does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
   (load word and zero the high 32 bits) load.
   On powerpc64 register values are 64 bits wide by default, including
   oldval.  The value in oldval may carry unknown sign extension, while
   lwarx loads the 32-bit value as unsigned, so we explicitly clear the
   high 32 bits of oldval.  */
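/* A concrete illustration (added commentary, not in the original):
   for the 32-bit value 0x80000000, a sign-extending load leaves
   0xffffffff80000000 in oldval's register, while lwarx produces the
   zero-extended 0x0000000080000000.  A 64-bit subf. of the two is
   nonzero even though the low 32 bits match, so the clrldi below
   zero-extends oldval before the comparison.  */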
# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({                                                                            \
  unsigned int __tmp, __tmp2;                                                 \
  __asm__ __volatile__ ("   clrldi  %1,%1,32\n"                               \
                    "1: lwarx   %0,0,%2\n"                                    \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp), "=r" (__tmp2)                            \
                    : "b" (mem), "1" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({                                                                            \
  unsigned int __tmp, __tmp2;                                                 \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                                 \
                    "   clrldi  %1,%1,32\n"                                   \
                    "1: lwarx   %0,0,%2\n"                                    \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp), "=r" (__tmp2)                            \
                    : "b" (mem), "1" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

/*
 * Only powerpc64 processors support Load Doubleword And Reserve Indexed
 * (ldarx) and Store Doubleword Conditional Indexed (stdcx.) instructions.
 * So here we define the 64-bit forms.
 */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({                                                                            \
  unsigned long __tmp;                                                        \
  __asm__ __volatile__ (                                                      \
                    "1: ldarx   %0,0,%1\n"                                    \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
({                                                                            \
  unsigned long __tmp;                                                        \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                                 \
                    "1: ldarx   %0,0,%1\n"                                    \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})
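
/* Usage sketch (illustrative; `owner' and grab_resource () are made-up
   names, not part of the original header).  The bool forms return 0 when
   the exchange succeeded and nonzero when *mem did not contain oldval:

     static long owner = 0;
     if (__arch_compare_and_exchange_bool_64_acq (&owner, 1L, 0L) == 0)
       grab_resource ();

   On success the acquire barrier orders the body of grab_resource ()
   after the swap.  */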

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm__ __volatile__ (                                                  \
                        "1:     ldarx   %0,0,%1\n"                            \
                        "       cmpd    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stdcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     " __ARCH_ACQ_INSTR                            \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                             \
                        "1:     ldarx   %0,0,%1\n"                            \
                        "       cmpd    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stdcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     "                                             \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

# define __arch_atomic_exchange_64_acq(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val;                                                  \
      __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                             \
                        "1:     ldarx   %0,0,%2\n"                            \
                        "       stdcx.  %3,0,%2\n"                            \
                        "       bne-    1b\n"                                 \
                  " " __ARCH_ACQ_INSTR                                        \
                        : "=&r" (__val), "=m" (*mem)                          \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

# define __arch_atomic_exchange_64_rel(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val;                                                  \
      __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                             \
                        "1:     ldarx   %0,0,%2\n"                            \
                        "       stdcx.  %3,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&r" (__val), "=m" (*mem)                          \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

# define __arch_atomic_exchange_and_add_64(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val, __tmp;                                           \
      __asm__ __volatile__ ("1: ldarx   %0,0,%3\n"                            \
                        "       add     %1,%0,%4\n"                           \
                        "       stdcx.  %1,0,%3\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)           \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

# define __arch_atomic_increment_val_64(mem) \
    ({                                                                        \
      __typeof (*(mem)) __val;                                                \
      __asm__ __volatile__ ("1: ldarx   %0,0,%2\n"                            \
                        "       addi    %0,%0,1\n"                            \
                        "       stdcx.  %0,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=m" (*mem)                          \
                        : "b" (mem), "m" (*mem)                               \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

# define __arch_atomic_decrement_val_64(mem) \
    ({                                                                        \
      __typeof (*(mem)) __val;                                                \
      __asm__ __volatile__ ("1: ldarx   %0,0,%2\n"                            \
                        "       subi    %0,%0,1\n"                            \
                        "       stdcx.  %0,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=m" (*mem)                          \
                        : "b" (mem), "m" (*mem)                               \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

# define __arch_atomic_decrement_if_positive_64(mem) \
  ({ int __val, __tmp;                                                        \
     __asm__ __volatile__ ("1:  ldarx   %0,0,%3\n"                            \
                       "        cmpdi   0,%0,0\n"                             \
                       "        addi    %1,%0,-1\n"                           \
                       "        ble     2f\n"                                 \
                       "        stdcx.  %1,0,%3\n"                            \
                       "        bne-    1b\n"                                 \
                       "2:      " __ARCH_ACQ_INSTR                            \
                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)            \
                       : "b" (mem), "m" (*mem)                                \
                       : "cr0", "memory");                                    \
     __val;                                                                   \
  })

/*
 * All powerpc64 processors support the new "light weight" sync (lwsync).
 */
# define atomic_read_barrier()  __asm__ ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR      "lwsync"
# endif
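
/* Illustrative pattern (an assumption, not part of the original header;
   `ready', `payload' and consume () are made-up names):

     while (ready == 0)
       ;
     atomic_read_barrier ();
     consume (payload);

   The lwsync keeps the payload loads from being satisfied ahead of the
   flag load, pairing with a writer that puts atomic_write_barrier (or a
   release operation) between storing payload and setting ready.  */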

#else
/* Atomic operations.  PowerPC32 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/*
 * The 32-bit exchange_bool is different on powerpc64 because the subf
 * does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
 * (load word and zero the high 32 bits) load.  So powerpc64 has a
 * slightly different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
 */
# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval)         \
({                                                                            \
  unsigned int __tmp;                                                         \
  __asm__ __volatile__ (                                                      \
                    "1: lwarx   %0,0,%1\n"                                    \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval)         \
({                                                                            \
  unsigned int __tmp;                                                         \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                                 \
                    "1: lwarx   %0,0,%1\n"                                    \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
   load and reserve (ldarx) and store conditional (stdcx.) instructions.
   So for powerpc32 we stub out the 64-bit forms.  */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_atomic_exchange_64_acq(mem, value) \
    ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_64_rel(mem, value) \
    ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_and_add_64(mem, value) \
    ({ abort (); (*mem) = (value); })

# define __arch_atomic_increment_val_64(mem) \
    ({ abort (); (*mem)++; })

# define __arch_atomic_decrement_val_64(mem) \
    ({ abort (); (*mem)--; })

# define __arch_atomic_decrement_if_positive_64(mem) \
    ({ abort (); (*mem)--; })

#ifdef _ARCH_PWR4
/*
 * Newer powerpc64 processors support the new "light weight" sync (lwsync).
 * So if the build is using -mcpu=[power4,power5,power5+,970] we can
 * safely use lwsync.
 */
# define atomic_read_barrier()  __asm__ ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR      "lwsync"
# endif
#else

/*
 * Older powerpc32 processors don't support the new "light weight"
 * sync (lwsync).  So the only safe option is to use normal sync
 * for all powerpc32 applications.
 */
# define atomic_read_barrier()  __asm__ ("sync" ::: "memory")
#endif

#endif

#include <stdint.h>

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

/*
 * Powerpc does not have byte and halfword forms of load and reserve and
 * store conditional.  So for powerpc we stub out the 8- and 16-bit forms.
 */
#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \
  (abort (), 0)

#ifdef UP
# define __ARCH_ACQ_INSTR       ""
# define __ARCH_REL_INSTR       ""
#else
# define __ARCH_ACQ_INSTR       "isync"
# ifndef __ARCH_REL_INSTR
#  define __ARCH_REL_INSTR      "sync"
# endif
#endif
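
/* For reference, a sketch of what the acquire/release pairing produces
   (an illustration, not part of the original header; rTMP, rMEM, rOLD
   and rNEW stand for compiler-chosen registers): on SMP a 32-bit
   acquire CAS in this file expands to roughly

     1: lwarx   rTMP,0,rMEM   # load word and reserve
        cmpw    rTMP,rOLD
        bne     2f            # mismatch: give up
        stwcx.  rNEW,0,rMEM   # store only if the reservation still holds
        bne-    1b            # reservation lost: retry
     2: isync                 # acquire: later accesses stay after the CAS

   The release forms instead begin with __ARCH_REL_INSTR (sync, or lwsync
   where it was overridden above) ahead of the lwarx/stwcx. loop.  On UP
   builds both barrier strings are empty and only the loop remains.  */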

#ifndef MUTEX_HINT_ACQ
# define MUTEX_HINT_ACQ
#endif
#ifndef MUTEX_HINT_REL
# define MUTEX_HINT_REL
#endif

#define atomic_full_barrier()   __asm__ ("sync" ::: "memory")
#define atomic_write_barrier()  __asm__ ("eieio" ::: "memory")

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval)           \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm__ __volatile__ (                                                  \
                        "1:     lwarx   %0,0,%1\n"                            \
                        "       cmpw    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stwcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     " __ARCH_ACQ_INSTR                            \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval)           \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                             \
                        "1:     lwarx   %0,0,%1\n"                            \
                        "       cmpw    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stwcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     "                                             \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

#define __arch_atomic_exchange_32_acq(mem, value)                             \
  ({                                                                          \
    __typeof (*mem) __val;                                                    \
    __asm__ __volatile__ (                                                    \
                      "1:       lwarx   %0,0,%2\n"                            \
                      "         stwcx.  %3,0,%2\n"                            \
                      "         bne-    1b\n"                                 \
                      "   " __ARCH_ACQ_INSTR                                  \
                      : "=&r" (__val), "=m" (*mem)                            \
                      : "b" (mem), "r" (value), "m" (*mem)                    \
                      : "cr0", "memory");                                     \
    __val;                                                                    \
  })

#define __arch_atomic_exchange_32_rel(mem, value) \
  ({                                                                          \
    __typeof (*mem) __val;                                                    \
    __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                               \
                      "1:       lwarx   %0,0,%2\n"                            \
                      "         stwcx.  %3,0,%2\n"                            \
                      "         bne-    1b"                                   \
                      : "=&r" (__val), "=m" (*mem)                            \
                      : "b" (mem), "r" (value), "m" (*mem)                    \
                      : "cr0", "memory");                                     \
    __val;                                                                    \
  })

#define __arch_atomic_exchange_and_add_32(mem, value) \
  ({                                                                          \
    __typeof (*mem) __val, __tmp;                                             \
    __asm__ __volatile__ ("1:   lwarx   %0,0,%3\n"                            \
                      "         add     %1,%0,%4\n"                           \
                      "         stwcx.  %1,0,%3\n"                            \
                      "         bne-    1b"                                   \
                      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)             \
                      : "b" (mem), "r" (value), "m" (*mem)                    \
                      : "cr0", "memory");                                     \
    __val;                                                                    \
  })

#define __arch_atomic_increment_val_32(mem) \
  ({                                                                          \
    __typeof (*(mem)) __val;                                                  \
    __asm__ __volatile__ ("1:   lwarx   %0,0,%2\n"                            \
                      "         addi    %0,%0,1\n"                            \
                      "         stwcx.  %0,0,%2\n"                            \
                      "         bne-    1b"                                   \
                      : "=&b" (__val), "=m" (*mem)                            \
                      : "b" (mem), "m" (*mem)                                 \
                      : "cr0", "memory");                                     \
    __val;                                                                    \
  })

#define __arch_atomic_decrement_val_32(mem) \
  ({                                                                          \
    __typeof (*(mem)) __val;                                                  \
    __asm__ __volatile__ ("1:   lwarx   %0,0,%2\n"                            \
                      "         subi    %0,%0,1\n"                            \
                      "         stwcx.  %0,0,%2\n"                            \
                      "         bne-    1b"                                   \
                      : "=&b" (__val), "=m" (*mem)                            \
                      : "b" (mem), "m" (*mem)                                 \
                      : "cr0", "memory");                                     \
    __val;                                                                    \
  })

#define __arch_atomic_decrement_if_positive_32(mem) \
  ({ int __val, __tmp;                                                        \
     __asm__ __volatile__ ("1:  lwarx   %0,0,%3\n"                            \
                       "        cmpwi   0,%0,0\n"                             \
                       "        addi    %1,%0,-1\n"                           \
                       "        ble     2f\n"                                 \
                       "        stwcx.  %1,0,%3\n"                            \
                       "        bne-    1b\n"                                 \
                       "2:      " __ARCH_ACQ_INSTR                            \
                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)            \
                       : "b" (mem), "m" (*mem)                                \
                       : "cr0", "memory");                                    \
     __val;                                                                   \
  })

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_compare_and_exchange_val_32_acq(mem, newval, oldval); \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_compare_and_exchange_val_64_acq(mem, newval, oldval); \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })
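
/* Usage sketch (illustrative; `counter' is a made-up name).  The val
   forms return the value observed at mem, so a read-modify-write loop
   retries until its update was based on the value actually in memory:

     static int counter;
     int seen = counter, old;
     do
       {
         old = seen;
         seen = atomic_compare_and_exchange_val_acq (&counter, old + 1, old);
       }
     while (seen != old);

   Equality of seen and old means the exchange took effect.  */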

#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_compare_and_exchange_val_32_rel(mem, newval, oldval); \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_compare_and_exchange_val_64_rel(mem, newval, oldval); \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })

#define atomic_exchange_acq(mem, value) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_atomic_exchange_32_acq (mem, value);                  \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_atomic_exchange_64_acq (mem, value);                  \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })

#define atomic_exchange_rel(mem, value) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_atomic_exchange_32_rel (mem, value);                  \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_atomic_exchange_64_rel (mem, value);                  \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })
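
/* Usage sketch (a hypothetical test-and-set lock, not part of the
   original header).  Exchange-acquire takes the lock; exchange-release
   drops it, so stores in the critical section become visible before the
   lock word is seen as free:

     static int lock;
     while (atomic_exchange_acq (&lock, 1) != 0)
       ;
     ... critical section ...
     atomic_exchange_rel (&lock, 0);

   A nonzero old value means another thread already held the lock.  */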

#define atomic_exchange_and_add(mem, value) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_atomic_exchange_and_add_32 (mem, value);              \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_atomic_exchange_and_add_64 (mem, value);              \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })
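
/* Usage sketch (illustrative; `next_ticket' is a made-up name).
   atomic_exchange_and_add returns the value from before the addition,
   which makes handing out unique tickets cheap:

     static int next_ticket;
     int my_ticket = atomic_exchange_and_add (&next_ticket, 1);

   Racing threads still get distinct tickets, because the reservation
   loop retries whenever another store intervenes.  */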

#define atomic_increment_val(mem) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*(mem)) == 4)                                                 \
      __result = __arch_atomic_increment_val_32 (mem);                        \
    else if (sizeof (*(mem)) == 8)                                            \
      __result = __arch_atomic_increment_val_64 (mem);                        \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })

#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })

#define atomic_decrement_val(mem) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*(mem)) == 4)                                                 \
      __result = __arch_atomic_decrement_val_32 (mem);                        \
    else if (sizeof (*(mem)) == 8)                                            \
      __result = __arch_atomic_decrement_val_64 (mem);                        \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })

#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })


/* Decrement *MEM if it is > 0, and return the old value.  */
#define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __result;                                              \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_atomic_decrement_if_positive_32 (mem);                \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_atomic_decrement_if_positive_64 (mem);                \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })
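
/* Usage sketch (a hypothetical counting semaphore, not part of the
   original header).  The returned old value tells the caller whether a
   unit was actually taken:

     static int available = 4;
     if (atomic_decrement_if_positive (&available) > 0)
       proceed ();
     else
       back_off ();

   A positive old value means *mem really was decremented; otherwise it
   was left unchanged.  proceed () and back_off () are made-up names.  */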