/* Atomic operations used inside libc.  Linux/SH version.
   Copyright (C) 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <stdint.h>

typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

/* The SH port of the Linux kernel implements gUSA ("g" User Space
   Atomicity) support for user space atomicity.  The atomicity macros
   below use this scheme.

   Reference:
     Niibe Yutaka, "gUSA: Simple and Efficient User Space Atomicity
     Emulation with Little Kernel Modification", Linux Conference 2002,
     Japan.  http://lc.linux.or.jp/lc2002/papers/niibe0919h.pdf (in
     Japanese).

     B.N. Bershad, D. Redell, and J. Ellis, "Fast Mutual Exclusion for
     Uniprocessors", Proceedings of the Fifth Architectural Support for
     Programming Languages and Operating Systems (ASPLOS), pp. 223-233,
     October 1992.  http://www.cs.washington.edu/homes/bershad/Papers/Rcs.ps

   The gUSA protocol uses the following registers:
     r15: -(size of atomic instruction sequence) < 0
     r0:  end point
     r1:  saved stack pointer
*/
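
/* To make the protocol concrete: a minimal sketch of a stand-alone gUSA
   sequence for a 32-bit atomic increment, with MEM standing in for a
   pointer to the counter (the macros below all follow this pattern):

       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\		! r0: address of the end point (label 1)
	mov r15,r1\n\		! r1: save the stack pointer
	mov #-6,r15\n\		! r15: -(size of the 0:..1: sequence)
     0:	mov.l @%0,r2\n\		! critical section: load,
	add #1,r2\n\		!   increment,
	mov.l r2,@%0\n\		!   and store back
     1:	mov r1,r15"		! restore r15; the sequence is over
	: : "r" (mem) : "r0", "r1", "r2", "memory");

   While r15 is negative, the kernel treats the code between r0+r15 and
   r0 as one atomic sequence: if the task is preempted inside it,
   execution is rolled back to the start instead of being resumed in the
   middle.  */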
#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
	mov r15,r1\n\
	mov #-8,r15\n\
     0: mov.b @%1,%0\n\
	cmp/eq %0,%3\n\
	bf 1f\n\
	mov.b %2,@%1\n\
     1: mov r1,r15"\
	: "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
	: "r0", "r1", "t", "memory"); \
     __result; })

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
	mov r15,r1\n\
	mov #-8,r15\n\
     0: mov.w @%1,%0\n\
	cmp/eq %0,%3\n\
	bf 1f\n\
	mov.w %2,@%1\n\
     1: mov r1,r15"\
	: "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
	: "r0", "r1", "t", "memory"); \
     __result; })

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*(mem)) __result; \
     __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
	mov r15,r1\n\
	mov #-8,r15\n\
     0: mov.l @%1,%0\n\
	cmp/eq %0,%3\n\
	bf 1f\n\
	mov.l %2,@%1\n\
     1: mov r1,r15"\
	: "=&r" (__result) : "r" (mem), "r" (newval), "r" (oldval) \
	: "r0", "r1", "t", "memory"); \
     __result; })

/* XXX We do not really need 64-bit compare-and-exchange.  At least
   not at the moment.  Using it would cause portability problems, since
   not many other 32-bit architectures support such an operation.  So
   don't define any code for now.  */

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)
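
/* Atomically add VALUE to *MEM and return the value *MEM held before
   the addition.  */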
#define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __result, __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.b @%2,%0\n\
	add %0,%1\n\
	mov.b %1,@%2\n\
     1: mov r1,r15"\
	: "=&r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
	: "r0", "r1", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.w @%2,%0\n\
	add %0,%1\n\
	mov.w %1,@%2\n\
     1: mov r1,r15"\
	: "=&r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
	: "r0", "r1", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.l @%2,%0\n\
	add %0,%1\n\
	mov.l %1,@%2\n\
     1: mov r1,r15"\
	: "=&r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
	: "r0", "r1", "memory"); \
     else \
       { \
	 __typeof (mem) memp = (mem); \
	 do \
	   __result = *memp; \
	 while (__arch_compare_and_exchange_val_64_acq \
		(memp, __result + __value, __result) != __result); \
       } \
     __result; })
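
/* Atomically add VALUE to *MEM; the result is discarded.  */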
#define atomic_add(mem, value) \
  (void) ({ __typeof (*(mem)) __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.b @%1,r2\n\
	add r2,%0\n\
	mov.b %0,@%1\n\
     1: mov r1,r15"\
	: "=&r" (__tmp) : "r" (mem), "0" (__value) \
	: "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.w @%1,r2\n\
	add r2,%0\n\
	mov.w %0,@%1\n\
     1: mov r1,r15"\
	: "=&r" (__tmp) : "r" (mem), "0" (__value) \
	: "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.l @%1,r2\n\
	add r2,%0\n\
	mov.l %0,@%1\n\
     1: mov r1,r15"\
	: "=&r" (__tmp) : "r" (mem), "0" (__value) \
	: "r0", "r1", "r2", "memory"); \
     else \
       { \
	 __typeof (*(mem)) oldval; \
	 __typeof (mem) memp = (mem); \
	 do \
	   oldval = *memp; \
	 while (__arch_compare_and_exchange_val_64_acq \
		(memp, oldval + __value, oldval) != oldval); \
       } \
     })
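
/* Atomically add VALUE to *MEM and return nonzero if the result of the
   addition is negative.  */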
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result; \
     __typeof (*(mem)) __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.b @%2,r2\n\
	add r2,%1\n\
	mov.b %1,@%2\n\
     1: mov r1,r15\n\
	shal %1\n\
	movt %0"\
	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
	: "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.w @%2,r2\n\
	add r2,%1\n\
	mov.w %1,@%2\n\
     1: mov r1,r15\n\
	shal %1\n\
	movt %0"\
	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
	: "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.l @%2,r2\n\
	add r2,%1\n\
	mov.l %1,@%2\n\
     1: mov r1,r15\n\
	shal %1\n\
	movt %0"\
	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
	: "r0", "r1", "r2", "t", "memory"); \
     else \
       abort (); \
     __result; })
#define atomic_add_zero(mem, value) \
  ({ unsigned char __result; \
     __typeof (*(mem)) __tmp, __value = (value); \
     if (sizeof (*(mem)) == 1) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.b @%2,r2\n\
	add r2,%1\n\
	mov.b %1,@%2\n\
     1: mov r1,r15\n\
	tst %1,%1\n\
	movt %0"\
	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
	: "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.w @%2,r2\n\
	add r2,%1\n\
	mov.w %1,@%2\n\
     1: mov r1,r15\n\
	tst %1,%1\n\
	movt %0"\
	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
	: "r0", "r1", "r2", "t", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.l @%2,r2\n\
	add r2,%1\n\
	mov.l %1,@%2\n\
     1: mov r1,r15\n\
	tst %1,%1\n\
	movt %0"\
	: "=r" (__result), "=&r" (__tmp) : "r" (mem), "1" (__value) \
	: "r0", "r1", "r2", "t", "memory"); \
     else \
       abort (); \
     __result; })

#define atomic_increment_and_test(mem) atomic_add_zero((mem), 1)
#define atomic_decrement_and_test(mem) atomic_add_zero((mem), -1)
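
/* Atomically set bit number BIT of *MEM.  */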
#define atomic_bit_set(mem, bit) \
  (void) ({ unsigned int __mask = 1 << (bit); \
     if (sizeof (*(mem)) == 1) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.b @%0,r2\n\
	or %1,r2\n\
	mov.b r2,@%0\n\
     1: mov r1,r15"\
	: : "r" (mem), "r" (__mask) \
	: "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.w @%0,r2\n\
	or %1,r2\n\
	mov.w r2,@%0\n\
     1: mov r1,r15"\
	: : "r" (mem), "r" (__mask) \
	: "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	mov r15,r1\n\
	mov #-6,r15\n\
     0: mov.l @%0,r2\n\
	or %1,r2\n\
	mov.l r2,@%0\n\
     1: mov r1,r15"\
	: : "r" (mem), "r" (__mask) \
	: "r0", "r1", "r2", "memory"); \
     else \
       abort (); \
     })
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned int __mask = 1 << (bit); \
     unsigned int __result = __mask; \
     if (sizeof (*(mem)) == 1) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
	mov r15,r1\n\
	mov #-8,r15\n\
     0: mov.b @%2,r2\n\
	or r2,%1\n\
	and r2,%0\n\
	mov.b %1,@%2\n\
     1: mov r1,r15"\
	: "=&r" (__result), "=&r" (__mask) \
	: "r" (mem), "0" (__result), "1" (__mask) \
	: "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 2) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
	mov r15,r1\n\
	mov #-8,r15\n\
     0: mov.w @%2,r2\n\
	or r2,%1\n\
	and r2,%0\n\
	mov.w %1,@%2\n\
     1: mov r1,r15"\
	: "=&r" (__result), "=&r" (__mask) \
	: "r" (mem), "0" (__result), "1" (__mask) \
	: "r0", "r1", "r2", "memory"); \
     else if (sizeof (*(mem)) == 4) \
       __asm__ __volatile__ ("\
	.align 2\n\
	mova 1f,r0\n\
	nop\n\
	mov r15,r1\n\
	mov #-8,r15\n\
     0: mov.l @%2,r2\n\
	or r2,%1\n\
	and r2,%0\n\
	mov.l %1,@%2\n\
     1: mov r1,r15"\
	: "=&r" (__result), "=&r" (__mask) \
	: "r" (mem), "0" (__result), "1" (__mask) \
	: "r0", "r1", "r2", "memory"); \
     else \
       abort (); \
     __result; })