// Link-time assertion hook: presumably never defined anywhere, so any call
// that survives compilation (see MACRO_CAS_ASSERT below) fails at link time
// with this self-describing symbol name — TODO confirm no definition exists.
5 extern "C" void cas_error_type_with_bad_size_used(void);
// Size assertion for the type-safe CAS wrappers: the visible expansion ends
// in a call to the undefined cas_error_type_with_bad_size_used(). The macro
// line(s) between the #define and the call are missing from this fragment —
// presumably a comparison of rs (requested size) against cs (CAS-able size)
// that guards the call; confirm against the full file.
// (No comment may be placed between the two lines: the '\' splices them.)
7 #define MACRO_CAS_ASSERT(rs,cs) \
9 cas_error_type_with_bad_size_used()
// Simple two-member aggregate used by mp_cas2() below to describe the
// adjacent word pair operated on by the double-word CAS.
// NOTE(review): the class head and member declarations are missing from
// this fragment; only the template header and constructor are visible.
12 template< typename A, typename B >
19 Pair(A const &a, B const &b) : first(a), second(b) {}
23 //---------------------------------------------------------------------------
// Type-safe single-word compare-and-swap: MACRO_CAS_ASSERT rejects (at link
// time) any Type whose size differs from Mword, then the value-representation
// is reinterpreted as a machine word and delegated to cas_unsafe().
// NOTE(review): the return-type line and braces are missing from this view.
26 template< typename Type > inline
28 cas(Type *ptr, Type oldval, Type newval)
30 MACRO_CAS_ASSERT(sizeof(Type),sizeof(Mword));
31 return cas_unsafe(reinterpret_cast<Mword*>(ptr),
32 (Mword)oldval, (Mword)newval);
// Type-safe double-word compare-and-swap: Type must be exactly two machine
// words wide (enforced via MACRO_CAS_ASSERT), old/new values are passed by
// pointer because the double-word operands do not fit a single register.
// NOTE(review): the return-type line and braces are missing from this view.
35 template< typename Type > inline
37 cas2(Type *ptr, Type *oldval, Type *newval)
39 MACRO_CAS_ASSERT(sizeof(Type),(sizeof(Mword)*2));
40 return cas2_unsafe(reinterpret_cast<Mword*>(ptr),
41 reinterpret_cast<Mword*>(oldval),
42 reinterpret_cast<Mword*>(newval));
// Atomically replace *ptr with (*ptr & mask) | bits, retrying the CAS until
// it succeeds (standard lock-free read-modify-write loop).
// NOTE(review): the lines that load/reload `old` each iteration are missing
// from this fragment — confirm `old` is re-read inside the loop in the full
// file, otherwise the loop could spin forever on contention.
45 template <typename T> inline
47 atomic_change(T *ptr, T mask, T bits)
54 while (!cas(ptr, old, (old & mask) | bits));
58 //---------------------------------------------------------------------------
59 IMPLEMENTATION [ia32,ux]:
// SMP-safe AND on a machine word: `lock; andl` makes the read-modify-write
// atomic across processors. The "=m"(*l) output together with the "m"(*l)
// input is the old-style GCC idiom telling the compiler the word is both
// read and written (equivalent to a modern "+m" constraint).
63 atomic_mp_and(Mword *l, Mword value)
65 asm volatile ("lock; andl %1, %2" : "=m"(*l) : "ir"(value), "m"(*l));
// SMP-safe OR on a machine word; see atomic_mp_and for the constraint idiom.
70 atomic_mp_or(Mword *l, Mword value)
72 asm volatile ("lock; orl %1, %2" : "=m"(*l) : "ir"(value), "m"(*l));
// SMP-safe ADD on a machine word; see atomic_mp_and for the constraint idiom.
78 atomic_mp_add(Mword *l, Mword value)
80 asm volatile ("lock; addl %1, %2" : "=m"(*l) : "ir"(value), "m"(*l));
// ADD without the lock prefix: a single instruction, so atomic with respect
// to interrupts on the local CPU, but NOT safe against other processors
// (contrast with atomic_mp_add above).
85 atomic_add(Mword *l, Mword value)
87 asm volatile ("addl %1, %2" : "=m"(*l) : "ir"(value), "m"(*l));
// AND without the lock prefix — local-CPU-atomic only; see atomic_add.
92 atomic_and(Mword *l, Mword mask)
94 asm volatile ("andl %1, %2" : "=m"(*l) : "ir"(mask), "m"(*l));
// OR without the lock prefix — local-CPU-atomic only; see atomic_add.
99 atomic_or(Mword *l, Mword bits)
101 asm volatile ("orl %1, %2" : "=m"(*l) : "ir"(bits), "m"(*l));
104 // ``unsafe'' means the operation performs no check of the given type's size.
105 // There are type safe versions of the cas operations in the architecture
106 // independent part of atomic that use the unsafe versions and make a type
// Word CAS without a type-size check ("unsafe" = size-unchecked, not racy).
// Operands follow the cmpxchg convention: newval in a register, oldval in
// %eax, result compared against oldval afterwards. NOTE(review): the asm
// mnemonic and output lines are missing from this fragment — presumably a
// (non-locked, UP-only) `cmpxchgl` leaving the old memory value in `tmp`;
// confirm against the full file.
111 cas_unsafe(Mword *ptr, Mword oldval, Mword newval)
118 : "r" (newval), "m" (*ptr), "a" (oldval)
// Success iff the value read back from memory equals the expected oldval.
121 return tmp == oldval;
// SMP-safe word CAS: `lock; cmpxchgl` compares *m against %eax (= o) and
// stores n on match. NOTE(review): the output-constraint and return lines
// are missing from this fragment.
127 mp_cas_arch(Mword *m, Mword o, Mword n)
132 ("lock; cmpxchgl %1, %2"
134 : "r" (n), "m" (*m), "a" (o)
// Double-word CAS without a type-size check, UP-only (no lock prefix).
// cmpxchg8b compares the 8-byte memory operand against edx:eax (old pair)
// and stores ecx:ebx (new pair) on match; `sete %%cl` captures the ZF
// result. NOTE(review): the output constraints and return statement are
// missing from this fragment — note %ecx serves as both a new-value input
// and the sete destination; confirm the outputs in the full file.
142 cas2_unsafe(Mword *ptr, Mword *oldval, Mword *newval)
146 ("cmpxchg8b %3 ; sete %%cl"
151 "a" (* oldval), "d" (*(oldval+1)),
152 "b" (* newval), "c" (*(newval+1))
// SMP-safe double-word CAS (ia32): locked cmpxchg8b with the expected pair
// in edx:eax and the result flag captured in %cl via sete. The memory
// operand is passed as char* so the caller controls alignment/typing.
// NOTE(review): the new-value ("b"/"c") inputs and the return line are
// missing from this fragment.
160 mp_cas2_arch(char *m, Mword o1, Mword o2, Mword n1, Mword n2)
164 ("lock; cmpxchg8b %3 ; sete %%cl"
165 : "=c" (ret), "=a" (o1), "=d" (o2)
166 : "m" (*m), "a" (o1), "d" (o2),
// Unconditional atomic exchange: swaps the constant 1 into *l and leaves the
// previous value in tmp (the "0"(1) input ties the constant to output %0).
// xchg with a memory operand is implicitly locked on x86. NOTE(review): the
// enclosing function is not visible in this fragment — presumably a
// test-and-set / spin-lock acquire primitive; confirm in the full file.
177 asm volatile ("xchg %0, %1" : "=r"(tmp) : "m"(*l), "0"(1) : "memory");
181 //---------------------------------------------------------------------------
182 IMPLEMENTATION[(ppc32 && !mp) || (arm && !armv6plus)]:
184 #include "processor.h"
// Pseudo-atomic AND for configs without SMP or exclusive-load support
// (ppc32 !mp, pre-ARMv6): atomicity is achieved by disabling interrupts
// around the update — sufficient only on a uniprocessor, which matches this
// IMPLEMENTATION section's config. NOTE(review): the actual `*l &= value`
// line is missing from this fragment.
186 inline NEEDS["processor.h"]
188 atomic_mp_and(Mword *l, Mword value)
190 Proc::Status s = Proc::cli_save();
192 Proc::sti_restore(s);
// Pseudo-atomic OR via interrupt disable — uniprocessor-only; see
// atomic_mp_and above. NOTE(review): the update line itself is missing
// from this fragment.
195 inline NEEDS["processor.h"]
197 atomic_mp_or(Mword *l, Mword value)
199 Proc::Status s = Proc::cli_save();
201 Proc::sti_restore(s);
// Pseudo-atomic ADD via interrupt disable — uniprocessor-only; see
// atomic_mp_and above. NOTE(review): the update line itself is missing
// from this fragment.
204 inline NEEDS["processor.h"]
206 atomic_mp_add(Mword *l, Mword value)
208 Proc::Status s = Proc::cli_save();
210 Proc::sti_restore(s);
213 //---------------------------------------------------------------------------
214 IMPLEMENTATION[arm && armv6plus]:
// SMP-safe ADD on ARMv6+ using an exclusive-monitor loop: ldrex loads the
// word, strex attempts the store and writes 0/1 into `ret` for
// success/failure. NOTE(review): the retry branch that re-runs the sequence
// when strex fails (test of `ret`) is missing from this fragment.
218 atomic_mp_add(Mword *l, Mword value)
224 "ldrex %[v], [%[mem]] \n"
225 "add %[v], %[v], %[addval] \n"
226 "strex %[ret], %[v], [%[mem]] \n"
229 : [v] "=&r" (tmp), [ret] "=&r" (ret), "+m" (*l)
230 : [mem] "r" (l), [addval] "r" (value)
// SMP-safe AND on ARMv6+ via ldrex/strex; same structure and caveats as
// atomic_mp_add above (retry branch not visible in this fragment).
236 atomic_mp_and(Mword *l, Mword value)
242 "ldrex %[v], [%[mem]] \n"
243 "and %[v], %[v], %[andval] \n"
244 "strex %[ret], %[v], [%[mem]] \n"
247 : [v] "=&r" (tmp), [ret] "=&r" (ret), "+m" (*l)
248 : [mem] "r" (l), [andval] "r" (value)
// SMP-safe OR on ARMv6+ via ldrex/strex; same structure and caveats as
// atomic_mp_add above (retry branch not visible in this fragment).
254 atomic_mp_or(Mword *l, Mword value)
260 "ldrex %[v], [%[mem]] \n"
261 "orr %[v], %[v], %[orval] \n"
262 "strex %[ret], %[v], [%[mem]] \n"
265 : [v] "=&r" (tmp), [ret] "=&r" (ret), "+m" (*l)
266 : [mem] "r" (l), [orval] "r" (value)
// SMP-safe word CAS on ARMv6+: a plain ldr+teq pre-check (cheap early-out
// without claiming the exclusive monitor), then the ldrex/teq/strexeq
// sequence performs the actual conditional store. NOTE(review): the branch
// lines between the pre-check and the exclusive loop, and the return
// statement, are missing from this fragment.
273 mp_cas_arch(Mword *m, Mword o, Mword n)
280 "ldr %[tmp], [%[m]] \n"
281 "teq %[tmp], %[o] \n"
283 "ldrex %[tmp], [%[m]] \n"
284 "teq %[tmp], %[o] \n"
285 "strexeq %[res], %[n], [%[m]] \n"
289 : [tmp] "=&r" (tmp), [res] "=&r" (res), "+m" (*m)
290 : [n] "r" (n), [m] "r" (m), [o] "r" (o)
// strex convention: res == 0 means the store succeeded,
294 // res == 1 is failed
// SMP-safe double-word CAS on ARMv6+ using ldrexd/strexd. The new-value and
// temp operands are pinned to specific registers (r6/r7, r8/r9) — presumably
// because ldrexd/strexd require consecutive even/odd register pairs; confirm
// against the ARM ARM. Mirrors mp_cas_arch: a non-exclusive ldrd pre-check,
// then the exclusive ldrexd/teq/strexdeq sequence. NOTE(review): the branch
// lines between the sections and the return statement are missing from this
// fragment; also note `m` is a char* and the pair is addressed as *(m + 1)
// in the "+m" constraints.
301 mp_cas2_arch(char *m, Mword o1, Mword o2, Mword n1, Mword n2)
303 register Mword _n1 asm("r6") = n1;
304 register Mword _n2 asm("r7") = n2;
305 register Mword tmp1 asm("r8");
306 register Mword tmp2 asm("r9");
// Cheap pre-check without claiming the exclusive monitor.
312 "ldrd %[tmp1], [%[m]] \n"
313 "teq %[tmp1], %[o1] \n"
314 "teqeq %[tmp2], %[o2] \n"
// Exclusive sequence: re-load, re-compare, conditionally store the pair.
316 "ldrexd %[tmp1], [%[m]] \n"
318 "teq %[tmp1], %[o1] \n"
319 "teqeq %[tmp2], %[o2] \n"
320 "strexdeq %[res], %[n1], [%[m]] \n"
324 : [tmp1] "=r" (tmp1), [tmp2] "=r" (tmp2),
325 [res] "=&r" (res), "+m" (*m), "+m" (*(m + 1))
326 : "0" (tmp1), "1" (tmp2),
327 [n1] "r" (_n1), "r" (_n2),
329 [o1] "r" (o1), [o2] "r" (o2)
335 //---------------------------------------------------------------------------
// Type-safe SMP CAS front-end: size-checks T against Mword, then forwards
// to the per-architecture mp_cas_arch defined in the sections above.
// NOTE(review): the return-type line, braces, and the remaining argument
// line of the mp_cas_arch call are missing from this fragment.
338 template< typename T > inline
340 mp_cas(T *m, T o, T n)
342 MACRO_CAS_ASSERT(sizeof(T),sizeof(Mword));
343 return mp_cas_arch(reinterpret_cast<Mword*>(m),
// Type-safe SMP double-word CAS front-end: both halves of the Pair must be
// exactly one machine word; forwards to the per-architecture mp_cas2_arch
// (which takes the pair's address as char*). NOTE(review): the return-type
// line, braces, and the remaining argument lines are missing from this
// fragment.
348 template< typename T, typename T2 > inline
350 mp_cas2(Pair<T,T2> *m, T o1, T2 o2, T n1, T2 n2)
352 MACRO_CAS_ASSERT(sizeof(T),sizeof(Mword));
353 MACRO_CAS_ASSERT(sizeof(T2),sizeof(Mword));
354 return mp_cas2_arch(reinterpret_cast<char *>(m),
362 //---------------------------------------------------------------------------
363 IMPLEMENTATION [!mp]:
// Uniprocessor (!mp) build: the "MP" CAS needs no bus locking and simply
// delegates to the plain cas() wrapper defined earlier.
// NOTE(review): the return-type line is missing from this fragment.
365 template< typename T > inline
367 mp_cas(T *m, T o, T n)
368 { return cas(m,o,n); }