1 IMPLEMENTATION [amd64]:
// Atomically AND `value` into *l, SMP-safe: the `lock` prefix makes the
// read-modify-write indivisible across CPUs ("mp" = multiprocessor).
// NOTE(review): return type/braces are not visible in this extract; the asm
// uses the old split "=m"/"m" operand idiom (pre-"+m") and has no "memory"
// clobber, so it is not a compiler barrier — confirm callers don't need one.
5 atomic_mp_and(Mword *l, Mword value)
7 asm volatile ("lock; andq %1, %2" : "=m"(*l) : "ir"(value), "m"(*l));
// Atomically OR `value` into *l, SMP-safe via the `lock` prefix.
// NOTE(review): same split "=m"/"m" idiom and missing "memory" clobber as
// the other mp_ ops here — not a compiler barrier.
12 atomic_mp_or(Mword *l, Mword value)
14 asm volatile ("lock; orq %1, %2" : "=m"(*l) : "ir"(value), "m"(*l));
// Atomically add `value` to *l, SMP-safe via the `lock` prefix.
19 atomic_mp_add (Mword *l, Mword value)
21 asm volatile ("lock; addq %1, %2" : "=m"(*l) : "ir"(value), "m"(*l));
// Add `value` to *l atomically w.r.t. the local CPU only: no `lock` prefix,
// so the RMW is indivisible against interrupts on this CPU but NOT against
// other processors (use atomic_mp_add for that).
26 atomic_add (Mword *l, Mword value)
28 asm volatile ("addq %1, %2" : "=m"(*l) : "ir"(value), "m"(*l));
// AND `mask` into *l as a single instruction — local-CPU atomicity only
// (no `lock` prefix; see atomic_mp_and for the SMP variant).
33 atomic_and (Mword *l, Mword mask)
35 asm volatile ("andq %1, %2" : "=m"(*l) : "ir"(mask), "m"(*l));
// OR `bits` into *l as a single instruction — local-CPU atomicity only
// (no `lock` prefix; see atomic_mp_or for the SMP variant).
40 atomic_or (Mword *l, Mword bits)
42 asm volatile ("orq %1, %2" : "=m"(*l) : "ir"(bits), "m"(*l));
45 // ``unsafe'' means the operation performs no size checking for the given type.
46 // There are type safe versions of the cas operations in the architecture
47 // independent part of atomic that use the unsafe versions and make a type
// Single-word compare-and-swap, local-CPU only (no `lock` prefix; see
// mp_cas_arch for the SMP variant).  The expected value travels in rax
// ("a" (oldval), as cmpxchg requires), the replacement in any register.
// NOTE(review): the instruction line and output constraints are not visible
// in this extract — presumed `cmpxchgq %1, %2` as in mp_cas_arch below.
52 cas_unsafe (Mword *ptr, Mword oldval, Mword newval)
59 : "r" (newval), "m" (*ptr), "a" (oldval)
// SMP-safe single-word compare-and-swap: `lock; cmpxchgq` compares rax
// (oldval) with *ptr and, on match, stores newval.
// NOTE(review): the output-constraint line and clobbers sit on lines not
// visible in this extract — how success is reported (ZF via sete, or the
// returned rax value) cannot be confirmed from here.
67 mp_cas_arch (Mword *ptr, Mword oldval, Mword newval)
72 ("lock; cmpxchgq %1, %2"
74 : "r" (newval), "m" (*ptr), "a" (oldval)
// Double-width (2 x Mword = 128-bit) compare-and-swap, local-CPU only
// (no `lock` prefix).  cmpxchg16b compares rdx:rax (oldval[1]:oldval[0])
// against the 16 bytes at *ptr and, on match, stores rcx:rbx
// (newval[1]:newval[0]); `sete %%cl` captures the success flag.
// NOTE(review): output constraints are on lines not visible here.
// cmpxchg16b requires `ptr` to be 16-byte aligned (per the ISA manuals) —
// confirm callers guarantee that.
82 cas2_unsafe (Mword *ptr, Mword *oldval, Mword *newval)
86 ("cmpxchg16b %3 ; sete %%cl"
91 "a" (* oldval), "d" (*(oldval+1)),
92 "b" (* newval), "c" (*(newval+1))
// SMP-safe double-width (128-bit) compare-and-swap on the 16 bytes at p.
// `lock; cmpxchg16b` compares rdx:rax (o2:o1) against *p and, on match,
// stores the new pair; `sete %%cl` latches the success flag into the
// "=c" (ret) output, while "=a"/"=d" receive the value actually found.
// NOTE(review): the input lines carrying n1/n2 (expected in rbx:rcx, as in
// cas2_unsafe above) and any clobber list are not visible in this extract;
// cmpxchg16b also requires p to be 16-byte aligned — confirm at call sites.
100 mp_cas2_arch (char *p, Mword o1, Mword o2, Mword n1, Mword n2)
104 ("lock; cmpxchg16b %3 ; sete %%cl"
105 : "=c" (ret), "=a" (o1), "=d" (o2)
106 : "m" (*p), "a" (o1), "d" (o2),
118 asm volatile ("xchg %0, %1" : "=r"(tmp) : "m"(*l), "0"(1) : "memory");