/*
 * Copyright (C) 2013 Imagination Technologies Ltd.
 *
 * Licensed under LGPL v2.1 or later, see the file COPYING.LIB in this tarball.
 */
/* Atomic type definitions consumed by the generic atomic.h machinery.
   All widths are declared so the generic code type-checks, but on Meta
   only the 32-bit (and hence pointer-sized) operations are actually
   implemented; the other widths resolve to link-error stubs below.  */
typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
/* Deliberately never defined: referencing it from an unsupported atomic
   width turns the mistake into a link-time error instead of silently
   miscompiling.  */
void __metag_link_error (void);

/* Compiler-only barrier: the "memory" clobber stops the compiler from
   reordering or caching memory accesses across this point.  No hardware
   fence instruction is emitted.  */
#define atomic_full_barrier() \
  __asm__ __volatile__("": : :"memory")
/* Atomic compare and exchange.  This sequence relies on the kernel to
   provide a compare and exchange operation which is atomic.  */

/* 8- and 16-bit compare-and-exchange are not supported on Meta; any use
   is diagnosed at link time via the undefined __metag_link_error.  The
   statement expression still yields OLDVAL so callers type-check.  */
#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __metag_link_error (); oldval; })

#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __metag_link_error (); oldval; })
/* This code uses the kernel helper to do cmpxchg.  It relies on the fact
   the helper code only clobbers D0Re0.

   MOVT/ADD build the fixed helper address 0x6ffff040 in D1RtP, and
   SWAP D1RtP, PC branches there while saving the return address.  On
   return, D0Re0 is tested: nonzero means the exchange failed against a
   concurrent update, so we retry from label 0.  The result of the whole
   statement expression is the value read from *MEM (OLDVAL on success).

   NOTE(review): the dropped-line reconstruction (labels 0/1, the retry
   test on D0Re0, and the final a_current result) follows the upstream
   uClibc metag port -- confirm against the tree this file came from.  */
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ register __typeof (oldval) a_current __asm__ ("D1Ar1"); \
     register __typeof (oldval) a_newval __asm__ ("D0Ar2") = (newval); \
     register __typeof (mem) a_ptr __asm__ ("D1Ar3") = (mem); \
     register __typeof (oldval) a_oldval __asm__ ("D0Ar4") = (oldval); \
     __asm__ __volatile__ \
       ("0:\n\t" \
        "GETD %[cur], [%[ptr]]\n\t" \
        "CMP %[cur], %[old]\n\t" \
        "BNE 1f\n\t" \
        "MOVT D1RtP, #0x6fff\n\t" \
        "ADD D1RtP, D1RtP, #0xf040\n\t" \
        "SWAP D1RtP, PC\n\t" \
        "MOV %[cur], %[old]\n\t" \
        "CMP D0Re0, #0\n\t" \
        "BNE 0b\n\t" \
        "1:\n\t" \
        : [cur] "=&r" (a_current) \
        : [new] "r" (a_newval), [ptr] "r" (a_ptr), \
          [old] "r" (a_oldval) \
        : "D0Re0", "D1RtP", "cc", "memory"); \
     a_current; })
/* 64-bit compare-and-exchange is likewise unsupported on Meta: any use
   fails at link time via the undefined __metag_link_error, while the
   statement expression yields OLDVAL so callers still type-check.  */
#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __metag_link_error (); oldval; })