#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the feature in boot_cpu_data.
 */

#define xchg(ptr, v)                                                    \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
        unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * The semantics of CMPXCHG8B are a bit strange, which is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside it. This inlines well in most cases, the cached
 * cost is around 38 cycles. (In the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU save as a cost, so it's not
 * clear which path to take.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see a coherent 64-bit value.
 */
static inline void __set_64bit(unsigned long long *ptr,
                               unsigned int low, unsigned int high)
{
        asm volatile("\n1:\t"
                     "movl (%0), %%eax\n\t"
                     "movl 4(%0), %%edx\n\t"
                     LOCK_PREFIX "cmpxchg8b (%0)\n\t"
                     "jnz 1b"
                     : /* no outputs */
                     : "D"(ptr),
                       "b"(low),
                       "c"(high)
                     : "ax", "dx", "memory");
}

static inline void __set_64bit_constant(unsigned long long *ptr,
                                        unsigned long long value)
{
        __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
}

#define ll_low(x)       *(((unsigned int *)&(x)) + 0)
#define ll_high(x)      *(((unsigned int *)&(x)) + 1)

static inline void __set_64bit_var(unsigned long long *ptr,
                                   unsigned long long value)
{
        __set_64bit(ptr, ll_low(value), ll_high(value));
}

#define set_64bit(ptr, value)                   \
        (__builtin_constant_p((value))          \
         ? __set_64bit_constant((ptr), (value)) \
         : __set_64bit_var((ptr), (value)))

#define _set_64bit(ptr, value)                                          \
        (__builtin_constant_p(value)                                    \
         ? __set_64bit(ptr, (unsigned int)(value),                      \
                       (unsigned int)((value) >> 32))                   \
         : __set_64bit(ptr, ll_low((value)), ll_high((value))))

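/*
 * Illustrative usage sketch (not part of the original header; the
 * descriptor layout and helper name are hypothetical): set_64bit()
 * stores all 64 bits in one shot, so a concurrent reader never observes
 * a torn, half-written value.  As the note at the top of this file says,
 * callers must have verified CMPXCHG8B support (e.g. boot_cpu_data.x86
 * > 4) if the kernel may run on pre-Pentium CPUs.
 */
static inline void example_publish_descriptor(unsigned long long *slot,
                                              unsigned int base,
                                              unsigned int flags)
{
        unsigned long long desc = ((unsigned long long)flags << 32) | base;

        /* One atomic 64-bit store via the cmpxchg8b loop above. */
        set_64bit(slot, desc);
}
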
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *         but strictly speaking the primitive as written is not valid,
 *         since *ptr is also an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
                                   int size)
{
        switch (size) {
        case 1:
                asm volatile("xchgb %b0,%1"
                             : "=q" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 2:
                asm volatile("xchgw %w0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 4:
                asm volatile("xchgl %0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        }
        return x;
}

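/*
 * Illustrative usage sketch (not part of the original header; the flag
 * and helper name are hypothetical): xchg() is typically used to hand a
 * value off atomically, e.g. claiming a pending-work flag so that exactly
 * one caller sees the old value of 1 and does the work.
 */
static inline int example_claim_pending(volatile unsigned long *pending)
{
        /* Swap in 0 and look at what was there before. */
        return xchg(pending, 0UL) != 0;
}
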
/*
 * Atomic compare and exchange.  Compare OLD with MEM; if they are
 * identical, store NEW in MEM.  Return the initial value in MEM.
 * Success is indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr, o, n)                                              \
        ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),       \
                                       (unsigned long)(n),              \
                                       sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n)                                         \
        ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),  \
                                            (unsigned long)(n),         \
                                            sizeof(*(ptr))))
#define cmpxchg_local(ptr, o, n)                                        \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
                                             (unsigned long)(n),        \
                                             sizeof(*(ptr))))
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)                                            \
        ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
                                         (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)                                      \
        ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
                                               (unsigned long long)(n)))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
                asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
                asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
        return old;
}

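/*
 * Illustrative usage sketch (not part of the original header; the helper
 * name is hypothetical): the canonical cmpxchg retry loop.  As described
 * above, success is detected by comparing the returned value with the
 * expected one.  __cmpxchg() is used directly so the sketch does not
 * depend on CONFIG_X86_CMPXCHG.
 */
static inline unsigned long example_atomic_add(volatile unsigned long *p,
                                               unsigned long delta)
{
        unsigned long old, prev;

        do {
                old = *p;
                prev = __cmpxchg(p, old, old + delta, sizeof(*p));
        } while (prev != old);          /* someone else changed *p, retry */

        return old + delta;
}
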
/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
                                           unsigned long old,
                                           unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                asm volatile("lock; cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
                asm volatile("lock; cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
                asm volatile("lock; cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
        return old;
}

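/*
 * Illustrative usage sketch (not part of the original header; the shared
 * flag and helper name are hypothetical): for memory shared with a
 * hypervisor the locked variant must be used even on a UP kernel, because
 * the other side of the shared page may run on a different physical CPU.
 */
static inline int example_clear_shared_flag(volatile unsigned long *shared)
{
        /* Report success only if we were the ones to clear the flag. */
        return __sync_cmpxchg(shared, 1UL, 0UL, sizeof(*shared)) == 1UL;
}
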
static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                asm volatile("cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
                asm volatile("cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
                asm volatile("cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
        return old;
}

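/*
 * Illustrative usage sketch (not part of the original header; the helper
 * name is hypothetical): the _local variant omits the lock prefix, so it
 * is only atomic with respect to the local CPU (e.g. against interrupts)
 * and is suitable only for data that no other CPU can touch, such as
 * per-CPU state.
 */
static inline unsigned long example_local_add(volatile unsigned long *p,
                                              unsigned long delta)
{
        unsigned long old, prev;

        do {
                old = *p;
                prev = __cmpxchg_local(p, old, old + delta, sizeof(*p));
        } while (prev != old);

        return old + delta;
}
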
static inline unsigned long long __cmpxchg64(volatile void *ptr,
                                             unsigned long long old,
                                             unsigned long long new)
{
        unsigned long long prev;
        asm volatile(LOCK_PREFIX "cmpxchg8b %3"
                     : "=A"(prev)
                     : "b"((unsigned long)new),
                       "c"((unsigned long)(new >> 32)),
                       "m"(*__xg(ptr)),
                       "0"(old)
                     : "memory");
        return prev;
}

static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
                                                   unsigned long long old,
                                                   unsigned long long new)
{
        unsigned long long prev;
        asm volatile("cmpxchg8b %3"
                     : "=A"(prev)
                     : "b"((unsigned long)new),
                       "c"((unsigned long)(new >> 32)),
                       "m"(*__xg(ptr)),
                       "0"(old)
                     : "memory");
        return prev;
}

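/*
 * Illustrative usage sketch (not part of the original header; the helper
 * name is hypothetical): a coherent 64-bit read on a 32-bit CPU can be
 * built from cmpxchg8b by comparing against an arbitrary guess.  If the
 * comparison fails, the current value is returned; if it happens to
 * succeed, the same value is written back.  Either way the full 64 bits
 * are read atomically, which is what the reader side mentioned above
 * __set_64bit() needs.  Requires a CMPXCHG8B-capable CPU.
 */
static inline unsigned long long example_read_64bit(volatile unsigned long long *p)
{
        /* 0 is just a guess; cmpxchg8b hands back the real value. */
        return __cmpxchg64(p, 0ULL, 0ULL);
}
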
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386. It may be necessary
 * to emulate cmpxchg on the 80386 CPU. For that purpose we define a
 * function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
                                        unsigned long new, int size)
{
        switch (size) {
        case 1:
                return cmpxchg_386_u8(ptr, old, new);
        case 2:
                return cmpxchg_386_u16(ptr, old, new);
        case 4:
                return cmpxchg_386_u32(ptr, old, new);
        }
        return old;
}

#define cmpxchg(ptr, o, n)                                              \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        if (likely(boot_cpu_data.x86 > 3))                              \
                __ret = (__typeof__(*(ptr)))__cmpxchg((ptr),            \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        else                                                            \
                __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),          \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        __ret;                                                          \
})
#define cmpxchg_local(ptr, o, n)                                        \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        if (likely(boot_cpu_data.x86 > 3))                              \
                __ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),      \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        else                                                            \
                __ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),          \
                                (unsigned long)(o), (unsigned long)(n), \
                                sizeof(*(ptr)));                        \
        __ret;                                                          \
})
#endif

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486. It may be
 * necessary to emulate cmpxchg8b on those CPUs.
 */

extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);

#define cmpxchg64(ptr, o, n)                                            \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        if (likely(boot_cpu_data.x86 > 4))                              \
                __ret = (__typeof__(*(ptr)))__cmpxchg64((ptr),          \
                                (unsigned long long)(o),                \
                                (unsigned long long)(n));               \
        else                                                            \
                __ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),      \
                                (unsigned long long)(o),                \
                                (unsigned long long)(n));               \
        __ret;                                                          \
})
#define cmpxchg64_local(ptr, o, n)                                      \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        if (likely(boot_cpu_data.x86 > 4))                              \
                __ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr),    \
                                (unsigned long long)(o),                \
                                (unsigned long long)(n));               \
        else                                                            \
                __ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),      \
                                (unsigned long long)(o),                \
                                (unsigned long long)(n));               \
        __ret;                                                          \
})

#endif

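/*
 * Illustrative usage sketch (not part of the original header; the counter
 * and helper name are hypothetical): at this point cmpxchg64() is defined
 * for every configuration, so a 64-bit counter can be updated atomically
 * on a 32-bit kernel; the run-time check on boot_cpu_data above falls back
 * to cmpxchg_486_u64() on CPUs without CMPXCHG8B.  In that fallback
 * configuration boot_cpu_data must already have been declared (e.g. via
 * <asm/processor.h>) at the point of use.
 */
static inline unsigned long long example_inc_64bit(unsigned long long *counter)
{
        unsigned long long old, prev;

        do {
                old = *counter;
                prev = cmpxchg64(counter, old, old + 1);
        } while (prev != old);

        return old + 1;
}
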
#endif /* _ASM_X86_CMPXCHG_32_H */