1 /* Definition for thread-local data handling. nptl/x86_64 version.
2 Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
24 //l4/# include <asm/prctl.h> /* For ARCH_SET_FS. */
29 //l4/# include <sysdep.h>
30 # include <bits/kernel-features.h>
31 # include <bits/wordsize.h>
32 # include <xmmintrin.h>
35 /* Type for the dtv. */
/* NOTE(review): the dtv typedef and the opening of the tcbhead_t struct
   (and several of its leading members) are not visible in this chunk;
   only a subset of the member lines appears below.  */
49 void *tcb; /* Pointer to the TCB. Not necessarily the
50 thread descriptor used by libpthread. */
52 void *self; /* Pointer to the thread descriptor. */
56 uintptr_t stack_guard; /* Canary; written via THREAD_SET_STACK_GUARD below. */
57 uintptr_t pointer_guard; /* Cookie; written via THREAD_SET_POINTER_GUARD below. */
58 unsigned long int vgetcpu_cache[2]; /* presumably a cache for the vgetcpu vsyscall -- TODO confirm */
59 # ifndef __ASSUME_PRIVATE_FUTEX
/* NOTE(review): the member guarded by this #ifndef and the matching
   #endif are not visible in this chunk.  */
65 int rtld_must_xmm_save; /* Nonzero => ld.so must save SSE regs; see the RTLD_* macros below. */
67 /* Reservation of some values for the TM ABI. */
68 void *__private_tm[5];
71 /* Have space for the post-AVX register size. */
/* Presumably the save area used by _dl_x86_64_save_sse/_restore_sse
   (declared near the end of this file) -- verify against dl-trampoline.S.  */
72 __m128 rtld_savespace_sse[8][4];
78 #else /* __ASSEMBLER__ */
79 # include <tcb-offsets.h>
83 /* We require TLS support in the tools. */
/* The toolchain must support __thread and the tls_model attribute.  */
84 #define HAVE_TLS_SUPPORT 1
85 #define HAVE___THREAD 1
86 #define HAVE_TLS_MODEL_ATTRIBUTE 1
88 /* Signal that TLS support is available. */
/* NOTE(review): the definition that normally follows this comment is not
   visible in this chunk.  */
91 /* Alignment requirement for the stack. */
92 #define STACK_ALIGN 16
96 /* Get system call information. */
97 //l4/# include <sysdep.h>
100 /* Get the thread descriptor definition. */
102 # include <l4/sys/utcb.h>
103 # include <l4/sys/segment.h>
/* Lock prefix used by the atomic TLS macros below.  The two definitions
   are alternatives (no-prefix vs. locked); the conditional lines that
   select between them are not visible in this chunk -- NOTE(review):
   presumably an #ifdef UP / #else / #endif, as in upstream glibc.  */
107 # define LOCK_PREFIX /* nothing */
109 # define LOCK_PREFIX "lock;"
113 /* This is the size of the initial TCB. Can't be just sizeof (tcbhead_t),
114 because NPTL getpid, __libc_alloca_cutoff etc. need (almost) the whole
115 struct pthread even when not linked with -lpthread. */
116 # define TLS_INIT_TCB_SIZE sizeof (struct pthread)
118 /* Alignment requirements for the initial TCB. */
119 # define TLS_INIT_TCB_ALIGN __alignof__ (struct pthread)
121 /* This is the size of the TCB. */
122 # define TLS_TCB_SIZE sizeof (struct pthread)
124 /* Alignment requirements for the TCB. */
125 //# define TLS_TCB_ALIGN __alignof__ (struct pthread)
126 // Normally the above would be correct. But we have to store post-AVX
127 // vector registers in the TCB and we want the storage to be aligned.
128 // Unfortunately there isn't yet a type for these values and hence no
129 // 32-byte alignment requirement. Make this explicit, for now.
130 # define TLS_TCB_ALIGN 32
132 /* The TCB can have any size and the memory following the address the
133 thread pointer points to is unspecified. Allocate the TCB there. */
134 # define TLS_TCB_AT_TP 1
137 /* Install the dtv pointer. The pointer passed is to the element with
138 index -1 which contains the length. */
139 # define INSTALL_DTV(descr, dtvp) \
140 ((tcbhead_t *) (descr))->dtv = (dtvp) + 1
142 /* Install new dtv for current thread. */
/* __pd is only a dummy: THREAD_SETMEM addresses the descriptor through
   %fs and never dereferences its first argument (see THREAD_SETMEM
   below).  */
143 # define INSTALL_NEW_DTV(dtvp) \
144 ({ struct pthread *__pd; \
145 THREAD_SETMEM (__pd, header.dtv, (dtvp)); })
147 /* Return dtv of given thread descriptor. */
148 # define GET_DTV(descr) \
149 (((tcbhead_t *) (descr))->dtv)
152 /* Macros to load from and store into segment registers. */
/* These move the %fs selector value itself, not the segment base.  */
153 # define TLS_GET_FS() \
154 ({ int __seg; __asm__ ("movl %%fs, %0" : "=q" (__seg)); __seg; })
155 # define TLS_SET_FS(val) \
156 __asm__ ("movl %0, %%fs" :: "q" (val))
/* L4/Fiasco flavour of TLS_INIT_TP: initialize the TCB head and point the
   %fs base at the thread descriptor through the fiasco_amd64_set_fs()
   kernel interface instead of the Linux arch_prctl syscall.  Returns an
   error string or NULL (per the NPTL TLS_INIT_TP contract).
   NOTE(review): the function body is incomplete in this chunk -- the
   opening brace, the error path and the return statement are not
   visible.  */
159 static inline char const *TLS_INIT_TP(void *thrdescr, int secondcall)
162 tcbhead_t *_head = (tcbhead_t *)thrdescr;
163 _head->tcb = thrdescr;
/* For now the thread descriptor is at the same address as the TCB.  */
164 _head->self = thrdescr;
165 if (fiasco_amd64_set_fs(L4_INVALID_CAP, (l4_umword_t)thrdescr, l4_utcb()) < 0)
171 /* Code to initially initialize the thread pointer. This might need
172 special attention since 'errno' is not yet available and if the
173 operation can cause a failure 'errno' must not be touched.
175 We have to make the syscall for both uses of the macro since the
176 address might be (and probably is) different. */
/* NOTE(review): this is the upstream Linux arch_prctl(ARCH_SET_FS)
   variant and coexists in this chunk with the inline-function variant
   above -- confirm which definition is actually in effect for this port.
   Several lines of the macro (the _result declaration and the asm
   output operands) are not visible in this chunk.  "r11" and "cx" are
   clobbered because the syscall instruction destroys %rcx/%r11.  */
177 # define TLS_INIT_TP(thrdescr, secondcall) \
178 ({ void *_thrdescr = (thrdescr); \
179 tcbhead_t *_head = _thrdescr; \
182 _head->tcb = _thrdescr; \
183 /* For now the thread descriptor is at the same address. */ \
184 _head->self = _thrdescr; \
186 /* It is a simple syscall to set the %fs value for the thread. */ \
187 __asm__ __volatile__ ("syscall" \
189 : "0" ((unsigned long int) __NR_arch_prctl), \
190 "D" ((unsigned long int) ARCH_SET_FS), \
192 : "memory", "cc", "r11", "cx"); \
194 _result ? "cannot set %fs base address for thread-local storage" : 0; \
199 /* Return the address of the dtv for the current thread. */
/* Reads header.dtv through %fs; __pd is a dummy descriptor argument that
   THREAD_GETMEM never dereferences.  */
200 # define THREAD_DTV() \
201 ({ struct pthread *__pd; \
202 THREAD_GETMEM (__pd, header.dtv); })
205 /* Return the thread descriptor for the current thread.
207 The contained asm must *not* be marked __volatile__ since otherwise
209 pthread_descr self = thread_self();
210 do not get optimized away. */
/* Loads header.self from the TCB via %fs.  NOTE(review): the tail of the
   statement expression (the `__self; })` lines) is not visible in this
   chunk.  */
211 # define THREAD_SELF \
212 ({ struct pthread *__self; \
213 __asm__ ("movq %%fs:%c1,%q0" : "=r" (__self) \
214 : "i" (offsetof (struct pthread, header.self))); \
217 /* Magic for libthread_db to know how to do THREAD_SELF. */
/* 64-bit thread area addressed through the FS segment register.  */
218 # define DB_THREAD_SELF_INCLUDE <sys/reg.h> /* For the FS constant. */
219 # define DB_THREAD_SELF CONST_THREAD_AREA (64, FS)
221 /* Read member of the thread descriptor directly. */
/* Dispatches on sizeof(member) to pick the mov width (byte/long/quad);
   the access goes through %fs, so `descr` itself is never dereferenced.
   Sizes other than 1, 4 and 8 are rejected (the abort branch is among
   the lines not visible here).  NOTE(review): several lines -- the asm
   output operands, the abort call, and the closing `__value; })` -- are
   not visible in this chunk.  */
222 # define THREAD_GETMEM(descr, member) \
223 ({ __typeof (descr->member) __value; \
224 if (sizeof (__value) == 1) \
225 __asm__ __volatile__ ("movb %%fs:%P2,%b0" \
227 : "0" (0), "i" (offsetof (struct pthread, member))); \
228 else if (sizeof (__value) == 4) \
229 __asm__ __volatile__ ("movl %%fs:%P1,%0" \
231 : "i" (offsetof (struct pthread, member))); \
234 if (sizeof (__value) != 8) \
235 /* There should not be any value with a size other than 1, \
239 __asm__ __volatile__ ("movq %%fs:%P1,%q0" \
241 : "i" (offsetof (struct pthread, member))); \
246 /* Same as THREAD_GETMEM, but the member offset can be non-constant. */
/* `idx` scales the %fs-relative address by the element size (1, 4 or 8).
   NOTE(review): output operands, the abort branch and the closing of the
   statement expression are not visible in this chunk.  */
247 # define THREAD_GETMEM_NC(descr, member, idx) \
248 ({ __typeof (descr->member[0]) __value; \
249 if (sizeof (__value) == 1) \
250 __asm__ __volatile__ ("movb %%fs:%P2(%q3),%b0" \
252 : "0" (0), "i" (offsetof (struct pthread, member[0])), \
254 else if (sizeof (__value) == 4) \
255 __asm__ __volatile__ ("movl %%fs:%P1(,%q2,4),%0" \
257 : "i" (offsetof (struct pthread, member[0])), "r" (idx));\
260 if (sizeof (__value) != 8) \
261 /* There should not be any value with a size other than 1, \
265 __asm__ __volatile__ ("movq %%fs:%P1(,%q2,8),%q0" \
267 : "i" (offsetof (struct pthread, member[0])), \
273 /* Loading addresses of objects on x86-64 needs to be treated special
274 when generating PIC code. */
/* "nr" forbids symbolic immediates (PIC-safe); "ir" allows any immediate.
   NOTE(review): the #ifdef/#else/#endif lines selecting between these two
   alternative definitions are not visible in this chunk.  */
276 # define IMM_MODE "nr"
278 # define IMM_MODE "ir"
282 /* Set member of the thread descriptor directly. */
/* (The previous comment here described THREAD_SETMEM_NC; this macro takes
   a constant member offset.)  Width-dispatched store through %fs; `descr`
   is never dereferenced.  NOTE(review): the input-operand lines for the
   byte store, the abort branch and the closing of the statement
   expression are not visible in this chunk.  */
283 # define THREAD_SETMEM(descr, member, value) \
284 ({ if (sizeof (descr->member) == 1) \
285 __asm__ __volatile__ ("movb %b0,%%fs:%P1" : \
287 "i" (offsetof (struct pthread, member))); \
288 else if (sizeof (descr->member) == 4) \
289 __asm__ __volatile__ ("movl %0,%%fs:%P1" : \
290 : IMM_MODE (value), \
291 "i" (offsetof (struct pthread, member))); \
294 if (sizeof (descr->member) != 8) \
295 /* There should not be any value with a size other than 1, \
299 __asm__ __volatile__ ("movq %q0,%%fs:%P1" : \
300 : IMM_MODE ((unsigned long int) value), \
301 "i" (offsetof (struct pthread, member))); \
305 /* Same as THREAD_SETMEM, but the member offset can be non-constant. */
306 # define THREAD_SETMEM_NC(descr, member, idx, value) \
307 ({ if (sizeof (descr->member[0]) == 1) \
308 __asm__ __volatile__ ("movb %b0,%%fs:%P1(%q2)" : \
310 "i" (offsetof (struct pthread, member[0])), \
312 else if (sizeof (descr->member[0]) == 4) \
313 __asm__ __volatile__ ("movl %0,%%fs:%P1(,%q2,4)" : \
314 : IMM_MODE (value), \
315 "i" (offsetof (struct pthread, member[0])), \
319 if (sizeof (descr->member[0]) != 8) \
320 /* There should not be any value with a size other than 1, \
324 __asm__ __volatile__ ("movq %q0,%%fs:%P1(,%q2,8)" : \
325 : IMM_MODE ((unsigned long int) value), \
326 "i" (offsetof (struct pthread, member[0])), \
331 /* Atomic compare and exchange on TLS, returning old value. */
/* Only the 4-byte case is implemented; LOCK_PREFIX makes it atomic on
   SMP.  NOTE(review): the cmpxchg output operands and the tail of the
   statement expression (including the abort for other sizes) are not
   visible in this chunk.  */
332 # define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, newval, oldval) \
333 ({ __typeof (descr->member) __ret; \
334 __typeof (oldval) __old = (oldval); \
335 if (sizeof (descr->member) == 4) \
336 __asm__ __volatile__ (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3" \
338 : "0" (__old), "r" (newval), \
339 "i" (offsetof (struct pthread, member))); \
341 /* Not necessary for other sizes in the moment. */ \
346 /* Atomic logical and. */
/* Locked read-modify-write of a 4-byte member through %fs.
   NOTE(review): the `val` input operand line and the closing of the
   statement expression are not visible in this chunk.  */
347 # define THREAD_ATOMIC_AND(descr, member, val) \
348 (void) ({ if (sizeof ((descr)->member) == 4) \
349 __asm__ __volatile__ (LOCK_PREFIX "andl %1, %%fs:%P0" \
350 :: "i" (offsetof (struct pthread, member)), \
353 /* Not necessary for other sizes in the moment. */ \
357 /* Atomic set bit. */
/* Locked `orl` of (1 << bit) into a 4-byte member through %fs.
   NOTE(review): the closing of the statement expression is not visible
   in this chunk.  */
358 # define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
359 (void) ({ if (sizeof ((descr)->member) == 4) \
360 __asm__ __volatile__ (LOCK_PREFIX "orl %1, %%fs:%P0" \
361 :: "i" (offsetof (struct pthread, member)), \
362 "ir" (1 << (bit))); \
364 /* Not necessary for other sizes in the moment. */ \
/* Load start_routine into %rdi and call it with `arg`, both fetched   \
   from the thread descriptor through %fs.  The clobber list covers the \
   caller-saved registers of the x86-64 SysV call.  NOTE(review): the   \
   call instruction and the result/output lines are not visible in this \
   chunk.  */                                                           \
368 # define CALL_THREAD_FCT(descr) \
370 __asm__ __volatile__ ("movq %%fs:%P2, %%rdi\n\t" \
373 : "i" (offsetof (struct pthread, start_routine)), \
374 "i" (offsetof (struct pthread, arg)) \
375 : "di", "si", "cx", "dx", "r8", "r9", "r10", "r11", \
380 /* Set the stack guard field in TCB head. */
381 # define THREAD_SET_STACK_GUARD(value) \
382 THREAD_SETMEM (THREAD_SELF, header.stack_guard, value)
/* Copy the current thread's stack guard into DESCR (a new thread).  */
383 # define THREAD_COPY_STACK_GUARD(descr) \
384 ((descr)->header.stack_guard \
385 = THREAD_GETMEM (THREAD_SELF, header.stack_guard))
388 /* Set the pointer guard field in the TCB head. */
389 # define THREAD_SET_POINTER_GUARD(value) \
390 THREAD_SETMEM (THREAD_SELF, header.pointer_guard, value)
/* Copy the current thread's pointer guard into DESCR.  */
391 # define THREAD_COPY_POINTER_GUARD(descr) \
392 ((descr)->header.pointer_guard \
393 = THREAD_GETMEM (THREAD_SELF, header.pointer_guard))
396 /* Get and set the global scope generation counter in the TCB head. */
397 # define THREAD_GSCOPE_FLAG_UNUSED 0
398 # define THREAD_GSCOPE_FLAG_USED 1
399 # define THREAD_GSCOPE_FLAG_WAIT 2
/* Atomically swap the flag to UNUSED; if ld.so was waiting on it
   (__res == WAIT), wake the waiter via a private futex.
   NOTE(review): the `do {`/`} while (0)` framing, the `int __res`
   declaration and the xchg output operand are not visible in this
   chunk.  */
400 # define THREAD_GSCOPE_RESET_FLAG() \
403 __asm__ __volatile__ ("xchgl %0, %%fs:%P1" \
405 : "i" (offsetof (struct pthread, header.gscope_flag)), \
406 "0" (THREAD_GSCOPE_FLAG_UNUSED)); \
407 if (__res == THREAD_GSCOPE_FLAG_WAIT) \
408 lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
411 # define THREAD_GSCOPE_SET_FLAG() \
412 THREAD_SETMEM (THREAD_SELF, header.gscope_flag, THREAD_GSCOPE_FLAG_USED)
413 # define THREAD_GSCOPE_WAIT() \
414 GL(dl_wait_lookup_done) ()
418 /* Defined in dl-trampoline.S. */
419 extern void _dl_x86_64_save_sse (void);
420 extern void _dl_x86_64_restore_sse (void);
/* True when ld.so must preserve the SSE registers around a call into
   foreign (user) code.  */
422 # define RTLD_CHECK_FOREIGN_CALL \
423 (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save) != 0)
425 /* NB: Don't use the xchg operation because that would imply a lock
426 prefix which is expensive and unnecessary. The cache line is also
427 not contested at all. */
/* Remember the old flag value in a local and arm the save requirement.
   The local is later consumed by RTLD_FINALIZE_FOREIGN_CALL, so the two
   macros must be used in the same scope.  */
428 # define RTLD_ENABLE_FOREIGN_CALL \
429 int old_rtld_must_xmm_save = THREAD_GETMEM (THREAD_SELF, \
430 header.rtld_must_xmm_save); \
431 THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, 1)
/* If a save is armed, spill the SSE regs and clear the flag so nested
   calls do not save again.  NOTE(review): the do/while braces closing
   this macro are not visible in this chunk.  */
433 # define RTLD_PREPARE_FOREIGN_CALL \
434 do if (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save)) \
436 _dl_x86_64_save_sse (); \
437 THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, 0); \
/* A cleared flag means PREPARE actually saved, so restore the regs;     \
   then put back the flag value captured by RTLD_ENABLE_FOREIGN_CALL.    \
   NOTE(review): surrounding braces are not visible in this chunk.  */   \
441 # define RTLD_FINALIZE_FOREIGN_CALL \
443 if (THREAD_GETMEM (THREAD_SELF, header.rtld_must_xmm_save) == 0) \
444 _dl_x86_64_restore_sse (); \
445 THREAD_SETMEM (THREAD_SELF, header.rtld_must_xmm_save, \
446 old_rtld_must_xmm_save); \
451 #endif /* __ASSEMBLER__ */