1 /* vi: set sw=4 ts=4: */
3 * Thread-local storage handling in the ELF dynamic linker.
5 * Copyright (C) 2005 by Steven J. Hill <sjhill@realitydiluted.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. The name of the above contributors may not be
13 * used to endorse or promote products derived from this software
14 * without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/* Allocator hooks.  Once the C library (or libpthread) is initialized it
   installs its real calloc/realloc/memalign/free implementations here so
   the dynamic linker stops using its minimal bootstrap allocator.
   A NULL value means "hook not installed yet" — the wrappers below then
   fall back to the linker-internal allocator. */
33 void *(*_dl_calloc_function) (size_t __nmemb, size_t __size) = NULL;
34 void *(*_dl_realloc_function) (void *__ptr, size_t __size) = NULL;
35 void *(*_dl_memalign_function) (size_t __boundary, size_t __size) = NULL;
/* NOTE(review): _dl_free_function has no "= NULL" initializer here, unlike
   the hooks above — presumably zero-initialized as a file-scope object. */
37 void (*_dl_free_function) (void *__ptr);
/* Forward declarations for functions defined later in this file. */
38 void *_dl_memalign (size_t __boundary, size_t __size);
39 struct link_map *_dl_update_slotinfo (unsigned long int req_modid);
41 /* Round up N to the nearest multiple of P, where P is a power of 2
42 --- without using libgcc division routines. */
/* Precondition: P must be a power of two; for any other P the mask
   ~(P - 1) does not select multiples of P and the result is wrong. */
43 #define roundup_pow2(n, p) (((n) + (p) - 1) & ~((p) - 1))
46 _dl_calloc (size_t __nmemb, size_t __size)
49 size_t size = (__size * __nmemb);
51 if (_dl_calloc_function)
52 return (*_dl_calloc_function) (__nmemb, __size);
54 if ((result = _dl_malloc(size)) != NULL) {
55 _dl_memset(result, 0, size);
/* realloc() replacement used inside the dynamic linker.  Forwards to the
 * libc-installed hook when present; the bootstrap fallback path is not
 * implemented and only emits a warning.
 * NOTE(review): the lines following the warning (presumably "return NULL;"
 * and the closing brace) are not visible in this listing. */
62 _dl_realloc (void * __ptr, size_t __size)
64 if (_dl_realloc_function)
65 return (*_dl_realloc_function) (__ptr, __size);
67 _dl_debug_early("NOT IMPLEMENTED PROPERLY!!!\n");
71 /* The __tls_get_addr function has two basic forms which differ in the
72 arguments. The IA-64 form takes two parameters, the module ID and
73 offset. The form used, among others, on IA-32 takes a reference to
74 a special structure which contain the same information. The second
75 form seems to be more often used (in the moment) so we default to
76 it. Users of the IA-64 form have to provide adequate definitions
77 of the following macros. */
/* NOTE(review): the "#ifndef GET_ADDR_ARGS" guard and the matching
   "#endif" lines for these conditionals are missing from this listing. */
79 # define GET_ADDR_ARGS tls_index *ti
81 #ifndef GET_ADDR_MODULE
82 # define GET_ADDR_MODULE ti->ti_module
84 #ifndef GET_ADDR_OFFSET
85 # define GET_ADDR_OFFSET ti->ti_offset
/*
 * Amount of excess space to allocate in the static TLS area
 * to allow dynamic loading of modules defining IE-model TLS data.
 *
 * Parenthesized so the macro expands safely inside any larger
 * expression: the original unparenthesized "64 + DL_NNS * 100" form
 * is a classic macro-hygiene hazard (e.g. "x * TLS_STATIC_SURPLUS"
 * would multiply only the 64).
 */
#define TLS_STATIC_SURPLUS (64 + DL_NNS * 100)

/* Value used for dtv entries for which the allocation is delayed. */
#define TLS_DTV_UNALLOCATED ((void *) -1l)
98 * We are trying to perform a static TLS relocation in MAP, but it was
99 * dynamically loaded. This can only work if there is enough surplus in
100 * the static TLS area already allocated for each running thread. If this
101 * object's TLS segment is too big to fit, we fail. If it fits,
102 * we set MAP->l_tls_offset and return.
103 * This function intentionally does not return any value but signals error
104 * directly, as static TLS should be rare and code handling it should
105 * not be inlined as much as possible.
/* NOTE(review): many intervening lines are elided in this listing
   (return type, braces, local declarations such as freebytes/blsize/n/used/
   check, the failure path, and the #endif).  Comments below describe only
   the visible code. */
108 internal_function __attribute_noinline__
109 _dl_allocate_static_tls (struct link_map *map)
111 /* If the alignment requirements are too high fail. */
112 if (map->l_tls_align > _dl_tls_static_align)
115 _dl_dprintf(2, "cannot allocate memory in static TLS block");
/* TLS_TCB_AT_TP: the TCB sits at the thread pointer and TLS blocks grow
   downward from it, so offsets are measured back from the TCB. */
119 # ifdef TLS_TCB_AT_TP
124 freebytes = _dl_tls_static_size - _dl_tls_static_used - TLS_TCB_SIZE;
126 blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
127 if (freebytes < blsize)
/* Align the remaining space down to the module's alignment; assumes
   l_tls_align is a power of two. */
130 n = (freebytes - blsize) & ~(map->l_tls_align - 1);
132 size_t offset = _dl_tls_static_used + (freebytes - n
133 - map->l_tls_firstbyte_offset);
135 map->l_tls_offset = _dl_tls_static_used = offset;
/* TLS_DTV_AT_TP: TLS blocks grow upward after the TCB. */
136 # elif defined(TLS_DTV_AT_TP)
140 size_t offset = roundup_pow2 (_dl_tls_static_used, map->l_tls_align);
141 used = offset + map->l_tls_blocksize;
144 /* dl_tls_static_used includes the TCB at the beginning. */
145 if (check > _dl_tls_static_size)
148 map->l_tls_offset = offset;
149 _dl_tls_static_used = used;
151 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
155 * If the object is not yet relocated we cannot initialize the
156 * static TLS region. Delay it.
158 if (((struct elf_resolve *) map)->init_flag & RELOCS_DONE)
162 * Update the slot information data for at least the generation of
163 * the DSO we are allocating data for.
/* Bring this thread's dtv up to date before touching the new block. */
165 if (__builtin_expect (THREAD_DTV()[0].counter != _dl_tls_generation, 0))
166 (void) _dl_update_slotinfo (map->l_tls_modid);
168 _dl_init_static_tls (map);
/* Not relocated yet: mark the map so initialization happens later. */
171 map->l_need_tls_init = 1;
175 /* Initialize static TLS area and DTV for current (only) thread.
176 libpthread implementations should provide their own hook
177 to handle all threads. */
/* NOTE(review): return type, braces and some lines (e.g. the handling after
   the failed-check dprintf, the #endif) are elided in this listing. */
179 internal_function __attribute_noinline__
180 _dl_nothread_init_static_tls (struct link_map *map)
/* Compute the address of this module's static TLS block relative to the
   thread pointer; direction depends on the TLS variant. */
182 # ifdef TLS_TCB_AT_TP
183 void *dest = (char *) THREAD_SELF - map->l_tls_offset;
184 # elif defined(TLS_DTV_AT_TP)
185 void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
187 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
190 /* Fill in the DTV slot so that a later LD/GD access will find it. */
191 dtv_t *dtv = THREAD_DTV ();
/* dtv[-1].counter holds the dtv capacity; the modid must fit. */
192 if (!(map->l_tls_modid <= dtv[-1].counter)) {
193 _dl_dprintf(2, "map->l_tls_modid <= dtv[-1].counter FAILED!\n");
196 dtv[map->l_tls_modid].pointer.val = dest;
197 dtv[map->l_tls_modid].pointer.is_static = true;
199 /* Initialize the memory. */
/* Copy the TLS init image, then zero the trailing tbss part. */
200 _dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
201 _dl_memset((dest + map->l_tls_initimage_size), '\0',
202 map->l_tls_blocksize - map->l_tls_initimage_size);
206 /* Taken from glibc/sysdeps/generic/dl-tls.c */
/* Fragment of the out-of-memory handler; its definition lines are elided
   in this listing. */
210 _dl_debug_early("cannot allocate thread-local memory: ABORT\n");
/* Return the next free TLS module ID, reusing a gap left by a dlclose'd
   module when _dl_tls_dtv_gaps says one may exist.  NOTE(review): return
   type, braces, locals (result, disp) and several control-flow lines are
   elided here. */
216 _dl_next_tls_modid (void)
220 if (__builtin_expect (_dl_tls_dtv_gaps, false))
223 struct dtv_slotinfo_list *runp = _dl_tls_dtv_slotinfo_list;
225 /* Note that this branch will never be executed during program
226 start since there are no gaps at that time. Therefore it
227 does not matter that the dl_tls_dtv_slotinfo is not allocated
228 yet when the function is called for the first times.
230 NB: the offset +1 is due to the fact that DTV[0] is used
231 for something else. */
232 result = _dl_tls_static_nelem + 1;
233 if (result <= _dl_tls_max_dtv_idx)
/* Scan each slotinfo list segment for an entry with no map (a gap).
   disp is the index displacement of the current segment. */
236 while (result - disp < runp->len)
238 if (runp->slotinfo[result - disp].map == NULL)
242 _dl_assert (result <= _dl_tls_max_dtv_idx + 1);
245 if (result - disp < runp->len)
250 while ((runp = runp->next) != NULL);
252 if (result > _dl_tls_max_dtv_idx)
254 /* The new index must indeed be exactly one higher than the
256 _dl_assert (result == _dl_tls_max_dtv_idx + 1);
257 /* There is no gap anymore. */
258 _dl_tls_dtv_gaps = false;
265 /* No gaps, allocate a new entry. */
268 result = ++_dl_tls_max_dtv_idx;
/* Lay out the static TLS block: assign an l_tls_offset to every module
 * known at startup and compute _dl_tls_static_size/_used/_align.
 * NOTE(review): return type, braces, and several declarations (offset,
 * freetop, cnt, off, the #endif lines) are elided in this listing;
 * comments below describe only the visible code. */
276 _dl_determine_tlsoffset (void)
278 size_t max_align = TLS_TCB_ALIGN;
280 size_t freebottom = 0;
282 /* The first element of the dtv slot info list is allocated. */
283 _dl_assert (_dl_tls_dtv_slotinfo_list != NULL);
284 /* There is at this point only one element in the
285 dl_tls_dtv_slotinfo_list list. */
286 _dl_assert (_dl_tls_dtv_slotinfo_list->next == NULL);
288 struct dtv_slotinfo *slotinfo = _dl_tls_dtv_slotinfo_list->slotinfo;
290 /* Determining the offset of the various parts of the static TLS
291 block has several dependencies. In addition we have to work
292 around bugs in some toolchains.
294 Each TLS block from the objects available at link time has a size
295 and an alignment requirement. The GNU ld computes the alignment
296 requirements for the data at the positions *in the file*, though.
297 I.e, it is not simply possible to allocate a block with the size
298 of the TLS program header entry. The data is layed out assuming
299 that the first byte of the TLS block fulfills
301 p_vaddr mod p_align == &TLS_BLOCK mod p_align
303 This means we have to add artificial padding at the beginning of
304 the TLS block. These bytes are never used for the TLS data in
305 this module but the first byte allocated must be aligned
306 according to mod p_align == 0 so that the first byte of the TLS
307 block is aligned according to p_vaddr mod p_align. This is ugly
308 and the linker can help by computing the offsets in the TLS block
309 assuming the first byte of the TLS block is aligned according to
312 The extra space which might be allocated before the first byte of
313 the TLS block need not go unused. The code below tries to use
314 that memory for the next TLS block. This can work if the total
315 memory requirement for the next TLS block is smaller than the
/* Variant 1: TLS blocks live below the TCB; offsets count downward. */
318 # ifdef TLS_TCB_AT_TP
319 /* We simply start with zero. */
/* Slot 0 is reserved, so module scanning starts at index 1. */
322 for (size_t cnt = 1; slotinfo[cnt].map != NULL; ++cnt)
324 _dl_assert (cnt < _dl_tls_dtv_slotinfo_list->len);
/* Padding needed so the block's first byte lands at the file-relative
   alignment GNU ld assumed (see long comment above). */
326 size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
327 & (slotinfo[cnt].map->l_tls_align - 1));
329 max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
/* First try to reuse the [freetop, freebottom) hole left over from a
   previous module's alignment padding. */
331 if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
333 off = roundup_pow2 (freetop + slotinfo[cnt].map->l_tls_blocksize
334 - firstbyte, slotinfo[cnt].map->l_tls_align)
336 if (off <= freebottom)
340 /* XXX For some architectures we perhaps should store the
342 slotinfo[cnt].map->l_tls_offset = off;
/* Otherwise extend the block past the current offset. */
347 off = roundup_pow2 (offset + slotinfo[cnt].map->l_tls_blocksize
348 - firstbyte, slotinfo[cnt].map->l_tls_align)
350 if (off > offset + slotinfo[cnt].map->l_tls_blocksize
351 + (freebottom - freetop))
354 freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
358 /* XXX For some architectures we perhaps should store the
360 slotinfo[cnt].map->l_tls_offset = off;
363 _dl_tls_static_used = offset;
/* Reserve surplus space for later dlopen'ed IE-model modules. */
364 _dl_tls_static_size = (roundup_pow2 (offset + TLS_STATIC_SURPLUS, max_align)
/* Variant 2: TLS blocks live above the TCB; offsets count upward. */
366 # elif defined(TLS_DTV_AT_TP)
367 /* The TLS blocks start right after the TCB. */
368 size_t offset = TLS_TCB_SIZE;
371 for (cnt = 1; slotinfo[cnt].map != NULL; ++cnt)
373 _dl_assert (cnt < _dl_tls_dtv_slotinfo_list->len);
375 size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
376 & (slotinfo[cnt].map->l_tls_align - 1));
378 max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
/* Try to fit the block into the [freebottom, freetop) hole first. */
380 if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
382 off = roundup_pow2 (freebottom, slotinfo[cnt].map->l_tls_align);
383 if (off - freebottom < firstbyte)
384 off += slotinfo[cnt].map->l_tls_align;
385 if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
387 slotinfo[cnt].map->l_tls_offset = off - firstbyte;
388 freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
/* Otherwise append after the current end of the block. */
394 off = roundup_pow2 (offset, slotinfo[cnt].map->l_tls_align);
395 if (off - offset < firstbyte)
396 off += slotinfo[cnt].map->l_tls_align;
398 slotinfo[cnt].map->l_tls_offset = off - firstbyte;
399 if (off - firstbyte - offset > freetop - freebottom)
402 freetop = off - firstbyte;
405 offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
408 _dl_tls_static_used = offset;
409 _dl_tls_static_size = roundup_pow2 (offset + TLS_STATIC_SURPLUS,
412 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
415 /* The alignment requirement for the static TLS block. */
416 _dl_tls_static_align = max_align;
419 /* This is called only when the data structure setup was skipped at startup,
420 when there was no need for it then. Now we have dynamically loaded
421 something needing TLS, or libpthread needs it. */
/* NOTE(review): the function signature line, braces, and the failure
   return after the NULL check are elided in this listing. */
422 rtld_hidden_proto(_dl_tls_setup)
/* Must not have been set up before. */
427 _dl_assert (_dl_tls_dtv_slotinfo_list == NULL);
428 _dl_assert (_dl_tls_max_dtv_idx == 0);
/* Slot 0 is reserved plus one usable entry, then surplus headroom. */
430 const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;
432 _dl_tls_dtv_slotinfo_list
433 = _dl_calloc (1, (sizeof (struct dtv_slotinfo_list)
434 + nelem * sizeof (struct dtv_slotinfo)));
435 if (_dl_tls_dtv_slotinfo_list == NULL)
438 _dl_tls_dtv_slotinfo_list->len = nelem;
440 /* Number of elements in the static TLS block. It can't be zero
441 because of various assumptions. The one element is null. */
442 _dl_tls_static_nelem = _dl_tls_max_dtv_idx = 1;
444 /* This initializes more variables for us. */
445 _dl_determine_tlsoffset ();
449 rtld_hidden_def (_dl_tls_setup)
/* Allocate a dtv for the thread whose TCB is RESULT and install it there.
 * NOTE(review): return type, braces, local declarations (dtv, dtv_length)
 * and the allocation-failure path are elided in this listing. */
453 allocate_dtv (void *result)
458 /* We allocate a few more elements in the dtv than are needed for the
459 initial set of modules. This should avoid in most cases expansions
461 dtv_length = _dl_tls_max_dtv_idx + DTV_SURPLUS;
/* +2: one slot for the capacity (dtv[-1]) and one for the generation
   counter (dtv[0]); _dl_calloc zero-fills everything. */
462 dtv = _dl_calloc (dtv_length + 2, sizeof (dtv_t));
465 /* This is the initial length of the dtv. */
466 dtv[0].counter = dtv_length;
468 /* The rest of the dtv (including the generation counter) is
469 initialized with zero to indicate nothing there. */
471 /* Add the dtv to the thread data structures. */
472 INSTALL_DTV (result, dtv);
480 /* Get size and alignment requirements of the static TLS block. */
/* Out-parameters only; reads the globals computed by
   _dl_determine_tlsoffset().  NOTE(review): return type and braces are
   elided in this listing. */
483 _dl_get_tls_static_info (size_t *sizep, size_t *alignp)
485 *sizep = _dl_tls_static_size;
486 *alignp = _dl_tls_static_align;
/* Allocate the raw memory for a thread's static TLS block plus TCB and
 * attach a freshly allocated dtv.  Returns the TCB pointer (variant
 * dependent) or, on failure paths not fully visible here, frees the raw
 * allocation.  NOTE(review): return type, braces, and some lines (e.g. the
 * "result == NULL" branch around the _dl_free) are elided in this listing. */
491 _dl_allocate_tls_storage (void)
494 size_t size = _dl_tls_static_size;
496 # if defined(TLS_DTV_AT_TP)
498 [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
499 ^ This should be returned. */
/* Grow the allocation so the pre-TCB area keeps the block aligned. */
500 size += (TLS_PRE_TCB_SIZE + _dl_tls_static_align - 1)
501 & ~(_dl_tls_static_align - 1);
504 /* Allocate a correctly aligned chunk of memory. */
505 result = _dl_memalign (_dl_tls_static_align, size);
506 if (__builtin_expect (result != NULL, 1))
508 /* Allocate the DTV. */
/* Remember the raw allocation so it can be freed if allocate_dtv fails. */
509 void *allocated = result;
511 # ifdef TLS_TCB_AT_TP
512 /* The TCB follows the TLS blocks. */
513 result = (char *) result + size - TLS_TCB_SIZE;
515 /* Clear the TCB data structure. We can't ask the caller (i.e.
516 libpthread) to do it, because we will initialize the DTV et al. */
517 _dl_memset (result, '\0', TLS_TCB_SIZE);
518 # elif defined(TLS_DTV_AT_TP)
519 result = (char *) result + size - _dl_tls_static_size;
521 /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
522 We can't ask the caller (i.e. libpthread) to do it, because we will
523 initialize the DTV et al. */
524 _dl_memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
525 TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
528 result = allocate_dtv (result);
530 _dl_free (allocated);
/* Fill a freshly allocated TLS storage area (RESULT) with the init images
 * of all currently loaded TLS modules and stamp the dtv generation.
 * NOTE(review): return type, braces, locals (total, maxgen, cnt, dest) and
 * the loop-advance/return lines are elided in this listing. */
538 _dl_allocate_tls_init (void *result)
541 /* The memory allocation failed. */
544 dtv_t *dtv = GET_DTV (result);
545 struct dtv_slotinfo_list *listp;
549 /* We have to prepare the dtv for all currently loaded modules using
550 TLS. For those which are dynamically loaded we add the values
551 indicating deferred allocation. */
552 listp = _dl_tls_dtv_slotinfo_list;
/* Skip reserved slot 0 only in the first list segment (total == 0). */
557 for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
559 struct link_map *map;
562 /* Check for the total number of used slots. */
563 if (total + cnt > _dl_tls_max_dtv_idx)
566 map = listp->slotinfo[cnt].map;
571 /* Keep track of the maximum generation number. This might
572 not be the generation counter. */
573 maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);
575 if (map->l_tls_offset == NO_TLS_OFFSET)
577 /* For dynamically loaded modules we simply store
578 the value indicating deferred allocation. */
579 dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
580 dtv[map->l_tls_modid].pointer.is_static = false;
584 _dl_assert (map->l_tls_modid == cnt);
585 _dl_assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
/* Compute this module's block address relative to RESULT (the TCB). */
586 # ifdef TLS_TCB_AT_TP
587 _dl_assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
588 dest = (char *) result - map->l_tls_offset;
589 # elif defined(TLS_DTV_AT_TP)
590 dest = (char *) result + map->l_tls_offset;
592 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
595 /* Copy the initialization image and clear the BSS part. */
596 dtv[map->l_tls_modid].pointer.val = dest;
597 dtv[map->l_tls_modid].pointer.is_static = true;
598 _dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
599 _dl_memset((dest + map->l_tls_initimage_size), '\0',
600 map->l_tls_blocksize - map->l_tls_initimage_size);
605 if (total >= _dl_tls_max_dtv_idx)
609 _dl_assert (listp != NULL);
612 /* The DTV version is up-to-date now. */
613 dtv[0].counter = maxgen;
/* Convenience entry: allocate (or reuse MEM as) TLS storage for a new
 * thread and initialize it.  NOTE(review): return type and braces are
 * elided in this listing. */
620 _dl_allocate_tls (void *mem)
622 return _dl_allocate_tls_init (mem == NULL
623 ? _dl_allocate_tls_storage ()
624 : allocate_dtv (mem));
/* Free the dynamically allocated TLS blocks of the thread whose TCB is TCB,
 * the dtv itself (unless it is the bootstrap dtv), and — when DEALLOC_TCB —
 * the TLS storage block.  NOTE(review): return type, braces, the free of
 * the dtv array, and the final free of the storage are elided in this
 * listing. */
629 _dl_deallocate_tls (void *tcb, bool dealloc_tcb)
631 dtv_t *dtv = GET_DTV (tcb);
634 /* We need to free the memory allocated for non-static TLS. */
/* dtv[-1].counter is the dtv capacity; only non-static, actually
   allocated entries are freed. */
635 for (cnt = 0; cnt < dtv[-1].counter; ++cnt)
636 if (! dtv[1 + cnt].pointer.is_static
637 && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
638 _dl_free (dtv[1 + cnt].pointer.val);
640 /* The array starts with dtv[-1]. */
/* The initial dtv came from the bootstrap allocator and must not be
   handed to the real free. */
641 if (dtv != _dl_initial_dtv)
646 # ifdef TLS_TCB_AT_TP
647 /* The TCB follows the TLS blocks. Back up to free the whole block. */
648 tcb -= _dl_tls_static_size - TLS_TCB_SIZE;
649 # elif defined(TLS_DTV_AT_TP)
650 /* Back up the TLS_PRE_TCB_SIZE bytes. */
651 tcb -= (TLS_PRE_TCB_SIZE + _dl_tls_static_align - 1)
652 & ~(_dl_tls_static_align - 1);
/* Allocate and initialize one module's dynamic TLS block: aligned memory,
 * init image copied in, remainder zeroed.  NOTE(review): return type,
 * braces, the NULL-check branch structure, and the final "return newp;"
 * are elided in this listing. */
659 allocate_and_init (struct link_map *map)
663 newp = _dl_memalign (map->l_tls_align, map->l_tls_blocksize);
666 _dl_dprintf(2, "%s:%d: Out of memory!!!\n", __FUNCTION__, __LINE__);
670 /* Initialize the memory. */
671 _dl_memcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size);
672 _dl_memset ((newp + map->l_tls_initimage_size), '\0',
673 map->l_tls_blocksize - map->l_tls_initimage_size);
/* Bring the calling thread's dtv up to date with the global slotinfo list,
 * at least far enough to cover module REQ_MODID.  Returns the link map for
 * REQ_MODID (or NULL).  NOTE(review): braces, several locals (total, cnt,
 * newp declaration, the dtv reassignment after realloc) and loop-control
 * lines are elided in this listing. */
679 _dl_update_slotinfo (unsigned long int req_modid)
681 struct link_map *the_map = NULL;
682 dtv_t *dtv = THREAD_DTV ();
684 /* The global dl_tls_dtv_slotinfo array contains for each module
685 index the generation counter current when the entry was created.
686 This array never shrinks so that all module indices which were
687 valid at some time can be used to access it. Before the first
688 use of a new module index in this function the array was extended
689 appropriately. Access also does not have to be guarded against
690 modifications of the array. It is assumed that pointer-size
691 values can be read atomically even in SMP environments. It is
692 possible that other threads at the same time dynamically load
693 code and therefore add to the slotinfo list. This is a problem
694 since we must not pick up any information about incomplete work.
695 The solution to this is to ignore all dtv slots which were
696 created after the one we are currently interested. We know that
697 dynamic loading for this module is completed and this is the last
698 load operation we know finished. */
699 unsigned long int idx = req_modid;
700 struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list;
702 _dl_debug_early ("Updating slotinfo for module %d\n", req_modid);
/* Walk the segmented list to the segment containing req_modid. */
704 while (idx >= listp->len)
/* Only act if our dtv generation is older than the slot's. */
710 if (dtv[0].counter < listp->slotinfo[idx].gen)
712 /* The generation counter for the slot is higher than what the
713 current dtv implements. We have to update the whole dtv but
714 only those entries with a generation counter <= the one for
715 the entry we need. */
716 size_t new_gen = listp->slotinfo[idx].gen;
719 /* We have to look through the entire dtv slotinfo list. */
720 listp = _dl_tls_dtv_slotinfo_list;
/* Slot 0 of the first segment is reserved (total == 0). */
725 for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
727 size_t gen = listp->slotinfo[cnt].gen;
730 /* This is a slot for a generation younger than the
731 one we are handling now. It might be incompletely
732 set up so ignore it. */
735 /* If the entry is older than the current dtv layout we
736 know we don't have to handle it. */
737 if (gen <= dtv[0].counter)
740 /* If there is no map this means the entry is empty. */
741 struct link_map *map = listp->slotinfo[cnt].map;
744 /* If this modid was used at some point the memory
745 might still be allocated. */
746 if (! dtv[total + cnt].pointer.is_static
747 && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
749 _dl_free (dtv[total + cnt].pointer.val);
750 dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
756 /* Check whether the current dtv array is large enough. */
757 size_t modid = map->l_tls_modid;
758 _dl_assert (total + cnt == modid);
759 if (dtv[-1].counter < modid)
761 /* Reallocate the dtv. */
763 size_t newsize = _dl_tls_max_dtv_idx + DTV_SURPLUS;
764 size_t oldsize = dtv[-1].counter;
766 _dl_assert (map->l_tls_modid <= newsize);
768 if (dtv == _dl_initial_dtv)
770 /* This is the initial dtv that was allocated
771 during rtld startup using the dl-minimal.c
772 malloc instead of the real malloc. We can't
773 free it, we have to abandon the old storage. */
775 newp = _dl_malloc ((2 + newsize) * sizeof (dtv_t));
/* Copy from dtv[-1] (the length slot) through the old entries. */
778 _dl_memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t));
782 newp = _dl_realloc (&dtv[-1],
783 (2 + newsize) * sizeof (dtv_t));
788 newp[0].counter = newsize;
790 /* Clear the newly allocated part. */
791 _dl_memset (newp + 2 + oldsize, '\0',
792 (newsize - oldsize) * sizeof (dtv_t));
794 /* Point dtv to the generation counter. */
797 /* Install this new dtv in the thread data
799 INSTALL_NEW_DTV (dtv);
802 /* If there is currently memory allocate for this
803 dtv entry free it. */
804 /* XXX Ideally we will at some point create a memory
806 if (! dtv[modid].pointer.is_static
807 && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
808 /* Note that free is called for NULL is well. We
809 deallocate even if it is this dtv entry we are
810 supposed to load. The reason is that we call
811 memalign and not malloc. */
812 _dl_free (dtv[modid].pointer.val);
814 /* This module is loaded dynamically- We defer memory
816 dtv[modid].pointer.is_static = false;
817 dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;
/* Remember the map we were asked about so it can be returned. */
819 if (modid == req_modid)
825 while ((listp = listp->next) != NULL);
827 /* This will be the new maximum generation counter. */
828 dtv[0].counter = new_gen;
835 /* The generic dynamic and local dynamic model cannot be used in
836 statically linked applications. */
/* Resolve a GD/LD-model TLS access: update the dtv if the generation is
 * stale, allocate the module's block on first touch, and return the
 * address of the requested variable.  NOTE(review): return type, braces,
 * and some lines (e.g. looking up the_map when it is still NULL) are
 * elided in this listing. */
838 __tls_get_addr (GET_ADDR_ARGS)
840 dtv_t *dtv = THREAD_DTV ();
841 struct link_map *the_map = NULL;
/* Fast path: dtv already at the current generation, no update needed. */
844 if (__builtin_expect (dtv[0].counter != _dl_tls_generation, 0))
845 the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
847 p = dtv[GET_ADDR_MODULE].pointer.val;
849 if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
851 /* The allocation was deferred. Do it now. */
854 /* Find the link map for this module. */
855 size_t idx = GET_ADDR_MODULE;
856 struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list;
/* Walk segments until idx falls inside the current one. */
858 while (idx >= listp->len)
864 the_map = listp->slotinfo[idx].map;
867 p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
868 dtv[GET_ADDR_MODULE].pointer.is_static = false;
871 return (char *) p + GET_ADDR_OFFSET;
/* Record a newly loaded TLS module L in the global slotinfo list, growing
 * the list with a new segment when needed, and bump the TLS generation.
 * NOTE(review): return type, braces, the list-walk advance (idx -= len,
 * prevp = listp, listp = listp->next), the allocation NULL check, and
 * listp->next = NULL are elided in this listing. */
875 _dl_add_to_slotinfo (struct link_map *l)
877 /* Now that we know the object is loaded successfully add
878 modules containing TLS data to the dtv info table. We
879 might have to increase its size. */
880 struct dtv_slotinfo_list *listp;
881 struct dtv_slotinfo_list *prevp;
882 size_t idx = l->l_tls_modid;
884 _dl_debug_early("Adding to slotinfo for %s\n", l->l_name);
886 /* Find the place in the dtv slotinfo list. */
887 listp = _dl_tls_dtv_slotinfo_list;
888 prevp = NULL; /* Needed to shut up gcc. */
891 /* Does it fit in the array of this list element? */
892 if (idx < listp->len)
898 while (listp != NULL);
902 /* When we come here it means we have to add a new element
903 to the slotinfo list. And the new module must be in
/* idx has been reduced segment by segment; 0 means "first slot of the
   new segment". */
905 _dl_assert (idx == 0);
907 listp = prevp->next = (struct dtv_slotinfo_list *)
908 _dl_malloc (sizeof (struct dtv_slotinfo_list)
909 + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
912 /* We ran out of memory. We will simply fail this
913 call but don't undo anything we did so far. The
914 application will crash or be terminated anyway very
917 /* We have to do this since some entries in the dtv
918 slotinfo array might already point to this
920 ++_dl_tls_generation;
922 _dl_dprintf (_dl_debug_file,
923 "cannot create TLS data structures: ABORT\n");
927 listp->len = TLS_SLOTINFO_SURPLUS;
929 _dl_memset (listp->slotinfo, '\0',
930 TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
933 /* Add the information into the slotinfo data structure. */
/* The entry carries generation + 1: it only becomes visible to readers
   once the generation counter below is bumped. */
934 listp->slotinfo[idx].map = l;
935 listp->slotinfo[idx].gen = _dl_tls_generation + 1;
936 /* ??? ideally this would be done once per call to dlopen. However there's
937 no easy way to indicate whether a library used TLS, so do it here
939 /* Bump the TLS generation number. */
940 _dl_tls_generation++;
943 /* Taken from glibc/elf/rtld.c */
/* Set once TLS_INIT_TP has run for the main thread (see init_tls below). */
944 static bool tls_init_tp_called;
946 /* _dl_error_catch_tsd points to this for the single-threaded case.
947 It's reset by the thread library for multithreaded programs. */
/* NOTE(review): the body of this function (presumably returning the
   address of a static data slot) is elided in this listing. */
948 void ** __attribute__ ((const))
949 _dl_initial_error_catch_tsd (void)
/* One-time TLS bootstrap for the dynamic linker: build the slotinfo list
 * from the already loaded modules, lay out the static TLS block, allocate
 * storage for the initial thread and install its thread pointer.
 * NOTE(review): the function signature line, braces, loop counter
 * declarations (i, l), the early-return after the "do not do this twice"
 * check, and the final return are elided in this listing. */
960 rtld_hidden_proto(init_tls)
965 /* Number of elements in the static TLS block. */
966 _dl_tls_static_nelem = _dl_tls_max_dtv_idx;
968 /* Do not do this twice. The audit interface might have required
969 the DTV interfaces to be set up early. */
970 if (_dl_initial_dtv != NULL)
973 /* Allocate the array which contains the information about the
974 dtv slots. We allocate a few entries more than needed to
975 avoid the need for reallocation. */
976 size_t nelem = _dl_tls_max_dtv_idx + 1 + TLS_SLOTINFO_SURPLUS;
979 _dl_assert (_dl_tls_dtv_slotinfo_list == NULL);
980 _dl_tls_dtv_slotinfo_list = (struct dtv_slotinfo_list *)
981 _dl_calloc (sizeof (struct dtv_slotinfo_list)
982 + nelem * sizeof (struct dtv_slotinfo), 1);
983 /* No need to check the return value. If memory allocation failed
984 the program would have been terminated. */
986 struct dtv_slotinfo *slotinfo = _dl_tls_dtv_slotinfo_list->slotinfo;
987 _dl_tls_dtv_slotinfo_list->len = nelem;
988 _dl_tls_dtv_slotinfo_list->next = NULL;
990 /* Fill in the information from the loaded modules. No namespace
991 but the base one can be filled at this time. */
994 for (l = (struct link_map *) _dl_loaded_modules; l != NULL; l = l->l_next)
995 if (l->l_tls_blocksize != 0)
997 /* This is a module with TLS data. Store the map reference.
998 The generation counter is zero. */
1000 /* Skip slot[0]: it is never used */
1001 slotinfo[++i].map = l;
1003 _dl_assert (i == _dl_tls_max_dtv_idx);
1005 /* Compute the TLS offsets for the various blocks. */
1006 _dl_determine_tlsoffset ();
1008 /* Construct the static TLS block and the dtv for the initial
1009 thread. For some platforms this will include allocating memory
1010 for the thread descriptor. The memory for the TLS block will
1011 never be freed. It should be allocated accordingly. The dtv
1012 array can be changed if dynamic loading requires it. */
1013 void *tcbp = _dl_allocate_tls_storage ();
1015 _dl_debug_early("\ncannot allocate TLS data structures for initial thread");
1019 /* Store for detection of the special case by __tls_get_addr
1020 so it knows not to pass this dtv to the normal realloc. */
1021 _dl_initial_dtv = GET_DTV (tcbp);
1023 /* And finally install it for the main thread. If ld.so itself uses
1024 TLS we know the thread pointer was initialized earlier. */
1025 const char *lossage = TLS_INIT_TP (tcbp, USE___THREAD);
1026 if(__builtin_expect (lossage != NULL, 0)) {
1027 _dl_debug_early("cannot set up thread-local storage: %s\n", lossage);
1030 tls_init_tp_called = true;
1034 rtld_hidden_def (init_tls)