1 /* vi: set sw=4 ts=4: */
3 * Thread-local storage handling in the ELF dynamic linker.
5 * Copyright (C) 2005 by Steven J. Hill <sjhill@realitydiluted.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. The name of the above contributors may not be
13 * used to endorse or promote products derived from this software
14 * without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
/* Allocator hooks: a full malloc implementation (installed later, e.g.
   by libc/libpthread) can replace the linker's minimal allocators by
   filling in these function pointers.  They stay NULL until then --
   TODO confirm the installer against the rest of the file. */
void *(*_dl_calloc_function) (size_t __nmemb, size_t __size) = NULL;
void *(*_dl_realloc_function) (void *__ptr, size_t __size) = NULL;
void *(*_dl_memalign_function) (size_t __boundary, size_t __size) = NULL;
void (*_dl_free_function) (void *__ptr);
/* Forward declarations for helpers defined/used later in this file. */
void *_dl_memalign (size_t __boundary, size_t __size);
struct link_map *_dl_update_slotinfo (unsigned long int req_modid);
/* Round N up to the next multiple of P.  P must be a power of two; the
   add-and-mask form avoids pulling in libgcc's division helpers. */
#define roundup_pow2(n, p) (~((p) - 1) & ((n) + (p) - 1))
/* calloc() replacement for use inside the dynamic linker.  Delegates to
   _dl_calloc_function once a real allocator is installed; otherwise
   falls back to _dl_malloc followed by _dl_memset.
   NOTE(review): the product __size * __nmemb is not checked for
   overflow -- confirm callers only request small counts. */
_dl_calloc (size_t __nmemb, size_t __size)
	size_t size = (__size * __nmemb);
	if (_dl_calloc_function)
		return (*_dl_calloc_function) (__nmemb, __size);
	/* Fallback: plain allocation, then zero-fill on success. */
	if ((result = _dl_malloc(size)) != NULL) {
		_dl_memset(result, 0, size);
/* realloc() replacement: only functional once _dl_realloc_function has
   been installed by a real allocator; the fallback path merely logs
   that it is missing instead of reallocating. */
_dl_realloc (void * __ptr, size_t __size)
	if (_dl_realloc_function)
		return (*_dl_realloc_function) (__ptr, __size);
	_dl_debug_early("NOT IMPLEMENTED PROPERLY!!!\n");
71 /* The __tls_get_addr function has two basic forms which differ in the
72 arguments. The IA-64 form takes two parameters, the module ID and
73 offset. The form used, among others, on IA-32 takes a reference to
   a special structure which contains the same information. The second
75 form seems to be more often used (in the moment) so we default to
76 it. Users of the IA-64 form have to provide adequate definitions
77 of the following macros. */
/* Default (IA-32 style) argument convention for __tls_get_addr: a
   single pointer to a tls_index structure.  IA-64-style ports are
   expected to predefine these macros (see the comment above). */
# define GET_ADDR_ARGS tls_index *ti
#ifndef GET_ADDR_MODULE
# define GET_ADDR_MODULE ti->ti_module
#ifndef GET_ADDR_OFFSET
# define GET_ADDR_OFFSET ti->ti_offset
/*
 * Amount of excess space to allocate in the static TLS area
 * to allow dynamic loading of modules defining IE-model TLS data.
 * Parenthesized so the expansion is safe in any expression context
 * (the bare `64 + DL_NNS * 100` would mis-bind under, e.g., `2 *
 * TLS_STATIC_SURPLUS`).
 */
#define TLS_STATIC_SURPLUS (64 + DL_NNS * 100)

/* Value used for dtv entries for which the allocation is delayed. */
#define TLS_DTV_UNALLOCATED ((void *) -1l)
98 * We are trying to perform a static TLS relocation in MAP, but it was
99 * dynamically loaded. This can only work if there is enough surplus in
100 * the static TLS area already allocated for each running thread. If this
101 * object's TLS segment is too big to fit, we fail. If it fits,
102 * we set MAP->l_tls_offset and return.
 * This function intentionally does not return any value; it signals errors
 * directly, because static TLS should be rare and the code handling it
 * should be kept out of line as much as possible.
/* Try to carve space for MAP's TLS block out of the surplus kept in the
   already-allocated static TLS area (needed when a static-model TLS
   relocation targets a dynamically loaded object).  Failure is reported
   via _dl_dprintf rather than a return value. */
internal_function __attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
	/* If the alignment requirements are too high fail. */
	if (map->l_tls_align > _dl_tls_static_align)
		_dl_dprintf(2, "cannot allocate memory in static TLS block");
# ifdef TLS_TCB_AT_TP
	/* TCB-at-TP layout: usable bytes are what remains below the TCB. */
	freebytes = _dl_tls_static_size - _dl_tls_static_used - TLS_TCB_SIZE;
	blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
	if (freebytes < blsize)
	/* Round the remaining space down to the map's alignment. */
	n = (freebytes - blsize) & ~(map->l_tls_align - 1);
	size_t offset = _dl_tls_static_used + (freebytes - n
		- map->l_tls_firstbyte_offset);
	map->l_tls_offset = _dl_tls_static_used = offset;
# elif defined(TLS_DTV_AT_TP)
	/* DTV-at-TP layout: blocks grow upward from the used watermark. */
	size_t offset = roundup_pow2 (_dl_tls_static_used, map->l_tls_align);
	used = offset + map->l_tls_blocksize;
	/* dl_tls_static_used includes the TCB at the beginning. */
	if (check > _dl_tls_static_size)
	map->l_tls_offset = offset;
	_dl_tls_static_used = used;
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
	/* Initialize now only if the object is already relocated; otherwise
	   flag it (l_need_tls_init) so initialization happens later. */
	if (((struct elf_resolve *) map)->init_flag & RELOCS_DONE)
	/* Bring the DTV up to date before touching the new slot. */
	if (__builtin_expect (THREAD_DTV()[0].counter != _dl_tls_generation, 0))
		(void) _dl_update_slotinfo (map->l_tls_modid);
	_dl_init_static_tls (map);
	map->l_need_tls_init = 1;
175 /* Initialize static TLS area and DTV for current (only) thread.
176 libpthread implementations should provide their own hook
177 to handle all threads. */
/* Copy MAP's TLS init image into the current (only) thread's static TLS
   area and register the destination in the thread's DTV.  libpthread
   supplies its own multi-thread variant of this hook. */
attribute_hidden __attribute_noinline__
_dl_nothread_init_static_tls (struct link_map *map)
# ifdef TLS_TCB_AT_TP
	/* TCB-at-TP: the block lives below the thread pointer. */
	void *dest = (char *) THREAD_SELF - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
	/* DTV-at-TP: the block lives above TP, past the pre-TCB area. */
	void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
	/* Fill in the DTV slot so that a later LD/GD access will find it. */
	dtv_t *dtv = THREAD_DTV ();
	if (!(map->l_tls_modid <= dtv[-1].counter)) {
		_dl_dprintf(2, "map->l_tls_modid <= dtv[-1].counter FAILED!\n");
	dtv[map->l_tls_modid].pointer.val = dest;
	/* Mark static so deallocation never free()s this pointer. */
	dtv[map->l_tls_modid].pointer.is_static = true;
	/* Initialize the memory. */
	_dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
	_dl_memset((dest + map->l_tls_initimage_size), '\0',
		map->l_tls_blocksize - map->l_tls_initimage_size);
/* Taken from glibc/sysdeps/generic/dl-tls.c */
/* NOTE(review): this line belongs to the out-of-memory abort handler --
   confirm the surrounding definition against the full file. */
	_dl_debug_early("cannot allocate thread-local memory: ABORT\n");
/* Return the next free TLS module ID.  When _dl_tls_dtv_gaps is set,
   scan the slotinfo list for a slot freed by a previous dlclose and
   reuse it; otherwise hand out a fresh ++_dl_tls_max_dtv_idx. */
_dl_next_tls_modid (void)
	if (__builtin_expect (_dl_tls_dtv_gaps, false))
		struct dtv_slotinfo_list *runp = _dl_tls_dtv_slotinfo_list;
		/* Note that this branch will never be executed during program
		   start since there are no gaps at that time. Therefore it
		   does not matter that the dl_tls_dtv_slotinfo is not allocated
		   yet when the function is called for the first times.
		   NB: the offset +1 is due to the fact that DTV[0] is used
		   for something else. */
		result = _dl_tls_static_nelem + 1;
		if (result <= _dl_tls_max_dtv_idx)
			/* Scan this list element for an unused (map == NULL) slot. */
			while (result - disp < runp->len)
				if (runp->slotinfo[result - disp].map == NULL)
		_dl_assert (result <= _dl_tls_max_dtv_idx + 1);
		if (result - disp < runp->len)
		while ((runp = runp->next) != NULL);
		if (result > _dl_tls_max_dtv_idx)
			/* The new index must indeed be exactly one higher than the */
			_dl_assert (result == _dl_tls_max_dtv_idx + 1);
			/* There is no gap anymore. */
			_dl_tls_dtv_gaps = false;
	/* No gaps, allocate a new entry. */
	result = ++_dl_tls_max_dtv_idx;
275 # define MAX(x,y) (((x) > (y)) ? (x) : (y))
/* Walk the initial slotinfo list and assign each module its offset in
   the static TLS block, tracking the maximum alignment seen.  Sets
   _dl_tls_static_used/_dl_tls_static_size/_dl_tls_static_align when
   done.  Layout direction depends on TLS_TCB_AT_TP vs TLS_DTV_AT_TP. */
_dl_determine_tlsoffset (void)
	size_t max_align = TLS_TCB_ALIGN;
	size_t freebottom = 0;
	/* The first element of the dtv slot info list is allocated. */
	_dl_assert (_dl_tls_dtv_slotinfo_list != NULL);
	/* There is at this point only one element in the
	   dl_tls_dtv_slotinfo_list list. */
	_dl_assert (_dl_tls_dtv_slotinfo_list->next == NULL);
	struct dtv_slotinfo *slotinfo = _dl_tls_dtv_slotinfo_list->slotinfo;
	/* Determining the offset of the various parts of the static TLS
	   block has several dependencies. In addition we have to work
	   around bugs in some toolchains.
	   Each TLS block from the objects available at link time has a size
	   and an alignment requirement. The GNU ld computes the alignment
	   requirements for the data at the positions *in the file*, though.
	   I.e, it is not simply possible to allocate a block with the size
	   of the TLS program header entry. The data is layed out assuming
	   that the first byte of the TLS block fulfills
	   p_vaddr mod p_align == &TLS_BLOCK mod p_align
	   This means we have to add artificial padding at the beginning of
	   the TLS block. These bytes are never used for the TLS data in
	   this module but the first byte allocated must be aligned
	   according to mod p_align == 0 so that the first byte of the TLS
	   block is aligned according to p_vaddr mod p_align. This is ugly
	   and the linker can help by computing the offsets in the TLS block
	   assuming the first byte of the TLS block is aligned according to
	   The extra space which might be allocated before the first byte of
	   the TLS block need not go unused. The code below tries to use
	   that memory for the next TLS block. This can work if the total
	   memory requirement for the next TLS block is smaller than the */
# ifdef TLS_TCB_AT_TP
	/* We simply start with zero. */
	size_t cnt, offset = 0;
	for (cnt = 1; slotinfo[cnt].map != NULL; ++cnt)
		_dl_assert (cnt < _dl_tls_dtv_slotinfo_list->len);
		/* Padding needed so the module's first byte keeps its
		   p_vaddr mod p_align residue (see comment above). */
		size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
			& (slotinfo[cnt].map->l_tls_align - 1));
		max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
		/* First try to fit the block into a previously created hole. */
		if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
			off = roundup_pow2 (freetop + slotinfo[cnt].map->l_tls_blocksize
				- firstbyte, slotinfo[cnt].map->l_tls_align)
			if (off <= freebottom)
				/* XXX For some architectures we perhaps should store the */
				slotinfo[cnt].map->l_tls_offset = off;
		off = roundup_pow2 (offset + slotinfo[cnt].map->l_tls_blocksize
			- firstbyte, slotinfo[cnt].map->l_tls_align)
		if (off > offset + slotinfo[cnt].map->l_tls_blocksize
			+ (freebottom - freetop))
			freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
		/* XXX For some architectures we perhaps should store the */
		slotinfo[cnt].map->l_tls_offset = off;
	_dl_tls_static_used = offset;
	_dl_tls_static_size = (roundup_pow2 (offset + TLS_STATIC_SURPLUS, max_align)
# elif defined(TLS_DTV_AT_TP)
	/* The TLS blocks start right after the TCB. */
	size_t offset = TLS_TCB_SIZE;
	for (cnt = 1; slotinfo[cnt].map != NULL; ++cnt)
		_dl_assert (cnt < _dl_tls_dtv_slotinfo_list->len);
		size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
			& (slotinfo[cnt].map->l_tls_align - 1));
		max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
		/* First try to fit the block into an earlier alignment hole. */
		if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
			off = roundup_pow2 (freebottom, slotinfo[cnt].map->l_tls_align);
			if (off - freebottom < firstbyte)
				off += slotinfo[cnt].map->l_tls_align;
			if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
				slotinfo[cnt].map->l_tls_offset = off - firstbyte;
				freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
		off = roundup_pow2 (offset, slotinfo[cnt].map->l_tls_align);
		if (off - offset < firstbyte)
			off += slotinfo[cnt].map->l_tls_align;
		slotinfo[cnt].map->l_tls_offset = off - firstbyte;
		if (off - firstbyte - offset > freetop - freebottom)
			/* Remember the new hole for a later, smaller block. */
			freetop = off - firstbyte;
		offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
	_dl_tls_static_used = offset;
	_dl_tls_static_size = roundup_pow2 (offset + TLS_STATIC_SURPLUS,
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
	/* The alignment requirement for the static TLS block. */
	_dl_tls_static_align = max_align;
423 /* This is called only when the data structure setup was skipped at startup,
424 when there was no need for it then. Now we have dynamically loaded
425 something needing TLS, or libpthread needs it. */
rtld_hidden_proto(_dl_tls_setup)
/* Late TLS bootstrap: build the slotinfo list (with surplus entries)
   and compute static TLS offsets, for the case where startup skipped
   this because nothing needed TLS then. */
	_dl_assert (_dl_tls_dtv_slotinfo_list == NULL);
	_dl_assert (_dl_tls_max_dtv_idx == 0);
	/* 2 = unused slot 0 plus one real entry; surplus avoids realloc. */
	const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;
	_dl_tls_dtv_slotinfo_list
		= _dl_calloc (1, (sizeof (struct dtv_slotinfo_list)
			+ nelem * sizeof (struct dtv_slotinfo)));
	if (_dl_tls_dtv_slotinfo_list == NULL)
	_dl_tls_dtv_slotinfo_list->len = nelem;
	/* Number of elements in the static TLS block. It can't be zero
	   because of various assumptions. The one element is null. */
	_dl_tls_static_nelem = _dl_tls_max_dtv_idx = 1;
	/* This initializes more variables for us. */
	_dl_determine_tlsoffset ();
rtld_hidden_def (_dl_tls_setup)
/* Allocate a DTV for the thread whose TCB is RESULT and install it.
   The dtv is sized with DTV_SURPLUS spare entries; slot [0] holds the
   length, real module slots start at [1]. */
allocate_dtv (void *result)
	/* We allocate a few more elements in the dtv than are needed for the
	   initial set of modules. This should avoid in most cases expansions */
	dtv_length = _dl_tls_max_dtv_idx + DTV_SURPLUS;
	/* +2: one slot before the array (length) and the generation slot. */
	dtv = _dl_calloc (dtv_length + 2, sizeof (dtv_t));
	/* This is the initial length of the dtv. */
	dtv[0].counter = dtv_length;
	/* The rest of the dtv (including the generation counter) is
	   initialized with zero to indicate nothing there. */
	/* Add the dtv to the thread data structures. */
	INSTALL_DTV (result, dtv);
484 /* Get size and alignment requirements of the static TLS block. */
/* Report the static TLS block's size and alignment through the two out
   parameters (consumed by libpthread when creating threads). */
_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
	*sizep = _dl_tls_static_size;
	*alignp = _dl_tls_static_align;
/* Allocate the raw static TLS block plus TCB for a new thread, zero the
   TCB region, and attach a freshly allocated DTV.  Returns the TCB
   pointer (layout depends on TLS_TCB_AT_TP vs TLS_DTV_AT_TP). */
_dl_allocate_tls_storage (void)
	size_t size = _dl_tls_static_size;
# if defined(TLS_DTV_AT_TP)
	/* Memory layout: [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
	   ^ This should be returned. */
	size += (TLS_PRE_TCB_SIZE + _dl_tls_static_align - 1)
		& ~(_dl_tls_static_align - 1);
	/* Allocate a correctly aligned chunk of memory. */
	result = _dl_memalign (_dl_tls_static_align, size);
	if (__builtin_expect (result != NULL, 1))
		/* Allocate the DTV. */
		void *allocated = result;
# ifdef TLS_TCB_AT_TP
		/* The TCB follows the TLS blocks. */
		result = (char *) result + size - TLS_TCB_SIZE;
		/* Clear the TCB data structure. We can't ask the caller (i.e.
		   libpthread) to do it, because we will initialize the DTV et al. */
		_dl_memset (result, '\0', TLS_TCB_SIZE);
# elif defined(TLS_DTV_AT_TP)
		result = (char *) result + size - _dl_tls_static_size;
		/* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
		   We can't ask the caller (i.e. libpthread) to do it, because we will
		   initialize the DTV et al. */
		_dl_memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
			TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
		result = allocate_dtv (result);
		/* On DTV allocation failure, release the whole chunk. */
		_dl_free (allocated);
/* Fill a freshly allocated static TLS block + DTV (RESULT) with the
   init images of all currently loaded TLS modules.  Dynamically loaded
   modules get TLS_DTV_UNALLOCATED for deferred allocation.  Finishes by
   stamping the DTV with the highest generation seen. */
_dl_allocate_tls_init (void *result)
	/* The memory allocation failed. */
	dtv_t *dtv = GET_DTV (result);
	struct dtv_slotinfo_list *listp;
	/* We have to prepare the dtv for all currently loaded modules using
	   TLS. For those which are dynamically loaded we add the values
	   indicating deferred allocation. */
	listp = _dl_tls_dtv_slotinfo_list;
	/* Slot 0 of the first list element is reserved, hence the 1. */
	for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
		struct link_map *map;
		/* Check for the total number of used slots. */
		if (total + cnt > _dl_tls_max_dtv_idx)
		map = listp->slotinfo[cnt].map;
		/* Keep track of the maximum generation number. This might
		   not be the generation counter. */
		maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);
		if (map->l_tls_offset == NO_TLS_OFFSET)
			/* For dynamically loaded modules we simply store
			   the value indicating deferred allocation. */
			dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
			dtv[map->l_tls_modid].pointer.is_static = false;
		_dl_assert (map->l_tls_modid == cnt);
		_dl_assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
# ifdef TLS_TCB_AT_TP
		_dl_assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
		dest = (char *) result - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
		dest = (char *) result + map->l_tls_offset;
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
		/* Copy the initialization image and clear the BSS part. */
		dtv[map->l_tls_modid].pointer.val = dest;
		dtv[map->l_tls_modid].pointer.is_static = true;
		_dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
		_dl_memset((dest + map->l_tls_initimage_size), '\0',
			map->l_tls_blocksize - map->l_tls_initimage_size);
	if (total >= _dl_tls_max_dtv_idx)
	_dl_assert (listp != NULL);
	/* The DTV version is up-to-date now. */
	dtv[0].counter = maxgen;
/* Allocate and initialize TLS for a new thread.  With MEM == NULL a
   fresh block is allocated; otherwise only a DTV is attached to the
   caller-provided memory. */
_dl_allocate_tls (void *mem)
	return _dl_allocate_tls_init (mem == NULL
		? _dl_allocate_tls_storage ()
		: allocate_dtv (mem));
/* Release a thread's TLS: free every dynamically allocated dtv entry,
   the dtv itself (unless it is the startup-time _dl_initial_dtv), and,
   if DEALLOC_TCB, the whole static block after rewinding TCB to the
   allocation's base address. */
_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
	dtv_t *dtv = GET_DTV (tcb);
	/* We need to free the memory allocated for non-static TLS. */
	for (cnt = 0; cnt < dtv[-1].counter; ++cnt)
		if (! dtv[1 + cnt].pointer.is_static
		    && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
			_dl_free (dtv[1 + cnt].pointer.val);
	/* The array starts with dtv[-1]. */
	if (dtv != _dl_initial_dtv)
# ifdef TLS_TCB_AT_TP
	/* The TCB follows the TLS blocks. Back up to free the whole block. */
	tcb -= _dl_tls_static_size - TLS_TCB_SIZE;
# elif defined(TLS_DTV_AT_TP)
	/* Back up the TLS_PRE_TCB_SIZE bytes. */
	tcb -= (TLS_PRE_TCB_SIZE + _dl_tls_static_align - 1)
		& ~(_dl_tls_static_align - 1);
/* Allocate a dynamic TLS block for MAP (aligned per l_tls_align), copy
   in the init image, and zero the trailing BSS part.  Used for deferred
   (TLS_DTV_UNALLOCATED) allocations. */
allocate_and_init (struct link_map *map)
	newp = _dl_memalign (map->l_tls_align, map->l_tls_blocksize);
	_dl_dprintf(2, "%s:%d: Out of memory!!!\n", __FUNCTION__, __LINE__);
	/* Initialize the memory. */
	_dl_memcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size);
	_dl_memset ((newp + map->l_tls_initimage_size), '\0',
		map->l_tls_blocksize - map->l_tls_initimage_size);
/* Bring the current thread's DTV up to date with the global slotinfo
   list, at least far enough to cover REQ_MODID: free stale entries,
   grow the dtv if needed, and mark new dynamic modules as deferred.
   Returns the link map for REQ_MODID (NULL if slot is empty). */
_dl_update_slotinfo (unsigned long int req_modid)
	struct link_map *the_map = NULL;
	dtv_t *dtv = THREAD_DTV ();
	/* The global dl_tls_dtv_slotinfo array contains for each module
	   index the generation counter current when the entry was created.
	   This array never shrinks so that all module indices which were
	   valid at some time can be used to access it. Before the first
	   use of a new module index in this function the array was extended
	   appropriately. Access also does not have to be guarded against
	   modifications of the array. It is assumed that pointer-size
	   values can be read atomically even in SMP environments. It is
	   possible that other threads at the same time dynamically load
	   code and therefore add to the slotinfo list. This is a problem
	   since we must not pick up any information about incomplete work.
	   The solution to this is to ignore all dtv slots which were
	   created after the one we are currently interested. We know that
	   dynamic loading for this module is completed and this is the last
	   load operation we know finished. */
	unsigned long int idx = req_modid;
	struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list;
	_dl_debug_early ("Updating slotinfo for module %d\n", req_modid);
	/* Find the list element containing REQ_MODID's slot. */
	while (idx >= listp->len)
	if (dtv[0].counter < listp->slotinfo[idx].gen)
		/* The generation counter for the slot is higher than what the
		   current dtv implements. We have to update the whole dtv but
		   only those entries with a generation counter <= the one for
		   the entry we need. */
		size_t new_gen = listp->slotinfo[idx].gen;
		/* We have to look through the entire dtv slotinfo list. */
		listp = _dl_tls_dtv_slotinfo_list;
		for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
			size_t gen = listp->slotinfo[cnt].gen;
			/* This is a slot for a generation younger than the
			   one we are handling now. It might be incompletely
			   set up so ignore it. */
			/* If the entry is older than the current dtv layout we
			   know we don't have to handle it. */
			if (gen <= dtv[0].counter)
			/* If there is no map this means the entry is empty. */
			struct link_map *map = listp->slotinfo[cnt].map;
			/* If this modid was used at some point the memory
			   might still be allocated. */
			if (! dtv[total + cnt].pointer.is_static
			    && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
				_dl_free (dtv[total + cnt].pointer.val);
				dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
			/* Check whether the current dtv array is large enough. */
			size_t modid = map->l_tls_modid;
			_dl_assert (total + cnt == modid);
			if (dtv[-1].counter < modid)
				/* Reallocate the dtv. */
				size_t newsize = _dl_tls_max_dtv_idx + DTV_SURPLUS;
				size_t oldsize = dtv[-1].counter;
				_dl_assert (map->l_tls_modid <= newsize);
				if (dtv == _dl_initial_dtv)
					/* This is the initial dtv that was allocated
					   during rtld startup using the dl-minimal.c
					   malloc instead of the real malloc. We can't
					   free it, we have to abandon the old storage. */
					newp = _dl_malloc ((2 + newsize) * sizeof (dtv_t));
					_dl_memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t));
				newp = _dl_realloc (&dtv[-1],
					(2 + newsize) * sizeof (dtv_t));
				newp[0].counter = newsize;
				/* Clear the newly allocated part. */
				_dl_memset (newp + 2 + oldsize, '\0',
					(newsize - oldsize) * sizeof (dtv_t));
				/* Point dtv to the generation counter. */
				/* Install this new dtv in the thread data */
				INSTALL_NEW_DTV (dtv);
			/* If there is currently memory allocate for this
			   dtv entry free it. */
			/* XXX Ideally we will at some point create a memory */
			if (! dtv[modid].pointer.is_static
			    && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
				/* Note that free is called for NULL is well. We
				   deallocate even if it is this dtv entry we are
				   supposed to load. The reason is that we call
				   memalign and not malloc. */
				_dl_free (dtv[modid].pointer.val);
			/* This module is loaded dynamically.  We defer memory */
			dtv[modid].pointer.is_static = false;
			dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;
			if (modid == req_modid)
		while ((listp = listp->next) != NULL);
		/* This will be the new maximum generation counter. */
		dtv[0].counter = new_gen;
839 /* The generic dynamic and local dynamic model cannot be used in
840 statically linked applications. */
/* General-dynamic/local-dynamic TLS entry point: return the address of
   the TLS variable described by the tls_index argument, updating the
   DTV and performing the deferred per-module allocation if needed. */
__tls_get_addr (GET_ADDR_ARGS)
	dtv_t *dtv = THREAD_DTV ();
	struct link_map *the_map = NULL;
	/* Fast path skipped when the DTV lags the global generation. */
	if (__builtin_expect (dtv[0].counter != _dl_tls_generation, 0))
		the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
	p = dtv[GET_ADDR_MODULE].pointer.val;
	if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
		/* The allocation was deferred. Do it now. */
		/* Find the link map for this module. */
		size_t idx = GET_ADDR_MODULE;
		struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list;
		while (idx >= listp->len)
		the_map = listp->slotinfo[idx].map;
		p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
		dtv[GET_ADDR_MODULE].pointer.is_static = false;
	return (char *) p + GET_ADDR_OFFSET;
/* Record the TLS module L (at index l_tls_modid) in the global slotinfo
   list, growing the list with a new surplus-sized element if the index
   does not fit, and bump the TLS generation counter. */
_dl_add_to_slotinfo (struct link_map *l)
	/* Now that we know the object is loaded successfully add
	   modules containing TLS data to the dtv info table. We
	   might have to increase its size. */
	struct dtv_slotinfo_list *listp;
	struct dtv_slotinfo_list *prevp;
	size_t idx = l->l_tls_modid;
	_dl_debug_early("Adding to slotinfo for %s\n", l->l_name);
	/* Find the place in the dtv slotinfo list. */
	listp = _dl_tls_dtv_slotinfo_list;
	prevp = NULL; /* Needed to shut up gcc. */
	/* Does it fit in the array of this list element? */
	if (idx < listp->len)
	while (listp != NULL);
	/* When we come here it means we have to add a new element
	   to the slotinfo list. And the new module must be in */
	_dl_assert (idx == 0);
	listp = prevp->next = (struct dtv_slotinfo_list *)
		_dl_malloc (sizeof (struct dtv_slotinfo_list)
			+ TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
	/* We ran out of memory. We will simply fail this
	   call but don't undo anything we did so far. The
	   application will crash or be terminated anyway very */
	/* We have to do this since some entries in the dtv
	   slotinfo array might already point to this */
	++_dl_tls_generation;
	_dl_dprintf (_dl_debug_file,
		"cannot create TLS data structures: ABORT\n");
	listp->len = TLS_SLOTINFO_SURPLUS;
	_dl_memset (listp->slotinfo, '\0',
		TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
	/* Add the information into the slotinfo data structure. */
	listp->slotinfo[idx].map = l;
	/* gen is +1: the entry only becomes visible once the global
	   generation is bumped just below. */
	listp->slotinfo[idx].gen = _dl_tls_generation + 1;
	/* ??? ideally this would be done once per call to dlopen. However there's
	   no easy way to indicate whether a library used TLS, so do it here */
	/* Bump the TLS generation number. */
	_dl_tls_generation++;
950 /* Taken from glibc/elf/rtld.c */
/* Set once TLS_INIT_TP has installed the initial thread pointer (see
   init_tls below). */
static bool tls_init_tp_called;
/* _dl_error_catch_tsd points to this for the single-threaded case.
   It's reset by the thread library for multithreaded programs. */
void ** __attribute__ ((const))
_dl_initial_error_catch_tsd (void)
rtld_hidden_proto(init_tls)
/* Startup-time TLS initialization: build the slotinfo list from the
   loaded modules, compute static offsets, allocate the initial thread's
   TLS block + DTV, and install the thread pointer via TLS_INIT_TP. */
	/* Number of elements in the static TLS block. */
	_dl_tls_static_nelem = _dl_tls_max_dtv_idx;
	/* Do not do this twice. The audit interface might have required
	   the DTV interfaces to be set up early. */
	if (_dl_initial_dtv != NULL)
	/* Allocate the array which contains the information about the
	   dtv slots. We allocate a few entries more than needed to
	   avoid the need for reallocation. */
	size_t nelem = _dl_tls_max_dtv_idx + 1 + TLS_SLOTINFO_SURPLUS;
	_dl_assert (_dl_tls_dtv_slotinfo_list == NULL);
	_dl_tls_dtv_slotinfo_list = (struct dtv_slotinfo_list *)
		_dl_calloc (sizeof (struct dtv_slotinfo_list)
			+ nelem * sizeof (struct dtv_slotinfo), 1);
	/* No need to check the return value. If memory allocation failed
	   the program would have been terminated. */
	struct dtv_slotinfo *slotinfo = _dl_tls_dtv_slotinfo_list->slotinfo;
	_dl_tls_dtv_slotinfo_list->len = nelem;
	_dl_tls_dtv_slotinfo_list->next = NULL;
	/* Fill in the information from the loaded modules. No namespace
	   but the base one can be filled at this time. */
	for (l = (struct link_map *) _dl_loaded_modules; l != NULL; l = l->l_next)
		if (l->l_tls_blocksize != 0)
			/* This is a module with TLS data. Store the map reference.
			   The generation counter is zero. */
			/* Skip slot[0]: it will never be used */
			slotinfo[++i].map = l;
	_dl_assert (i == _dl_tls_max_dtv_idx);
	/* Compute the TLS offsets for the various blocks. */
	_dl_determine_tlsoffset ();
	/* Construct the static TLS block and the dtv for the initial
	   thread. For some platforms this will include allocating memory
	   for the thread descriptor. The memory for the TLS block will
	   never be freed. It should be allocated accordingly. The dtv
	   array can be changed if dynamic loading requires it. */
	void *tcbp = _dl_allocate_tls_storage ();
	_dl_debug_early("\ncannot allocate TLS data structures for initial thread");
	/* Store for detection of the special case by __tls_get_addr
	   so it knows not to pass this dtv to the normal realloc. */
	_dl_initial_dtv = GET_DTV (tcbp);
	/* And finally install it for the main thread. If ld.so itself uses
	   TLS we know the thread pointer was initialized earlier. */
	const char *lossage = TLS_INIT_TP (tcbp, USE___THREAD);
	if(__builtin_expect (lossage != NULL, 0)) {
		_dl_debug_early("cannot set up thread-local storage: %s\n", lossage);
	tls_init_tp_called = true;
rtld_hidden_def (init_tls)