1 /* Thread-local storage handling in the ELF dynamic linker. Generic version.
2 Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
19 #if defined SHARED || defined NOT_IN_libc
20 # error in buildsystem: This file is for libc.a
25 #include <sys/param.h>
38 #define _dl_malloc malloc
39 #define _dl_memset memset
40 #define _dl_mempcpy mempcpy
41 #define _dl_dprintf fprintf
42 #define _dl_debug_file stderr
45 /* Amount of excess space to allocate in the static TLS area
46 to allow dynamic loading of modules defining IE-model TLS data. */
47 # define TLS_STATIC_SURPLUS 64 + DL_NNS * 100
49 /* Value used for dtv entries for which the allocation is delayed. */
50 # define TLS_DTV_UNALLOCATED ((void *) -1l)
53 /* Out-of-memory handler. */
/* NOTE(review): the function header line and the call that actually
   terminates the process are among the lines not visible here; only
   the noreturn attribute and the diagnostic remain.  The message is
   printed to the linker's debug stream before aborting. */
56 __attribute__ ((__noreturn__))
60 _dl_dprintf (_dl_debug_file,
61 "cannot allocate thread-local memory: ABORT\n");
/* Minimal stand-in for memalign in the static-libc build: the
   ALIGNMENT argument is deliberately ignored and the request is
   forwarded to plain malloc (_dl_malloc is #defined to malloc above).
   NOTE(review): callers thus only get malloc's natural alignment —
   confirm that satisfies every l_tls_align passed in. */
68 void *_dl_memalign(size_t alignment, size_t bytes);
69 void *_dl_memalign(size_t alignment, size_t bytes)
71 return _dl_malloc(bytes);
76 * We are trying to perform a static TLS relocation in MAP, but it was
77 * dynamically loaded. This can only work if there is enough surplus in
78 * the static TLS area already allocated for each running thread. If this
79 * object's TLS segment is too big to fit, we fail. If it fits,
80 * we set MAP->l_tls_offset and return.
81 * This function intentionally does not return any value but signals error
82 * directly, as static TLS should be rare and code handling it should
83 not be inlined, if at all possible.
/* Try to place MAP's TLS segment inside the surplus of the static TLS
   area; on success records the offset in map->l_tls_offset and bumps
   _dl_tls_static_used.  Error paths print a diagnostic (the actual
   abort/signal lines are outside the visible listing). */
88 internal_function __attribute_noinline__
89 _dl_allocate_static_tls (struct link_map *map)
91 /* If the alignment requirements are too high fail. */
92 if (map->l_tls_align > _dl_tls_static_align)
95 _dl_dprintf(_dl_debug_file, "cannot allocate memory in static TLS block");
99 # if defined(TLS_TCB_AT_TP)
/* TCB-at-top layout: compute how many bytes below the TCB are still
   unused, then carve the new block out of that space. */
104 freebytes = _dl_tls_static_size - _dl_tls_static_used - TLS_TCB_SIZE;
106 blsize = map->l_tls_blocksize + map->l_tls_firstbyte_offset;
107 if (freebytes < blsize)
/* n = number of whole alignment units that still fit after the block. */
110 n = (freebytes - blsize) / map->l_tls_align;
112 size_t offset = _dl_tls_static_used + (freebytes - n * map->l_tls_align
113 - map->l_tls_firstbyte_offset);
115 map->l_tls_offset = _dl_tls_static_used = offset;
116 # elif defined(TLS_DTV_AT_TP)
/* DTV-at-top layout: blocks grow upward, so round the used size up to
   this module's alignment and append the new block. */
120 size_t offset = roundup (_dl_tls_static_used, map->l_tls_align);
121 used = offset + map->l_tls_blocksize;
124 /* dl_tls_static_used includes the TCB at the beginning. */
125 if (check > _dl_tls_static_size)
128 map->l_tls_offset = offset;
129 _dl_tls_static_used = used;
131 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
135 * If the object is not yet relocated we cannot initialize the
136 * static TLS region. Delay it.
138 if (((struct elf_resolve *) map)->init_flag & RELOCS_DONE)
142 * Update the slot information data for at least the generation of
143 * the DSO we are allocating data for.
/* Bring this thread's DTV up to the current generation before copying
   the init image, so the slot for this modid exists. */
145 if (__builtin_expect (THREAD_DTV()[0].counter != _dl_tls_generation, 0))
146 (void) _dl_update_slotinfo (map->l_tls_modid);
148 _dl_init_static_tls (map);
/* Not relocated yet: remember to initialize the region later. */
151 map->l_need_tls_init = 1;
/* Return the next free TLS module ID, preferring to reuse a gap left
   by an unloaded module (dl_tls_dtv_gaps) before growing
   dl_tls_max_dtv_idx. */
156 _dl_next_tls_modid (void)
160 if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
163 struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);
165 /* Note that this branch will never be executed during program
166 start since there are no gaps at that time. Therefore it
167 does not matter that the dl_tls_dtv_slotinfo is not allocated
168 yet when the function is called for the first time.
170 NB: the offset +1 is due to the fact that DTV[0] is used
171 for something else. */
172 result = GL(dl_tls_static_nelem) + 1;
173 if (result <= GL(dl_tls_max_dtv_idx))
/* Walk each slotinfo chunk looking for an entry whose map is NULL,
   i.e. a previously used modid that is now free. */
176 while (result - disp < runp->len)
178 if (runp->slotinfo[result - disp].map == NULL)
182 assert (result <= GL(dl_tls_max_dtv_idx) + 1);
185 if (result - disp < runp->len)
190 while ((runp = runp->next) != NULL);
192 if (result > GL(dl_tls_max_dtv_idx))
194 /* The new index must indeed be exactly one higher than the
196 assert (result == GL(dl_tls_max_dtv_idx) + 1);
197 /* There is no gap anymore. */
198 GL(dl_tls_dtv_gaps) = false;
205 /* No gaps, allocate a new entry. */
208 result = ++GL(dl_tls_max_dtv_idx);
/* Lay out the static TLS block for all modules known at startup:
   assigns each slotinfo entry's map->l_tls_offset and initializes
   dl_tls_static_used/size/align.  Runs while the slotinfo list still
   has exactly one element. */
218 _dl_determine_tlsoffset (void)
220 size_t max_align = TLS_TCB_ALIGN;
222 size_t freebottom = 0;
224 /* The first element of the dtv slot info list is allocated. */
225 assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
226 /* There is at this point only one element in the
227 dl_tls_dtv_slotinfo_list list. */
228 assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);
230 struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
232 /* Determining the offset of the various parts of the static TLS
233 block has several dependencies. In addition we have to work
234 around bugs in some toolchains.
236 Each TLS block from the objects available at link time has a size
237 and an alignment requirement. The GNU ld computes the alignment
238 requirements for the data at the positions *in the file*, though.
239 I.e, it is not simply possible to allocate a block with the size
240 of the TLS program header entry. The data is laid out assuming
241 that the first byte of the TLS block fulfills
243 p_vaddr mod p_align == &TLS_BLOCK mod p_align
245 This means we have to add artificial padding at the beginning of
246 the TLS block. These bytes are never used for the TLS data in
247 this module but the first byte allocated must be aligned
248 according to mod p_align == 0 so that the first byte of the TLS
249 block is aligned according to p_vaddr mod p_align. This is ugly
250 and the linker can help by computing the offsets in the TLS block
251 assuming the first byte of the TLS block is aligned according to
254 The extra space which might be allocated before the first byte of
255 the TLS block need not go unused. The code below tries to use
256 that memory for the next TLS block. This can work if the total
257 memory requirement for the next TLS block is smaller than the
260 # if defined(TLS_TCB_AT_TP)
261 /* We simply start with zero. */
265 for (cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
267 assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);
/* firstbyte = padding needed so the block's first byte lands at the
   file-relative alignment the linker assumed (see comment above). */
269 size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
270 & (slotinfo[cnt].map->l_tls_align - 1));
272 max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
/* First try to fit this block into a previously created gap
   (freetop..freebottom) before extending the overall offset. */
274 if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
276 off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
277 - firstbyte, slotinfo[cnt].map->l_tls_align)
279 if (off <= freebottom)
283 /* XXX For some architectures we perhaps should store the
285 slotinfo[cnt].map->l_tls_offset = off;
290 off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
291 slotinfo[cnt].map->l_tls_align) + firstbyte;
292 if (off > offset + slotinfo[cnt].map->l_tls_blocksize
293 + (freebottom - freetop))
296 freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
300 /* XXX For some architectures we perhaps should store the
302 slotinfo[cnt].map->l_tls_offset = off;
/* Reserve surplus space so IE-model modules can be dlopened later. */
305 GL(dl_tls_static_used) = offset;
306 GL(dl_tls_static_size) = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
308 # elif defined(TLS_DTV_AT_TP)
309 /* The TLS blocks start right after the TCB. */
310 size_t offset = TLS_TCB_SIZE;
313 for (cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
315 assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);
317 size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
318 & (slotinfo[cnt].map->l_tls_align - 1));
320 max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
/* As above, first try to place the block in the current gap. */
322 if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
324 off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
325 if (off - freebottom < firstbyte)
326 off += slotinfo[cnt].map->l_tls_align;
327 if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
329 slotinfo[cnt].map->l_tls_offset = off - firstbyte;
330 freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
336 off = roundup (offset, slotinfo[cnt].map->l_tls_align);
337 if (off - offset < firstbyte)
338 off += slotinfo[cnt].map->l_tls_align;
340 slotinfo[cnt].map->l_tls_offset = off - firstbyte;
341 if (off - firstbyte - offset > freetop - freebottom)
344 freetop = off - firstbyte;
347 offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
350 GL(dl_tls_static_used) = offset;
351 GL(dl_tls_static_size) = roundup (offset + TLS_STATIC_SURPLUS,
354 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
357 /* The alignment requirement for the static TLS block. */
358 GL(dl_tls_static_align) = max_align;
362 /* This is called only when the data structure setup was skipped at startup,
363 when there was no need for it then. Now we have dynamically loaded
364 something needing TLS, or libpthread needs it. */
/* NOTE(review): the function signature line is not visible in this
   listing; the body below allocates the initial slotinfo list and
   seeds the module-count globals. */
369 assert (GL(dl_tls_dtv_slotinfo_list) == NULL);
370 assert (GL(dl_tls_max_dtv_idx) == 0);
/* Room for slot 0, one module, plus surplus for future dlopens. */
372 const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;
374 GL(dl_tls_dtv_slotinfo_list)
375 = calloc (1, (sizeof (struct dtv_slotinfo_list)
376 + nelem * sizeof (struct dtv_slotinfo)));
377 if (GL(dl_tls_dtv_slotinfo_list) == NULL)
380 GL(dl_tls_dtv_slotinfo_list)->len = nelem;
382 /* Number of elements in the static TLS block. It can't be zero
383 because of various assumptions. The one element is null. */
384 GL(dl_tls_static_nelem) = GL(dl_tls_max_dtv_idx) = 1;
386 /* This initializes more variables for us. */
387 _dl_determine_tlsoffset ();
/* Allocate a fresh DTV for the thread whose TCB is RESULT and install
   it there.  dtv[0] holds the current length; the two extra slots are
   for the length word and the generation counter. */
395 allocate_dtv (void *result)
400 /* We allocate a few more elements in the dtv than are needed for the
401 initial set of modules. This should avoid in most cases expansions
403 dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
404 dtv = calloc (dtv_length + 2, sizeof (dtv_t));
407 /* This is the initial length of the dtv. */
408 dtv[0].counter = dtv_length;
410 /* The rest of the dtv (including the generation counter) is
411 initialized with zero to indicate nothing there. */
413 /* Add the dtv to the thread data structures. */
414 INSTALL_DTV (result, dtv);
423 /* Get size and alignment requirements of the static TLS block. */
/* Out-parameters only; simply reports the globals computed by
   _dl_determine_tlsoffset. */
426 _dl_get_tls_static_info (size_t *sizep, size_t *alignp)
428 *sizep = GL(dl_tls_static_size);
429 *alignp = GL(dl_tls_static_align);
/* Allocate the static TLS block (plus TCB) for a new thread, zero the
   TCB area, and attach a fresh DTV.  Returns the TCB pointer (the
   position within the allocation depends on the TLS model). */
435 _dl_allocate_tls_storage (void)
438 size_t size = GL(dl_tls_static_size);
440 # if defined(TLS_DTV_AT_TP)
442 [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
443 ^ This should be returned. */
/* Round the pre-TCB area up to the static alignment so the TCB itself
   stays correctly aligned. */
444 size += (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
445 & ~(GL(dl_tls_static_align) - 1);
448 /* Allocate a correctly aligned chunk of memory. */
449 result = _dl_memalign (GL(dl_tls_static_align), size);
450 if (__builtin_expect (result != NULL, 1))
452 /* Allocate the DTV. */
453 void *allocated = result;
455 # if defined(TLS_TCB_AT_TP)
456 /* The TCB follows the TLS blocks. */
457 result = (char *) result + size - TLS_TCB_SIZE;
459 /* Clear the TCB data structure. We can't ask the caller (i.e.
460 libpthread) to do it, because we will initialize the DTV et al. */
461 _dl_memset (result, '\0', TLS_TCB_SIZE);
462 # elif defined(TLS_DTV_AT_TP)
463 result = (char *) result + size - GL(dl_tls_static_size);
465 /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
466 We can't ask the caller (i.e. libpthread) to do it, because we will
467 initialize the DTV et al. */
468 _dl_memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
469 TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
472 result = allocate_dtv (result);
/* Fill in the DTV of the thread whose TCB is RESULT for every module
   currently loaded: static-TLS modules get their init image copied in
   place, dynamically loaded ones are marked TLS_DTV_UNALLOCATED for
   lazy allocation in __tls_get_addr. */
483 _dl_allocate_tls_init (void *result)
486 /* The memory allocation failed. */
489 dtv_t *dtv = GET_DTV (result);
490 struct dtv_slotinfo_list *listp;
494 /* We have to prepare the dtv for all currently loaded modules using
495 TLS. For those which are dynamically loaded we add the values
496 indicating deferred allocation. */
497 listp = GL(dl_tls_dtv_slotinfo_list);
/* Slot 0 of the first chunk is reserved (generation counter), hence
   the start index of 1 on the first iteration only. */
502 for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
504 struct link_map *map;
507 /* Check for the total number of used slots. */
508 if (total + cnt > GL(dl_tls_max_dtv_idx))
511 map = listp->slotinfo[cnt].map;
516 /* Keep track of the maximum generation number. This might
517 not be the generation counter. */
518 maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);
520 if (map->l_tls_offset == NO_TLS_OFFSET)
522 /* For dynamically loaded modules we simply store
523 the value indicating deferred allocation. */
524 dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
525 dtv[map->l_tls_modid].pointer.is_static = false;
529 assert (map->l_tls_modid == cnt);
530 assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
531 # if defined(TLS_TCB_AT_TP)
532 assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
533 dest = (char *) result - map->l_tls_offset;
534 # elif defined(TLS_DTV_AT_TP)
535 dest = (char *) result + map->l_tls_offset;
537 # error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
540 /* Copy the initialization image and clear the BSS part. */
541 dtv[map->l_tls_modid].pointer.val = dest;
542 dtv[map->l_tls_modid].pointer.is_static = true;
543 _dl_memset (_dl_mempcpy (dest, map->l_tls_initimage,
544 map->l_tls_initimage_size), '\0',
545 map->l_tls_blocksize - map->l_tls_initimage_size);
549 if (total >= GL(dl_tls_max_dtv_idx))
553 assert (listp != NULL);
556 /* The DTV version is up-to-date now. */
557 dtv[0].counter = maxgen;
/* Allocate TLS for a new thread.  If MEM is NULL a fresh static block
   is allocated; otherwise the caller-supplied memory gets a DTV
   attached.  Either way the DTV is then initialized. */
564 _dl_allocate_tls (void *mem)
566 return _dl_allocate_tls_init (mem == NULL
567 ? _dl_allocate_tls_storage ()
568 : allocate_dtv (mem));
/* Free a thread's dynamically allocated TLS blocks and its DTV; if
   DEALLOC_TCB is set the static TLS block itself is released too
   (backing up from TCB to the start of the original allocation). */
574 _dl_deallocate_tls (void *tcb, bool dealloc_tcb)
576 dtv_t *dtv = GET_DTV (tcb);
579 /* We need to free the memory allocated for non-static TLS. */
580 for (cnt = 0; cnt < dtv[-1].counter; ++cnt)
581 if (! dtv[1 + cnt].pointer.is_static
582 && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
583 free (dtv[1 + cnt].pointer.val);
585 /* The array starts with dtv[-1]. */
/* The startup DTV came from the minimal allocator and must not be
   handed to the real free. */
587 if (dtv != GL(dl_initial_dtv))
593 # if defined(TLS_TCB_AT_TP)
594 /* The TCB follows the TLS blocks. Back up to free the whole block. */
595 tcb -= GL(dl_tls_static_size) - TLS_TCB_SIZE;
596 # elif defined(TLS_DTV_AT_TP)
597 /* Back up the TLS_PRE_TCB_SIZE bytes. */
598 tcb -= (TLS_PRE_TCB_SIZE + GL(dl_tls_static_align) - 1)
599 & ~(GL(dl_tls_static_align) - 1);
607 /* The __tls_get_addr function has two basic forms which differ in the
608 arguments. The IA-64 form takes two parameters, the module ID and
609 offset. The form used, among others, on IA-32 takes a reference to
610 a special structure which contain the same information. The second
611 form seems to be more often used (at the moment) so we default to
612 it. Users of the IA-64 form have to provide adequate definitions
613 of the following macros. */
/* Default (IA-32-style) definitions: a single tls_index structure
   carries both module ID and offset.  IA-64-style ports override
   these before including this file. */
614 # ifndef GET_ADDR_ARGS
615 # define GET_ADDR_ARGS tls_index *ti
617 # ifndef GET_ADDR_MODULE
618 # define GET_ADDR_MODULE ti->ti_module
620 # ifndef GET_ADDR_OFFSET
621 # define GET_ADDR_OFFSET ti->ti_offset
/* Allocate a TLS block for MAP (deferred allocation path), copy its
   initialization image and zero the remaining (BSS) part.  Memory
   comes from _dl_memalign with the module's alignment. */
626 allocate_and_init (struct link_map *map)
630 newp = _dl_memalign (map->l_tls_align, map->l_tls_blocksize);
634 /* Initialize the memory. */
635 _dl_memset (_dl_mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
636 '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
/* Bring the calling thread's DTV up to the generation of slot
   REQ_MODID: free entries for unloaded modules, grow the DTV if the
   modid exceeds its length, and mark newer dynamic modules as
   deferred.  Returns the link map for REQ_MODID (set where
   modid == req_modid below; the return statement itself is outside
   the visible lines). */
643 _dl_update_slotinfo (unsigned long int req_modid)
645 struct link_map *the_map = NULL;
646 dtv_t *dtv = THREAD_DTV ();
648 /* The global dl_tls_dtv_slotinfo array contains for each module
649 index the generation counter current when the entry was created.
650 This array never shrinks so that all module indices which were
651 valid at some time can be used to access it. Before the first
652 use of a new module index in this function the array was extended
653 appropriately. Access also does not have to be guarded against
654 modifications of the array. It is assumed that pointer-size
655 values can be read atomically even in SMP environments. It is
656 possible that other threads at the same time dynamically load
657 code and therefore add to the slotinfo list. This is a problem
658 since we must not pick up any information about incomplete work.
659 The solution to this is to ignore all dtv slots which were
660 created after the one we are currently interested. We know that
661 dynamic loading for this module is completed and this is the last
662 load operation we know finished. */
663 unsigned long int idx = req_modid;
664 struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
/* Locate the slotinfo chunk that holds REQ_MODID. */
666 while (idx >= listp->len)
672 if (dtv[0].counter < listp->slotinfo[idx].gen)
674 /* The generation counter for the slot is higher than what the
675 current dtv implements. We have to update the whole dtv but
676 only those entries with a generation counter <= the one for
677 the entry we need. */
678 size_t new_gen = listp->slotinfo[idx].gen;
681 /* We have to look through the entire dtv slotinfo list. */
682 listp = GL(dl_tls_dtv_slotinfo_list);
687 for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
689 size_t gen = listp->slotinfo[cnt].gen;
692 /* This is a slot for a generation younger than the
693 one we are handling now. It might be incompletely
694 set up so ignore it. */
697 /* If the entry is older than the current dtv layout we
698 know we don't have to handle it. */
699 if (gen <= dtv[0].counter)
702 /* If there is no map this means the entry is empty. */
703 struct link_map *map = listp->slotinfo[cnt].map;
706 /* If this modid was used at some point the memory
707 might still be allocated. */
708 if (! dtv[total + cnt].pointer.is_static
709 && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
711 free (dtv[total + cnt].pointer.val);
712 dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
718 /* Check whether the current dtv array is large enough. */
719 size_t modid = map->l_tls_modid;
720 assert (total + cnt == modid);
721 if (dtv[-1].counter < modid)
723 /* Reallocate the dtv. */
725 size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
726 size_t oldsize = dtv[-1].counter;
728 assert (map->l_tls_modid <= newsize);
730 if (dtv == GL(dl_initial_dtv))
732 /* This is the initial dtv that was allocated
733 during rtld startup using the dl-minimal.c
734 malloc instead of the real malloc. We can't
735 free it, we have to abandon the old storage. */
737 newp = malloc ((2 + newsize) * sizeof (dtv_t));
740 _dl_memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t));
744 newp = realloc (&dtv[-1],
745 (2 + newsize) * sizeof (dtv_t));
750 newp[0].counter = newsize;
752 /* Clear the newly allocated part. */
753 _dl_memset (newp + 2 + oldsize, '\0',
754 (newsize - oldsize) * sizeof (dtv_t));
756 /* Point dtv to the generation counter. */
759 /* Install this new dtv in the thread data
761 INSTALL_NEW_DTV (dtv);
764 /* If there is currently memory allocate for this
765 dtv entry free it. */
766 /* XXX Ideally we will at some point create a memory
768 if (! dtv[modid].pointer.is_static
769 && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
770 /* Note that free is called for NULL as well. We
771 deallocate even if it is this dtv entry we are
772 supposed to load. The reason is that we call
773 memalign and not malloc. */
774 free (dtv[modid].pointer.val);
776 /* This module is loaded dynamically. We defer memory
778 dtv[modid].pointer.is_static = false;
779 dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;
781 if (modid == req_modid)
787 while ((listp = listp->next) != NULL);
789 /* This will be the new maximum generation counter. */
790 dtv[0].counter = new_gen;
797 /* The generic dynamic and local dynamic model cannot be used in
798 statically linked applications. */
/* Return the address of the TLS variable described by the argument
   (module + offset).  Updates the DTV if a newer generation exists,
   and performs the deferred per-module allocation on first access. */
800 __tls_get_addr (GET_ADDR_ARGS)
802 dtv_t *dtv = THREAD_DTV ();
803 struct link_map *the_map = NULL;
/* Fast path: DTV already at the current generation. */
806 if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
807 the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
809 p = dtv[GET_ADDR_MODULE].pointer.val;
811 if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
813 /* The allocation was deferred. Do it now. */
816 /* Find the link map for this module. */
817 size_t idx = GET_ADDR_MODULE;
818 struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
820 while (idx >= listp->len)
826 the_map = listp->slotinfo[idx].map;
829 p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
830 dtv[GET_ADDR_MODULE].pointer.is_static = false;
833 return (char *) p + GET_ADDR_OFFSET;
/* Record the freshly loaded object L in the dtv slotinfo table at its
   modid, growing the list with a new chunk when the index does not
   fit.  On allocation failure the generation counter is still bumped
   (stale slots may already reference L) and the process aborts. */
839 void _dl_add_to_slotinfo (struct link_map *l);
841 _dl_add_to_slotinfo (struct link_map *l)
843 /* Now that we know the object is loaded successfully add
844 modules containing TLS data to the dtv info table. We
845 might have to increase its size. */
846 struct dtv_slotinfo_list *listp;
847 struct dtv_slotinfo_list *prevp;
848 size_t idx = l->l_tls_modid;
850 /* Find the place in the dtv slotinfo list. */
851 listp = GL(dl_tls_dtv_slotinfo_list);
852 prevp = NULL; /* Needed to shut up gcc. */
855 /* Does it fit in the array of this list element? */
856 if (idx < listp->len)
862 while (listp != NULL);
866 /* When we come here it means we have to add a new element
867 to the slotinfo list. And the new module must be in
871 listp = prevp->next = (struct dtv_slotinfo_list *)
872 malloc (sizeof (struct dtv_slotinfo_list)
873 + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo))