/* Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <sys/param.h>
#include <lowlevellock.h>
#include <bits/kernel-features.h>
#ifndef NEED_SEPARATE_REGISTER_STACK

/* Most architectures have exactly one stack pointer.  Some have more.  */
# define STACK_VARIABLES void *stackaddr = NULL

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr

/* How to declare the function that receives these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) allocate_stack (attr, pd, &stackaddr)
#else

/* We need two stacks.  The kernel will place them but we have to tell
   the kernel about the size of the reserved address space.  */
# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0

/* How to pass the values to the 'create_thread' function.  */
# define STACK_VARIABLES_ARGS stackaddr, stacksize

/* How to declare the function that receives these parameters.  */
# define STACK_VARIABLES_PARMS void *stackaddr, size_t stacksize

/* How to declare allocate_stack.  */
# define ALLOCATE_STACK_PARMS void **stack, size_t *stacksize

/* This is how the function is called.  We do it this way to allow
   other variants of the function to have more parameters.  */
# define ALLOCATE_STACK(attr, pd) \
  allocate_stack (attr, pd, &stackaddr, &stacksize)

#endif
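
/* Illustrative sketch (not part of the original file): the macros above
   let pthread_create stay identical across one-stack and two-stack
   architectures.  The intended use is roughly

       int
       __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
                             void *(*start_routine) (void *), void *arg)
       {
         STACK_VARIABLES;
         struct pthread *pd = NULL;
         int err = ALLOCATE_STACK (iattr, &pd);
         ...
         return create_thread (pd, iattr, STACK_VARIABLES_ARGS);
       }

   so that on IA-64 the same call sites carry the extra 'stacksize'
   argument without any source change.  (The body shown here is a
   hypothetical abbreviation of pthread_create.c, not a copy.)  */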
/* Default alignment of stack.  */
#ifndef STACK_ALIGN
# define STACK_ALIGN __alignof__ (long double)
#endif

/* Default value for minimal stack size after allocating thread
   descriptor and guard.  */
#ifndef MINIMAL_REST_STACK
# define MINIMAL_REST_STACK 4096
#endif
/* Newer kernels have the MAP_STACK flag to indicate a mapping is used for
   a stack.  Use it when possible.  */
#ifndef MAP_STACK
# define MAP_STACK 0
#endif
/* This yields the pointer that TLS support code calls the thread pointer.  */
#if defined(TLS_TCB_AT_TP)
# define TLS_TPADJ(pd) (pd)
#elif defined(TLS_DTV_AT_TP)
# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
#endif
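
/* Worked example (illustrative): with TLS_DTV_AT_TP and a hypothetical
   TLS_PRE_TCB_SIZE of 0x700 bytes, the descriptor PD lies below the TCB,
   so the value handed to the TLS support code is

       (struct pthread *) ((char *) pd + 0x700)

   With TLS_TCB_AT_TP the descriptor is itself the TCB, so PD is passed
   through unchanged.  */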
/* Cache handling for not-yet freed stacks.  */

/* Maximum size in bytes of the cache.  */
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default.  */
static size_t stack_cache_actsize;

/* Mutex protecting the cache size and the lists below.  */
static int stack_cache_lock = LLL_LOCK_INITIALIZER;

/* List of queued stack frames.  */
static LIST_HEAD (stack_cache);

/* List of the stacks in use.  */
static LIST_HEAD (stack_used);

/* We need to record what list operations we are going to do so that,
   in case of an asynchronous interruption due to a fork() call, we
   can correct for the work.  */
static uintptr_t in_flight_stack;

/* List of the threads with user provided stacks in use.  No need to
   initialize this, since it's done in __pthread_initialize_minimal.  */
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)

#if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
/* Number of threads created.  */
static unsigned int nptl_ncreated;
#endif

/* Check whether the stack is still used or not.  */
#define FREE_P(descr) ((descr)->tid <= 0)
static void
stack_list_del (list_t *elem)
{
  in_flight_stack = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  in_flight_stack = 0;
}


static void
stack_list_add (list_t *elem, list_t *list)
{
  in_flight_stack = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  in_flight_stack = 0;
}
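
/* The protocol implemented by the two helpers above: IN_FLIGHT_STACK is
   set to the element (with the low bit distinguishing add from delete)
   before the list is modified and reset to zero afterwards; the write
   barriers order these stores.  If fork() interrupts the operation,
   __reclaim_stacks below sees a non-zero IN_FLIGHT_STACK in the child
   and can complete or replay the half-done list manipulation.  */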
/* We create a doubly-linked list of all cache entries.  Doubly linked
   because this allows removing entries from the end.  */


/* Get a stack frame from the cache.  We have to match by size since
   some blocks might be too small or far too large.  */
static struct pthread *
get_cached_stack (size_t *sizep, void **memp)
{
  size_t size = *sizep;
  struct pthread *result = NULL;
  list_t *entry;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Search the cache for a matching entry.  We search for the
     smallest stack which has at least the required size.  Note that
     in normal situations the size of all allocated stacks is the
     same.  At the very least there are only a few different sizes.
     Therefore this loop will exit early most of the time with an
     exact match.  */
  list_for_each (entry, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr) && curr->stackblock_size >= size)
        {
          if (curr->stackblock_size == size)
            {
              result = curr;
              break;
            }

          if (result == NULL
              || result->stackblock_size > curr->stackblock_size)
            result = curr;
        }
    }

  if (__builtin_expect (result == NULL, 0)
      /* Make sure the size difference is not too excessive.  In that
         case we do not use the block.  */
      || __builtin_expect (result->stackblock_size > 4 * size, 0))
    {
      /* Release the lock.  */
      lll_unlock (stack_cache_lock, LLL_PRIVATE);

      return NULL;
    }

  /* Dequeue the entry.  */
  stack_list_del (&result->list);

  /* And add to the list of stacks in use.  */
  stack_list_add (&result->list, &stack_used);

  /* And decrease the cache size.  */
  stack_cache_actsize -= result->stackblock_size;

  /* Release the lock early.  */
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  /* Report size and location of the stack to the caller.  */
  *sizep = result->stackblock_size;
  *memp = result->stackblock;

  /* Cancellation handling is back to the default.  */
  result->cancelhandling = 0;
  result->cleanup = NULL;

  /* No pending event.  */
  result->nextevent = NULL;

  /* Clear the DTV.  */
  dtv_t *dtv = GET_DTV (TLS_TPADJ (result));
  memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));

  /* Re-initialize the TLS.  */
  _dl_allocate_tls_init (TLS_TPADJ (result));

  return result;
}
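
/* Illustrative use (a sketch of the call made from allocate_stack below):

       size_t size = rounded_size;   (already includes TLS and guard space)
       void *mem;
       struct pthread *pd = get_cached_stack (&size, &mem);
       if (pd == NULL)
         ... fall back to mmap() and build the descriptor by hand ...

   On success SIZE and MEM describe the recycled block and PD is the
   re-initialized descriptor embedded in it.  */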
/* Free stacks until cache size is lower than LIMIT.  */
void
__free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &stack_cache)
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (FREE_P (curr))
        {
          /* Unlink the block.  */
          stack_list_del (entry);

          /* Account for the freed memory.  */
          stack_cache_actsize -= curr->stackblock_size;

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does
             something is really wrong.  */
          if (munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (stack_cache_actsize <= limit)
            break;
        }
    }
}
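
/* Since the loop above only stops once STACK_CACHE_ACTSIZE has dropped
   to LIMIT, calling __free_stacks (0) releases every currently unused
   cached stack.  An illustrative sketch (the cache lock must be held,
   as queue_stack below demonstrates):

       lll_lock (stack_cache_lock, LLL_PRIVATE);
       __free_stacks (0);
       lll_unlock (stack_cache_lock, LLL_PRIVATE);
*/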
/* Add a stack frame which is not used anymore to the stack cache.
   Must be called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  stack_list_add (&stack->list, &stack_cache);

  stack_cache_actsize += stack->stackblock_size;
  if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
    __free_stacks (stack_cache_maxsize);
}
static int
internal_function
change_stack_perm (struct pthread *pd
#ifdef NEED_SEPARATE_REGISTER_STACK
                   , size_t pagemask
#endif
                   )
{
#ifdef NEED_SEPARATE_REGISTER_STACK
  void *stack = (pd->stackblock
                 + (((((pd->stackblock_size - pd->guardsize) / 2)
                      & pagemask) + pd->guardsize) & pagemask));
  size_t len = pd->stackblock + pd->stackblock_size - stack;
#elif defined _STACK_GROWS_DOWN
  void *stack = pd->stackblock + pd->guardsize;
  size_t len = pd->stackblock_size - pd->guardsize;
#elif defined _STACK_GROWS_UP
  void *stack = pd->stackblock;
  size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP"
#endif
  if (mprotect (stack, len, PROT_READ | PROT_WRITE | PROT_EXEC) != 0)
    return errno;

  return 0;
}
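
/* Worked example (illustrative numbers): with a 1 MiB stack block, a
   64 KiB mid-block guard and 4 KiB pages in the separate-register-stack
   case,

       (0x100000 - 0x10000) / 2         = 0x78000  (already page aligned)
       (0x78000 + 0x10000) & pagemask   = 0x88000

   so STACK points 0x88000 bytes into the block and LEN is 0x78000
   bytes: only the half above the mid-block guard is re-protected.  */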
static int
allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
                ALLOCATE_STACK_PARMS)
{
  struct pthread *pd;
  size_t size;
  size_t pagesize_m1 = __getpagesize () - 1;
  void *stacktop;

  assert (attr != NULL);
  assert (powerof2 (pagesize_m1 + 1));
  assert (TCB_ALIGNMENT >= STACK_ALIGN);

  /* Get the stack size from the attribute if it is set.  Otherwise we
     use the default we determined at start time.  */
  size = attr->stacksize ?: __default_stacksize;

  /* Get memory for the stack.  */
  if (__builtin_expect (attr->flags & ATTR_FLAG_STACKADDR, 0))
    {
      uintptr_t adj;

      /* If the user also specified the size of the stack make sure it
         is large enough.  */
      if (attr->stacksize != 0
          && attr->stacksize < (__static_tls_size + MINIMAL_REST_STACK))
        return EINVAL;

      /* Adjust stack size for alignment of the TLS block.  */
#if defined(TLS_TCB_AT_TP)
      adj = ((uintptr_t) attr->stackaddr - TLS_TCB_SIZE)
            & __static_tls_align_m1;
      assert (size > adj + TLS_TCB_SIZE);
#elif defined(TLS_DTV_AT_TP)
      adj = ((uintptr_t) attr->stackaddr - __static_tls_size)
            & __static_tls_align_m1;
      assert (size > adj);
#endif
      /* The user provided some memory.  Let's hope it matches the
         size...  We do not allocate guard pages if the user provided
         the stack.  It is the user's responsibility to do this if it
         is wanted.  */
#if defined(TLS_TCB_AT_TP)
      pd = (struct pthread *) ((uintptr_t) attr->stackaddr
                               - TLS_TCB_SIZE - adj);
#elif defined(TLS_DTV_AT_TP)
      pd = (struct pthread *) (((uintptr_t) attr->stackaddr
                                - __static_tls_size - adj)
                               - TLS_PRE_TCB_SIZE);
#endif

      /* The user provided stack memory needs to be cleared.  */
      memset (pd, '\0', sizeof (struct pthread));

      /* The first TSD block is included in the TCB.  */
      pd->specific[0] = pd->specific_1stblock;

      /* Remember the stack-related values.  */
      pd->stackblock = (char *) attr->stackaddr - size;
      pd->stackblock_size = size;

      /* This is a user-provided stack.  It will not be queued in the
         stack cache nor will the memory (except the TLS memory) be freed.  */
      pd->user_stack = true;

      /* This is at least the second thread.  */
      pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
      /* The thread must know when private futexes are supported.  */
      pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
      /* Copy the sysinfo value from the parent.  */
      THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
#endif

      /* The process ID is also the same as that of the caller.  */
      pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

      /* Allocate the DTV for this thread.  */
      if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
        {
          /* Something went wrong.  */
          assert (errno == ENOMEM);
          return EAGAIN;
        }
      /* Prepare to modify global data.  */
      lll_lock (stack_cache_lock, LLL_PRIVATE);

      /* And add to the list of stacks in use.  */
      list_add (&pd->list, &__stack_user);

      lll_unlock (stack_cache_lock, LLL_PRIVATE);
    }
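  /* Caller-side sketch (illustrative, not part of this file): the branch
     above handles attributes prepared along the lines of

         pthread_attr_t a;
         pthread_attr_init (&a);
         void *stk = mmap (NULL, 1 << 20, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         pthread_attr_setstack (&a, stk, 1 << 20);

     pthread_attr_setstack takes the lowest address of the block while
     ATTR->stackaddr above is its top, so the attribute code performs
     the conversion; note also that no guard page is ever created for a
     user-supplied stack.  */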
  else
    {
      /* Allocate some anonymous memory.  If possible use the cache.  */
      size_t guardsize;
      size_t reqsize;
      void *mem;
      const int prot = (PROT_READ | PROT_WRITE);

#if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
      /* Add one more page for stack coloring.  Don't do it for stacks
         with 16 times pagesize or larger.  This might just cause
         unnecessary misalignment.  */
      if (size <= 16 * pagesize_m1)
        size += pagesize_m1 + 1;
#endif

      /* Adjust the stack size for alignment.  */
      size &= ~__static_tls_align_m1;
      assert (size != 0);

      /* Make sure the size of the stack is enough for the guard and
         possibly the thread descriptor.  */
      guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
      if (__builtin_expect (size < ((guardsize + __static_tls_size
                                     + MINIMAL_REST_STACK + pagesize_m1)
                                    & ~pagesize_m1),
                            0))
        /* The stack is too small (or the guard too large).  */
        return EINVAL;
      /* Try to get a stack from the cache.  */
      reqsize = size;
      pd = get_cached_stack (&size, &mem);
      if (pd == NULL)
        {
          /* To avoid aliasing effects on a larger scale than pages we
             adjust the allocated stack size if necessary.  This way
             allocations directly following each other will not have
             aliasing problems.  */
#if defined MULTI_PAGE_ALIASING && MULTI_PAGE_ALIASING != 0
          if ((size % MULTI_PAGE_ALIASING) == 0)
            size += pagesize_m1 + 1;
#endif
          mem = mmap (NULL, size, prot,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

          if (__builtin_expect (mem == MAP_FAILED, 0))
            {
              if (errno == ENOMEM)
                __set_errno (EAGAIN);

              return errno;
            }

          /* SIZE is guaranteed to be greater than zero.
             So we can never get a null pointer back from mmap.  */
          assert (mem != NULL);
#if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
          /* Atomically increment NCREATED.  */
          unsigned int ncreated = atomic_increment_val (&nptl_ncreated);

          /* We choose the offset for coloring by incrementing it for
             every new thread by a fixed amount.  The offset is used
             modulo the page size.  Even if coloring would be better
             relative to higher alignment values it makes no sense to
             do it since the mmap() interface does not allow us to
             specify any alignment for the returned memory block.  */
          size_t coloring = (ncreated * COLORING_INCREMENT) & pagesize_m1;

          /* Make sure the coloring offset does not disturb the alignment
             of the TCB and static TLS block.  */
          if (__builtin_expect ((coloring & __static_tls_align_m1) != 0, 0))
            coloring = (((coloring + __static_tls_align_m1)
                         & ~(__static_tls_align_m1))
                        & pagesize_m1);
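
          /* Worked arithmetic (illustrative, with a hypothetical
             COLORING_INCREMENT of 100, 4 KiB pages and
             __static_tls_align_m1 == 15): the 7th thread gets

                 coloring = (7 * 100) & 4095   = 700
                 (700 + 15) & ~15              = 704

             i.e. the raw offset is rounded up to the TLS alignment and
             folded back into the page.  */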
#else
          /* Unless specified we do not make any adjustments.  */
# define coloring 0
#endif

          /* Place the thread descriptor at the end of the stack.  */
#if defined(TLS_TCB_AT_TP)
          pd = (struct pthread *) ((char *) mem + size - coloring) - 1;
#elif defined(TLS_DTV_AT_TP)
          pd = (struct pthread *) ((((uintptr_t) mem + size - coloring
                                    - __static_tls_size)
                                    & ~__static_tls_align_m1)
                                   - TLS_PRE_TCB_SIZE);
#endif
          /* Remember the stack-related values.  */
          pd->stackblock = mem;
          pd->stackblock_size = size;

          /* We allocated the first block of the thread-specific data
             array within the TCB.  This address will not change for
             the lifetime of this descriptor.  */
          pd->specific[0] = pd->specific_1stblock;

          /* This is at least the second thread.  */
          pd->header.multiple_threads = 1;
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
          __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif

#ifndef __ASSUME_PRIVATE_FUTEX
          /* The thread must know when private futexes are supported.  */
          pd->header.private_futex = THREAD_GETMEM (THREAD_SELF,
                                                    header.private_futex);
#endif

#ifdef NEED_DL_SYSINFO
          /* Copy the sysinfo value from the parent.  */
          THREAD_SYSINFO(pd) = THREAD_SELF_SYSINFO;
#endif

          /* The process ID is also the same as that of the caller.  */
          pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

          /* Allocate the DTV for this thread.  */
          if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
            {
              /* Something went wrong.  */
              assert (errno == ENOMEM);
              /* Free the stack memory we just allocated.  */
              (void) munmap (mem, size);

              return EAGAIN;
            }

          /* Prepare to modify global data.  */
          lll_lock (stack_cache_lock, LLL_PRIVATE);

          /* And add to the list of stacks in use.  */
          stack_list_add (&pd->list, &stack_used);

          lll_unlock (stack_cache_lock, LLL_PRIVATE);
          /* Note that all of the stack and the thread descriptor is
             zeroed.  This means we do not have to initialize fields
             with initial value zero.  This is specifically true for
             the 'tid' field which is always set back to zero once the
             stack is not used anymore and for the 'guardsize' field
             which will be read next.  */
        }
      /* Create or resize the guard area if necessary.  */
      if (__builtin_expect (guardsize > pd->guardsize, 0))
        {
#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
#elif defined _STACK_GROWS_DOWN
          char *guard = mem;
#elif defined _STACK_GROWS_UP
          char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
          if (mprotect (guard, guardsize, PROT_NONE) != 0)
            {
              int err;
            mprot_error:
              err = errno;

              lll_lock (stack_cache_lock, LLL_PRIVATE);

              /* Remove the thread from the list.  */
              stack_list_del (&pd->list);

              lll_unlock (stack_cache_lock, LLL_PRIVATE);

              /* Get rid of the TLS block we allocated.  */
              _dl_deallocate_tls (TLS_TPADJ (pd), false);

              /* Free the stack memory regardless of whether the size
                 of the cache is over the limit or not.  If this piece
                 of memory caused problems we had better not use it
                 anymore.  And we ignore possible errors; there is
                 nothing we could do.  */
              (void) munmap (mem, size);

              return err;
            }

          pd->guardsize = guardsize;
        }
      else if (__builtin_expect (pd->guardsize - guardsize > size - reqsize,
                                 0))
        {
          /* The old guard area is too large.  */

#ifdef NEED_SEPARATE_REGISTER_STACK
          char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
          char *oldguard = mem + (((size - pd->guardsize) / 2) & ~pagesize_m1);

          if (oldguard < guard
              && mprotect (oldguard, guard - oldguard, prot) != 0)
            goto mprot_error;

          if (mprotect (guard + guardsize,
                        oldguard + pd->guardsize - guard - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif defined _STACK_GROWS_DOWN
          if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
                        prot) != 0)
            goto mprot_error;
#elif defined _STACK_GROWS_UP
          if (mprotect ((char *) pd - pd->guardsize,
                        pd->guardsize - guardsize, prot) != 0)
            goto mprot_error;
#endif

          pd->guardsize = guardsize;
        }
      /* The pthread_getattr_np() calls need to get passed the size
         requested in the attribute, regardless of how large the
         actually used guardsize is.  */
      pd->reported_guardsize = guardsize;
    }
  /* Initialize the lock.  We have to do this unconditionally since the
     stillborn thread could be canceled while the lock is taken.  */
  pd->lock = LLL_LOCK_INITIALIZER;

  /* The robust mutex lists also need to be initialized
     unconditionally because the cleanup for the previous stack owner
     might have happened in the kernel.  */
  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                  - offsetof (pthread_mutex_t,
                                              __data.__list.__next));
  pd->robust_head.list_op_pending = NULL;
#ifdef __PTHREAD_MUTEX_HAVE_PREV
  pd->robust_prev = &pd->robust_head;
#endif
  pd->robust_head.list = &pd->robust_head;
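
  /* Worked example (illustrative): FUTEX_OFFSET is the constant byte
     distance from a mutex's __data.__list.__next member to its
     __data.__lock word.  Given the list pointer ENTRY that the kernel
     walks on thread death, the futex word is recovered as

         (int *) ((char *) entry + pd->robust_head.futex_offset)

     which is what the robust-futex ABI expects; the head containing
     this offset is registered with the kernel via set_robust_list.  */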
  /* We place the thread descriptor at the end of the stack.  */
  *pdp = pd;

#if defined(TLS_TCB_AT_TP)
  /* The stack begins before the TCB and the static TLS block.  */
  stacktop = ((char *) (pd + 1) - __static_tls_size);
#elif defined(TLS_DTV_AT_TP)
  stacktop = (char *) (pd - 1);
#endif

#ifdef NEED_SEPARATE_REGISTER_STACK
  *stack = pd->stackblock;
  *stacksize = stacktop - *stack;
#elif defined _STACK_GROWS_DOWN
  *stack = stacktop;
#elif defined _STACK_GROWS_UP
  *stack = pd->stackblock;
  assert (*stack > 0);
#endif

  return 0;
}
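
/* Resulting layout for the common TLS_TCB_AT_TP, _STACK_GROWS_DOWN
   configuration (illustrative diagram):

       mem                                              mem + size
        | guard | usable stack ...       | static TLS | TCB == *pd |
                                         ^
                                         stacktop == *stack

   For TLS_DTV_AT_TP the descriptor sits below the TCB instead, and
   STACKTOP is taken just below the descriptor.  */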
void
internal_function
__deallocate_stack (struct pthread *pd)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__builtin_expect (! pd->user_stack, 1))
    (void) queue_stack (pd);
  else
    /* Free the memory associated with the ELF TLS.  */
    _dl_deallocate_tls (TLS_TPADJ (pd), false);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
int
internal_function
__make_stacks_executable (void **stack_endp)
{
  /* First the main thread's stack.  */
  int err = _dl_make_stack_executable (stack_endp);
  if (err != 0)
    return err;

#ifdef NEED_SEPARATE_REGISTER_STACK
  const size_t pagemask = ~(__getpagesize () - 1);
#endif

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                               , pagemask
#endif
                               );
      if (err != 0)
        break;
    }

  /* Also change the permission for the currently unused stacks.  This
     might be wasted time but better spend it here than adding a check
     in the fast path.  */
  if (err == 0)
    list_for_each (runp, &stack_cache)
      {
        err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
                                 , pagemask
#endif
                                 );
        if (err != 0)
          break;
      }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return err;
}
/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
void
__reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  But
     we have to be aware that we might have interrupted a list
     operation.  */

  if (in_flight_stack != 0)
    {
      bool add_p = in_flight_stack & 1;
      list_t *elem = (list_t *) (in_flight_stack & ~UINTMAX_C (1));

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this
             case we only need to check the beginning of these lists.  */
          int check_list (list_t *l)
          {
            if (l->next->prev != l)
              {
                assert (l->next->prev == elem);

                elem->next = l->next;
                elem->prev = l;
                l->next = elem;

                return 1;
              }

            return 0;
          }

          if (check_list (&stack_used) == 0)
            (void) check_list (&stack_cache);
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }
    }
  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* The PID field must be initialized for the new process.  */
          curp->pid = self->pid;

          /* Account for the size of the stack.  */
          stack_cache_actsize += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We have allocated the block which we do not
                       free here so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
        }
    }
  /* Reset the PIDs in any cached stacks.  */
  list_for_each (runp, &stack_cache)
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      curp->pid = self->pid;
    }

  /* Add the stacks of all running threads to the cache.  */
  list_splice (&stack_used, &stack_cache);

  /* Remove the entry for the current thread from the cache list
     and add it to the list of running threads.  Which of the two
     lists is decided by the user_stack flag.  */
  stack_list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&stack_used);
  INIT_LIST_HEAD (&__stack_user);

  if (__builtin_expect (THREAD_GETMEM (self, user_stack), 0))
    list_add (&self->list, &__stack_user);
  else
    list_add (&self->list, &stack_used);

  /* There is one thread running.  */
  __nptl_nthreads = 1;

  in_flight_stack = 0;

  /* Initialize the lock.  */
  stack_cache_lock = LLL_LOCK_INITIALIZER;
}
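
/* Assumed caller (a sketch following the usual NPTL arrangement, not a
   quote of fork.c): the fork implementation runs this in the new child,
   roughly

       pid_t pid = ARCH_FORK ();
       if (pid == 0)
         __reclaim_stacks ();

   so that every stack except the calling thread's is back in the cache
   before user code runs in the child.  */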
#if HP_TIMING_AVAIL
# undef __find_thread_by_id
/* Find a thread given the thread ID.  */
attribute_hidden
struct pthread *
__find_thread_by_id (pid_t tid)
{
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *curp;

      curp = list_entry (runp, struct pthread, list);

      if (curp->tid == tid)
        {
          result = curp;
          goto out;
        }
    }

 out:
  lll_unlock (stack_cache_lock, LLL_PRIVATE);

  return result;
}
#endif
static void
internal_function
setxid_mark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  /* Don't let the thread exit before the setxid handler runs.  */
  t->setxid_futex = 0;

  do
    {
      ch = t->cancelhandling;

      /* If the thread is exiting right now, ignore it.  */
      if ((ch & EXITING_BITMASK) != 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch | SETXID_BITMASK, ch));
}
static void
internal_function
setxid_unmark_thread (struct xid_command *cmdp, struct pthread *t)
{
  int ch;

  do
    {
      ch = t->cancelhandling;
      if ((ch & SETXID_BITMASK) == 0)
        return;
    }
  while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
                                               ch & ~SETXID_BITMASK, ch));

  /* Release the futex just in case.  */
  t->setxid_futex = 1;
  lll_futex_wake (&t->setxid_futex, 1, LLL_PRIVATE);
}
static int
internal_function
setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
{
  if ((t->cancelhandling & SETXID_BITMASK) == 0)
    return 0;

  int val;
  INTERNAL_SYSCALL_DECL (err);
#if defined (__ASSUME_TGKILL) && __ASSUME_TGKILL
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                          t->tid, SIGSETXID);
#else
# ifdef __NR_tgkill
  val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
                          t->tid, SIGSETXID);
  if (INTERNAL_SYSCALL_ERROR_P (val, err)
      && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
# endif
    val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
#endif

  /* If this failed, the thread must not have started yet or else have
     exited.  */
  if (!INTERNAL_SYSCALL_ERROR_P (val, err))
    {
      atomic_increment (&cmdp->cntr);
      return 1;
    }

  return 0;
}
int
attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
  int signalled;
  int result;
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  __xidcmd = cmdp;
  cmdp->cntr = 0;

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_mark_thread (cmdp, t);
    }

  /* Iterate until we don't succeed in signalling anyone.  That means
     we have gotten all running threads, and their children will be
     automatically correct once started.  */
  do
    {
      signalled = 0;

      list_for_each (runp, &stack_used)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      list_for_each (runp, &__stack_user)
        {
          struct pthread *t = list_entry (runp, struct pthread, list);
          if (t == self)
            continue;

          signalled += setxid_signal_thread (cmdp, t);
        }

      int cur = cmdp->cntr;
      while (cur != 0)
        {
          lll_futex_wait (&cmdp->cntr, cur, LLL_PRIVATE);
          cur = cmdp->cntr;
        }
    }
  while (signalled != 0);
  /* Clean up flags, so that no thread blocks during exit waiting
     for a signal which will never come.  */
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self)
        continue;

      setxid_unmark_thread (cmdp, t);
    }

  /* This must be last, otherwise the current thread might not have
     permissions to send the SIGSETXID signal to the other threads.  */
  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
                                 cmdp->id[0], cmdp->id[1], cmdp->id[2]);
  if (INTERNAL_SYSCALL_ERROR_P (result, err))
    {
      __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
      result = -1;
    }

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}
static inline void __attribute__((always_inline))
init_one_static_tls (struct pthread *curp, struct link_map *map)
{
  dtv_t *dtv = GET_DTV (TLS_TPADJ (curp));
# if defined(TLS_TCB_AT_TP)
  void *dest = (char *) curp - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
  void *dest = (char *) curp + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

  /* Fill in the DTV slot so that a later LD/GD access will find it.  */
  dtv[map->l_tls_modid].pointer.val = dest;
  dtv[map->l_tls_modid].pointer.is_static = true;

  /* Initialize the memory.  */
  memset (mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
          '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
}
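
/* The memset (mempcpy (...), ...) idiom above is equivalent to the
   two-step form (illustrative expansion):

       memcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size);
       memset ((char *) dest + map->l_tls_initimage_size, '\0',
               map->l_tls_blocksize - map->l_tls_initimage_size);

   i.e. copy the initialized TLS image and zero the trailing .tbss
   part, since mempcpy returns the end of the copied region.  */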
void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    init_one_static_tls (list_entry (runp, struct pthread, list), map);

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
void
attribute_hidden
__wait_lookup_done (void)
{
  lll_lock (stack_cache_lock, LLL_PRIVATE);

  struct pthread *self = THREAD_SELF;

  /* Iterate over the list with system-allocated threads first.  */
  list_t *runp;
  list_for_each (runp, &stack_used)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }
  /* Now the list with threads using user-allocated stacks.  */
  list_for_each (runp, &__stack_user)
    {
      struct pthread *t = list_entry (runp, struct pthread, list);
      if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
        continue;

      int *const gscope_flagp = &t->header.gscope_flag;

      /* We have to wait until this thread is done with the global
         scope.  First tell the thread that we are waiting and
         possibly have to be woken.  */
      if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                                THREAD_GSCOPE_FLAG_WAIT,
                                                THREAD_GSCOPE_FLAG_USED))
        continue;

      do
        lll_futex_wait (gscope_flagp, THREAD_GSCOPE_FLAG_WAIT, LLL_PRIVATE);
      while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
    }
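
  /* The release side of this handshake, for reference (a sketch of the
     usual THREAD_GSCOPE_RESET_FLAG definition; the exact macro lives in
     the per-architecture tls.h):

         int old = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag,
                                        THREAD_GSCOPE_FLAG_UNUSED);
         if (old == THREAD_GSCOPE_FLAG_WAIT)
           lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE);

     so any waiter that installed THREAD_GSCOPE_FLAG_WAIT above is
     guaranteed to be woken.  */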
  lll_unlock (stack_cache_lock, LLL_PRIVATE);
}