1 /* Linuxthreads - a simple clone()-based implementation of Posix */
2 /* threads for Linux. */
3 /* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */
5 /* This program is free software; you can redistribute it and/or */
6 /* modify it under the terms of the GNU Library General Public License */
7 /* as published by the Free Software Foundation; either version 2 */
8 /* of the License, or (at your option) any later version. */
10 /* This program is distributed in the hope that it will be useful, */
11 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
12 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
13 /* GNU Library General Public License for more details. */
15 /* The "thread manager" thread: manages creation and termination of threads */
25 #include <sys/poll.h> /* for poll */
26 #include <sys/mman.h> /* for mmap */
27 #include <sys/param.h>
29 #include <sys/wait.h> /* for waitpid macros */
30 #include <locale.h> /* for __uselocale */
31 #include <resolv.h> /* for __resp */
34 #include "internals.h"
37 #include "semaphore.h"
38 #include <not-cancel.h>
/* NOTE(review): this listing elides some original lines (the embedded line
   numbers skip); comments below describe only what is visible here. */
41 #if !(USE_TLS && HAVE___THREAD) && defined __UCLIBC_HAS_XLOCALE__
/* Without __thread-based TLS, map the glibc-internal name onto the
   public uselocale() entry point. */
42 #define __uselocale(x) uselocale(x)
45 /* For debugging purposes put the maximum number of threads in a variable. */
46 const int __linuxthreads_pthread_threads_max = PTHREAD_THREADS_MAX;
49 /* Indicate whether at least one thread has a user-defined stack (if 1),
50 or if all threads have stacks supplied by LinuxThreads (if 0). */
51 int __pthread_nonstandard_stacks;
54 /* Number of active entries in __pthread_handles (used by gdb) */
/* Starts at 2: slot 0 is the initial thread, slot 1 the manager
   (presumably — confirm against the full source). */
55 volatile int __pthread_handles_num = 2;
57 /* Whether to use debugger additional actions for thread creation
59 volatile int __pthread_threads_debug;
61 /* Globally enabled events. */
62 volatile td_thr_events_t __pthread_threads_events;
64 /* Pointer to thread descriptor with last event. */
65 volatile pthread_descr __pthread_last_event;
/* Descriptor of the manager thread itself; set once in __pthread_manager(). */
67 static pthread_descr manager_thread;
69 /* Mapping from stack segment to thread descriptor. */
70 /* Stack segment numbers are also indices into the __pthread_handles array. */
71 /* Stack segment number 0 is reserved for the initial thread. */
/* With floating stacks there is no fixed segment->address mapping. */
74 # define thread_segment(seq) NULL
76 static inline pthread_descr thread_segment(int seg)
78 # ifdef _STACK_GROWS_UP
/* Fixed-stack layout: descriptor address is derived arithmetically from
   the segment number; direction depends on stack growth. */
79 return (pthread_descr)(THREAD_STACK_START_ADDRESS + (seg - 1) * STACK_SIZE)
82 return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
88 /* Flag set in signal handler to record child termination */
90 static volatile int terminated_children;
92 /* Flag set when the initial thread is blocked on pthread_exit waiting
93 for all other threads to terminate */
95 static int main_thread_exiting;
97 /* Counter used to generate unique thread identifier.
98 Thread identifier is pthread_threads_counter + segment. */
100 static pthread_t pthread_threads_counter;
102 /* Forward declarations */
104 static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
105 void * (*start_routine)(void *), void *arg,
106 sigset_t *mask, int father_pid,
108 td_thr_events_t *event_maskp);
109 static void pthread_handle_free(pthread_t th_id);
110 static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
111 __attribute__ ((noreturn));
112 static void pthread_reap_children(void);
113 static void pthread_kill_all_threads(int sig, int main_thread_also);
114 static void pthread_for_each_thread(void *arg,
115 void (*fn)(void *, pthread_descr));
117 /* The server thread managing requests for thread creation and termination */
/* Runs forever (noreturn): reads struct pthread_request packets from the
   manager pipe and dispatches on req_kind.  NOTE(review): several lines of
   the loop (ufd setup, case labels, braces) are elided in this listing. */
120 __attribute__ ((noreturn))
121 __pthread_manager(void *arg)
123 pthread_descr self = manager_thread = arg;
124 int reqfd = __pthread_manager_reader;
126 sigset_t manager_mask;
128 struct pthread_request request;
130 /* If we have special thread_self processing, initialize it. */
131 #ifdef INIT_THREAD_SELF
132 INIT_THREAD_SELF(self, 1);
134 #if !(USE_TLS && HAVE___THREAD)
135 /* Set the error variable. */
/* errno/h_errno indirection points at the manager's own descriptor fields. */
136 self->p_errnop = &self->p_errno;
137 self->p_h_errnop = &self->p_h_errno;
139 /* Block all signals except __pthread_sig_cancel and SIGTRAP */
140 sigfillset(&manager_mask);
141 sigdelset(&manager_mask, __pthread_sig_cancel); /* for thread termination */
142 sigdelset(&manager_mask, SIGTRAP); /* for debugging purposes */
143 if (__pthread_threads_debug && __pthread_sig_debug > 0)
144 sigdelset(&manager_mask, __pthread_sig_debug);
145 sigprocmask(SIG_SETMASK, &manager_mask, NULL);
146 /* Raise our priority to match that of main thread */
147 __pthread_manager_adjust_prio(__pthread_main_thread->p_priority);
148 /* Synchronize debugging of the thread manager */
/* The creator is expected to send a REQ_DEBUG request first; block on it. */
149 n = TEMP_FAILURE_RETRY(read_not_cancel(reqfd, (char *)&request,
151 ASSERT(n == sizeof(request) && request.req_kind == REQ_DEBUG);
154 /* Enter server loop */
/* 2000 ms timeout so the orphan/dead-children checks below run
   periodically even when no requests arrive. */
156 n = __poll(&ufd, 1, 2000);
158 /* Check for termination of the main thread */
/* Reparented to init (ppid == 1) means the initial thread died without a
   proper exit: kill every remaining thread. */
159 if (getppid() == 1) {
160 pthread_kill_all_threads(SIGKILL, 0);
163 /* Check for dead children */
164 if (terminated_children) {
165 terminated_children = 0;
166 pthread_reap_children();
168 /* Read and execute request */
169 if (n == 1 && (ufd.revents & POLLIN)) {
170 n = TEMP_FAILURE_RETRY(read_not_cancel(reqfd, (char *)&request,
175 write(STDERR_FILENO, d, snprintf(d, sizeof(d), "*** read err %m\n"));
176 } else if (n != sizeof(request)) {
177 write(STDERR_FILENO, "*** short read in manager\n", 26);
181 switch(request.req_kind) {
/* REQ_CREATE: create the thread, stash the result in the requester's
   descriptor, then restart the (suspended) requester. */
183 request.req_thread->p_retcode =
184 pthread_handle_create((pthread_t *) &request.req_thread->p_retval,
185 request.req_args.create.attr,
186 request.req_args.create.fn,
187 request.req_args.create.arg,
188 &request.req_args.create.mask,
189 request.req_thread->p_pid,
190 request.req_thread->p_report_events,
191 &request.req_thread->p_eventbuf.eventmask);
192 restart(request.req_thread);
195 pthread_handle_free(request.req_args.free.thread_id);
197 case REQ_PROCESS_EXIT:
198 pthread_handle_exit(request.req_thread,
199 request.req_args.exit.code);
202 case REQ_MAIN_THREAD_EXIT:
203 main_thread_exiting = 1;
204 /* Reap children in case all other threads died and the signal handler
205 went off before we set main_thread_exiting to 1, and therefore did
207 pthread_reap_children();
/* Only restart main when it is the last live thread. */
209 if (__pthread_main_thread->p_nextlive == __pthread_main_thread) {
210 restart(__pthread_main_thread);
211 /* The main thread will now call exit() which will trigger an
212 __on_exit handler, which in turn will send REQ_PROCESS_EXIT
213 to the thread manager. In case you are wondering how the
214 manager terminates from its loop here. */
218 sem_post(request.req_args.post);
221 /* Make gdb aware of new thread and gdb will restart the
222 new thread when it is ready to handle the new thread. */
223 if (__pthread_threads_debug && __pthread_sig_debug > 0)
224 raise(__pthread_sig_debug);
227 /* This is just a prod to get the manager to reap some
228 threads right away, avoiding a potential delay at shutdown. */
230 case REQ_FOR_EACH_THREAD:
231 pthread_for_each_thread(request.req_args.for_each.arg,
232 request.req_args.for_each.fn);
233 restart(request.req_thread);
/* Alternate manager entry point used when thread-debug events are enabled:
   waits on the manager's own lock until the creator has finished setting up
   (and reported the creation event), then runs the normal manager loop. */
240 int __pthread_manager_event(void *arg)
242 pthread_descr self = arg;
243 /* If we have special thread_self processing, initialize it. */
244 #ifdef INIT_THREAD_SELF
245 INIT_THREAD_SELF(self, 1);
248 /* Get the lock the manager will free once all is correctly set up. */
249 __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
250 /* Free it immediately. */
/* The lock/unlock pair is purely a rendezvous with the creator. */
251 __pthread_unlock (THREAD_GETMEM(self, p_lock));
253 return __pthread_manager(arg);
256 /* Process creation */
/* Trampoline run in the newly cloned thread: initializes per-thread state
   (self pointer, pid, signal mask, scheduling, locale, resolver), optionally
   notifies gdb, then runs the user start routine and exits with its result.
   Never returns. */
259 __attribute__ ((noreturn))
260 pthread_start_thread(void *arg)
262 pthread_descr self = (pthread_descr) arg;
263 struct pthread_request request;
266 hp_timing_t tmpclock;
268 /* Initialize special thread_self processing, if any. */
269 #ifdef INIT_THREAD_SELF
270 INIT_THREAD_SELF(self, self->p_nr);
/* Record per-thread CPU-clock baseline. */
273 HP_TIMING_NOW (tmpclock);
274 THREAD_SETMEM (self, p_cpuclock_offset, tmpclock);
276 /* Make sure our pid field is initialized, just in case we get there
277 before our father has initialized it. */
278 THREAD_SETMEM(self, p_pid, __getpid());
279 /* Initial signal mask is that of the creating thread. (Otherwise,
280 we'd just inherit the mask of the thread manager.) */
281 sigprocmask(SIG_SETMASK, &self->p_start_args.mask, NULL);
282 /* Set the scheduling policy and priority for the new thread, if needed */
283 if (THREAD_GETMEM(self, p_start_args.schedpolicy) >= 0)
284 /* Explicit scheduling attributes were provided: apply them */
285 __sched_setscheduler(THREAD_GETMEM(self, p_pid),
286 THREAD_GETMEM(self, p_start_args.schedpolicy),
287 &self->p_start_args.schedparam);
288 else if (manager_thread->p_priority > 0)
289 /* Default scheduling required, but thread manager runs in realtime
290 scheduling: switch new thread to SCHED_OTHER policy */
292 struct sched_param default_params;
293 default_params.sched_priority = 0;
294 __sched_setscheduler(THREAD_GETMEM(self, p_pid),
295 SCHED_OTHER, &default_params);
297 #if !(USE_TLS && HAVE___THREAD)
298 /* Initialize thread-locale current locale to point to the global one.
299 With __thread support, the variable's initializer takes care of this. */
300 #ifdef __UCLIBC_HAS_XLOCALE__
301 __uselocale (LC_GLOBAL_LOCALE);
304 /* Initialize __resp. */
/* Resolver state lives in the thread descriptor when there is no TLS. */
305 __resp = &self->p_res;
307 /* Make gdb aware of new thread */
308 if (__pthread_threads_debug && __pthread_sig_debug > 0) {
309 request.req_thread = self;
310 request.req_kind = REQ_DEBUG;
311 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
312 (char *) &request, sizeof(request)));
315 /* Run the thread code */
316 outcome = self->p_start_args.start_routine(THREAD_GETMEM(self,
318 /* Exit with the given return value */
319 __pthread_do_exit(outcome, CURRENT_STACK_FRAME);
/* Event-reporting variant of the thread trampoline: sets pid, then blocks
   on its own descriptor lock until the manager has delivered the TD_CREATE
   event to the debugger, and finally chains to pthread_start_thread(). */
323 __attribute__ ((noreturn))
324 pthread_start_thread_event(void *arg)
326 pthread_descr self = (pthread_descr) arg;
328 #ifdef INIT_THREAD_SELF
329 INIT_THREAD_SELF(self, self->p_nr);
331 /* Make sure our pid field is initialized, just in case we get there
332 before our father has initialized it. */
333 THREAD_SETMEM(self, p_pid, __getpid());
334 /* Get the lock the manager will free once all is correctly set up. */
335 __pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
336 /* Free it immediately. */
337 __pthread_unlock (THREAD_GETMEM(self, p_lock));
339 /* Continue with the real function. */
340 pthread_start_thread (arg);
343 #if defined USE_TLS && !FLOATING_STACKS
344 # error "TLS can only work with floating stacks"
/* Allocate (or adopt) a stack and thread-descriptor area for a new thread.
   On success returns 0 and fills the out_* parameters:
     out_new_thread        - descriptor address (also top-of-stack marker)
     out_new_thread_bottom - lowest usable stack address
     out_guardaddr/size    - guard-page region (PROT_NONE where mapped)
     out_stacksize         - usable stack size (halved for the IA64
                             separate-register-stack case).
   Three layout families, chosen by preprocessor state: user-supplied stack
   (attr->__stackaddr_set), FLOATING_STACKS via mmap(NULL,...), and
   fixed-address stacks derived from default_new_thread.
   NOTE(review): many branch/brace lines are elided in this listing; the
   exact error paths and #else pairings must be checked in the full source. */
347 static int pthread_allocate_stack(const pthread_attr_t *attr,
348 pthread_descr default_new_thread,
350 char ** out_new_thread,
351 char ** out_new_thread_bottom,
352 char ** out_guardaddr,
353 size_t * out_guardsize,
354 size_t * out_stacksize)
356 pthread_descr new_thread;
357 char * new_thread_bottom;
359 size_t stacksize, guardsize;
362 /* TLS cannot work with fixed thread descriptor addresses. */
363 assert (default_new_thread == NULL);
366 if (attr != NULL && attr->__stackaddr_set)
368 #ifdef _STACK_GROWS_UP
369 /* The user provided a stack. */
371 /* This value is not needed. */
372 new_thread = (pthread_descr) attr->__stackaddr;
373 new_thread_bottom = (char *) new_thread;
375 new_thread = (pthread_descr) attr->__stackaddr;
376 new_thread_bottom = (char *) (new_thread + 1);
377 guardaddr
378 guardaddr = attr->__stackaddr + attr->__stacksize;
381 /* The user provided a stack. For now we interpret the supplied
382 address as 1 + the highest addr. in the stack segment. If a
383 separate register stack is needed, we place it at the low end
384 of the segment, relying on the associated stacksize to
385 determine the low end of the segment. This differs from many
386 (but not all) other pthreads implementations. The intent is
387 that on machines with a single stack growing toward higher
388 addresses, stackaddr would be the lowest address in the stack
389 segment, so that it is consistently close to the initial sp
392 new_thread = (pthread_descr) attr->__stackaddr;
395 (pthread_descr) ((long)(attr->__stackaddr) & -sizeof(void *)) - 1;
397 new_thread_bottom = (char *) attr->__stackaddr - attr->__stacksize;
398 guardaddr = new_thread_bottom;
/* Remember that at least one thread uses a user-managed stack. */
402 __pthread_nonstandard_stacks = 1;
405 /* Clear the thread data structure. */
406 memset (new_thread, '\0', sizeof (*new_thread));
408 stacksize = attr->__stacksize;
412 #ifdef NEED_SEPARATE_REGISTER_STACK
413 const size_t granularity = 2 * pagesize;
414 /* Try to make stacksize/2 a multiple of pagesize */
416 const size_t granularity = pagesize;
420 /* Allocate space for stack and thread descriptor at default address */
424 guardsize = page_roundup (attr->__guardsize, granularity);
425 stacksize = __pthread_max_stacksize - guardsize;
426 stacksize = MIN (stacksize,
427 page_roundup (attr->__stacksize, granularity));
431 guardsize = granularity;
432 stacksize = __pthread_max_stacksize - guardsize;
/* FLOATING_STACKS: let the kernel pick the address. */
435 map_addr = mmap(NULL, stacksize + guardsize,
436 PROT_READ | PROT_WRITE | PROT_EXEC,
437 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
438 if (map_addr == MAP_FAILED)
439 /* No more memory available. */
442 # ifdef NEED_SEPARATE_REGISTER_STACK
/* Guard sits in the middle: memory stack above, register stack below. */
443 guardaddr = map_addr + stacksize / 2;
445 mprotect (guardaddr, guardsize, PROT_NONE);
447 new_thread_bottom = (char *) map_addr;
449 new_thread = ((pthread_descr) (new_thread_bottom + stacksize
452 new_thread = ((pthread_descr) (new_thread_bottom + stacksize
455 # elif _STACK_GROWS_DOWN
456 guardaddr = map_addr;
458 mprotect (guardaddr, guardsize, PROT_NONE);
460 new_thread_bottom = (char *) map_addr + guardsize;
462 new_thread = ((pthread_descr) (new_thread_bottom + stacksize));
464 new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
466 # elif _STACK_GROWS_UP
467 guardaddr = map_addr + stacksize;
469 mprotect (guardaddr, guardsize, PROT_NONE);
471 new_thread = (pthread_descr) map_addr;
473 new_thread_bottom = (char *) new_thread;
475 new_thread_bottom = (char *) (new_thread + 1);
478 # error You must define a stack direction
479 # endif /* Stack direction */
480 #else /* !FLOATING_STACKS */
481 # if !defined NEED_SEPARATE_REGISTER_STACK && defined _STACK_GROWS_DOWN
487 guardsize = page_roundup (attr->__guardsize, granularity);
488 stacksize = STACK_SIZE - guardsize;
489 stacksize = MIN (stacksize,
490 page_roundup (attr->__stacksize, granularity));
494 guardsize = granularity;
495 stacksize = STACK_SIZE - granularity;
498 # ifdef NEED_SEPARATE_REGISTER_STACK
499 new_thread = default_new_thread;
500 new_thread_bottom = (char *) (new_thread + 1) - stacksize - guardsize;
501 /* Includes guard area, unlike the normal case. Use the bottom
502 end of the segment as backing store for the register stack.
503 Needed on IA64. In this case, we also map the entire stack at
504 once. According to David Mosberger, that's cheaper. It also
505 avoids the risk of intermittent failures due to other mappings
506 in the same region. The cost is that we might be able to map
507 slightly fewer stacks. */
509 /* First the main stack: */
510 map_addr = (caddr_t)((char *)(new_thread + 1) - stacksize / 2);
511 res_addr = mmap(map_addr, stacksize / 2,
512 PROT_READ | PROT_WRITE | PROT_EXEC,
513 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
/* mmap at a hint address without MAP_FIXED: a different return address
   means something else already lives there. */
514 if (res_addr != map_addr)
516 /* Bad luck, this segment is already mapped. */
517 if (res_addr != MAP_FAILED)
518 munmap(res_addr, stacksize / 2);
521 /* Then the register stack: */
522 map_addr = (caddr_t)new_thread_bottom;
523 res_addr = mmap(map_addr, stacksize/2,
524 PROT_READ | PROT_WRITE | PROT_EXEC,
525 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
526 if (res_addr != map_addr)
528 if (res_addr != MAP_FAILED)
529 munmap(res_addr, stacksize / 2);
/* Roll back the already-mapped main stack on failure. */
530 munmap((caddr_t)((char *)(new_thread + 1) - stacksize/2),
535 guardaddr = new_thread_bottom + stacksize/2;
536 /* We leave the guard area in the middle unmapped. */
537 # else /* !NEED_SEPARATE_REGISTER_STACK */
538 # ifdef _STACK_GROWS_DOWN
539 new_thread = default_new_thread;
540 new_thread_bottom = (char *) (new_thread + 1) - stacksize;
541 map_addr = new_thread_bottom - guardsize;
542 res_addr = mmap(map_addr, stacksize + guardsize,
543 PROT_READ | PROT_WRITE | PROT_EXEC,
544 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
545 if (res_addr != map_addr)
547 /* Bad luck, this segment is already mapped. */
548 if (res_addr != MAP_FAILED)
549 munmap (res_addr, stacksize + guardsize);
553 /* We manage to get a stack. Protect the guard area pages if
555 guardaddr = map_addr;
557 mprotect (guardaddr, guardsize, PROT_NONE);
559 /* The thread description goes at the bottom of this area, and
560 * the stack starts directly above it.
562 new_thread = (pthread_descr)((unsigned long)default_new_thread &~ (STACK_SIZE - 1));
563 map_addr = mmap(new_thread, stacksize + guardsize,
564 PROT_READ | PROT_WRITE | PROT_EXEC,
565 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
566 if (map_addr == MAP_FAILED)
569 new_thread_bottom = map_addr + sizeof(*new_thread);
570 guardaddr = map_addr + stacksize;
572 mprotect (guardaddr, guardsize, PROT_NONE);
574 # endif /* stack direction */
575 # endif /* !NEED_SEPARATE_REGISTER_STACK */
576 #endif /* !FLOATING_STACKS */
/* Publish results to the caller. */
578 *out_new_thread = (char *) new_thread;
579 *out_new_thread_bottom = new_thread_bottom;
580 *out_guardaddr = guardaddr;
581 *out_guardsize = guardsize;
582 #ifdef NEED_SEPARATE_REGISTER_STACK
/* Each of the two stacks (memory + register) gets half the space. */
583 *out_stacksize = stacksize / 2;
585 *out_stacksize = stacksize;
/* Create a new thread on behalf of a REQ_CREATE request.
   Steps: (optionally) allocate TLS, validate scheduling permissions, find a
   free handle segment + stack, initialize the descriptor and handle, clone
   the new process (event-reporting variant if the debugger asked for
   TD_CREATE), and link the thread into the live list.  Returns 0 on success
   or an errno-style code; on clone failure all resources are rolled back.
   NOTE(review): this listing elides many lines (declarations of sseg/pid/
   stack_addr, braces, return statements); treat structure as indicative. */
590 static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
591 void * (*start_routine)(void *), void *arg,
592 sigset_t * mask, int father_pid,
594 td_thr_events_t *event_maskp)
598 pthread_descr new_thread;
600 char * new_thread_bottom;
601 pthread_t new_thread_id;
602 char *guardaddr = NULL;
603 size_t guardsize = 0, stksize = 0;
604 int pagesize = __getpagesize();
/* USE_TLS path: the TLS block is allocated first, descriptor placed
   relative to the thread pointer per TLS_DTV_AT_TP/TLS_PRE_TCB_SIZE. */
608 new_thread = _dl_allocate_tls (NULL);
609 if (new_thread == NULL)
612 /* pthread_descr is below TP. */
613 new_thread = (pthread_descr) ((char *) new_thread - TLS_PRE_TCB_SIZE);
616 /* Prevent warnings. */
620 /* First check whether we have to change the policy and if yes, whether
621 we can do this. Normally this should be done by examining the
622 return value of the __sched_setscheduler call in pthread_start_thread
623 but this is hard to implement. FIXME */
624 if (attr != NULL && attr->__schedpolicy != SCHED_OTHER && geteuid () != 0)
626 /* Find a free segment for the thread, and allocate a stack if needed */
627 for (sseg = 2; ; sseg++)
629 if (sseg >= PTHREAD_THREADS_MAX)
/* Table exhausted: undo the TLS allocation before failing. */
633 new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
635 _dl_deallocate_tls (new_thread, true);
639 if (__pthread_handles[sseg].h_descr != NULL)
641 if (pthread_allocate_stack(attr, thread_segment(sseg),
642 pagesize, &stack_addr, &new_thread_bottom,
643 &guardaddr, &guardsize, &stksize) == 0)
646 new_thread->p_stackaddr = stack_addr;
648 new_thread = (pthread_descr) stack_addr;
651 #ifndef __ARCH_HAS_MMU__
653 /* When there is MMU, mmap () is used to allocate the stack. If one
654 * segment is already mapped, we should continue to see if we can
655 * use the next one. However, when there is no MMU, malloc () is used.
656 * It's waste of CPU cycles to continue to try if it fails. */
661 __pthread_handles_num++;
662 /* Allocate new thread identifier */
/* tid = counter + segment; bumping the counter by PTHREAD_THREADS_MAX
   keeps ids unique across segment reuse. */
663 pthread_threads_counter += PTHREAD_THREADS_MAX;
664 new_thread_id = sseg + pthread_threads_counter;
665 /* Initialize the thread descriptor. Elements which have to be
666 initialized to zero already have this value. */
667 #if !defined USE_TLS || !TLS_DTV_AT_TP
668 new_thread->p_header.data.tcb = new_thread;
669 new_thread->p_header.data.self = new_thread;
671 #if TLS_MULTIPLE_THREADS_IN_TCB || !defined USE_TLS || !TLS_DTV_AT_TP
672 new_thread->p_multiple_threads = 1;
674 new_thread->p_tid = new_thread_id;
675 new_thread->p_lock = &(__pthread_handles[sseg].h_lock);
676 new_thread->p_cancelstate = PTHREAD_CANCEL_ENABLE;
677 new_thread->p_canceltype = PTHREAD_CANCEL_DEFERRED;
678 #if !(USE_TLS && HAVE___THREAD)
679 new_thread->p_errnop = &new_thread->p_errno;
680 new_thread->p_h_errnop = &new_thread->p_h_errno;
681 new_thread->p_resp = &new_thread->p_res;
683 new_thread->p_guardaddr = guardaddr;
684 new_thread->p_guardsize = guardsize;
685 new_thread->p_nr = sseg;
686 new_thread->p_inheritsched = attr ? attr->__inheritsched : 0;
/* Cap alloca() use at a quarter of the stack, bounded by the global max. */
687 new_thread->p_alloca_cutoff = stksize / 4 > __MAX_ALLOCA_CUTOFF
688 ? __MAX_ALLOCA_CUTOFF : stksize / 4;
689 /* Initialize the thread handle */
690 __pthread_init_lock(&__pthread_handles[sseg].h_lock);
691 __pthread_handles[sseg].h_descr = new_thread;
692 __pthread_handles[sseg].h_bottom = new_thread_bottom;
693 /* Determine scheduling parameters for the thread */
/* -1 means "no explicit policy"; pthread_start_thread checks for >= 0. */
694 new_thread->p_start_args.schedpolicy = -1;
696 new_thread->p_detached = attr->__detachstate;
697 new_thread->p_userstack = attr->__stackaddr_set;
699 switch(attr->__inheritsched) {
700 case PTHREAD_EXPLICIT_SCHED:
701 new_thread->p_start_args.schedpolicy = attr->__schedpolicy;
702 memcpy (&new_thread->p_start_args.schedparam, &attr->__schedparam,
703 sizeof (struct sched_param));
705 case PTHREAD_INHERIT_SCHED:
/* Inherit policy/params from the creating thread's process. */
706 new_thread->p_start_args.schedpolicy = __sched_getscheduler(father_pid);
707 __sched_getparam(father_pid, &new_thread->p_start_args.schedparam);
710 new_thread->p_priority =
711 new_thread->p_start_args.schedparam.sched_priority;
713 /* Finish setting up arguments to pthread_start_thread */
714 new_thread->p_start_args.start_routine = start_routine;
715 new_thread->p_start_args.arg = arg;
716 new_thread->p_start_args.mask = *mask;
717 /* Make the new thread ID available already now. If any of the later
718 functions fail we return an error value and the caller must not use
719 the stored thread ID. */
720 *thread = new_thread_id;
721 /* Raise priority of thread manager if needed */
722 __pthread_manager_adjust_prio(new_thread->p_priority);
723 /* Do the cloning. We have to use two different functions depending
724 on whether we are debugging or not. */
725 pid = 0; /* Note that the thread never can have PID zero. */
728 /* See whether the TD_CREATE event bit is set in any of the
730 int idx = __td_eventword (TD_CREATE);
731 uint32_t mask = __td_eventmask (TD_CREATE);
733 if ((mask & (__pthread_threads_events.event_bits[idx]
734 | event_maskp->event_bits[idx])) != 0)
736 /* Lock the mutex the child will use now so that it will stop. */
737 __pthread_lock(new_thread->p_lock, NULL);
739 /* We have to report this event. */
740 #ifdef NEED_SEPARATE_REGISTER_STACK
741 /* Perhaps this version should be used on all platforms. But
742 this requires that __clone2 be uniformly supported
745 And there is some argument for changing the __clone2
746 interface to pass sp and bsp instead, making it more IA64
747 specific, but allowing stacks to grow outward from each
748 other, to get less paging and fewer mmaps. */
749 pid = __clone2(pthread_start_thread_event,
750 (void **)new_thread_bottom,
751 (char *)stack_addr - new_thread_bottom,
752 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
753 __pthread_sig_cancel, new_thread);
754 #elif _STACK_GROWS_UP
755 pid = __clone(pthread_start_thread_event, (void *) new_thread_bottom,
756 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
757 __pthread_sig_cancel, new_thread);
759 pid = __clone(pthread_start_thread_event, stack_addr,
760 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
761 __pthread_sig_cancel, new_thread);
766 /* Now fill in the information about the new thread in
767 the newly created thread's data structure. We cannot let
768 the new thread do this since we don't know whether it was
769 already scheduled when we send the event. */
770 new_thread->p_eventbuf.eventdata = new_thread;
771 new_thread->p_eventbuf.eventnum = TD_CREATE;
772 __pthread_last_event = new_thread;
774 /* We have to set the PID here since the callback function
775 in the debug library will need it and we cannot guarantee
776 the child got scheduled before the debugger. */
777 new_thread->p_pid = pid;
779 /* Now call the function which signals the event. */
780 __linuxthreads_create_event ();
782 /* Now restart the thread. */
783 __pthread_unlock(new_thread->p_lock);
789 #ifdef NEED_SEPARATE_REGISTER_STACK
790 pid = __clone2(pthread_start_thread,
791 (void **)new_thread_bottom,
792 (char *)stack_addr - new_thread_bottom,
793 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
794 __pthread_sig_cancel, new_thread);
795 #elif _STACK_GROWS_UP
796 pid = __clone(pthread_start_thread, (void *) new_thread_bottom,
797 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
798 __pthread_sig_cancel, new_thread);
800 pid = __clone(pthread_start_thread, stack_addr,
801 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
802 __pthread_sig_cancel, new_thread);
803 #endif /* !NEED_SEPARATE_REGISTER_STACK */
806 /* Check if cloning succeeded */
808 /* Free the stack if we allocated it */
809 if (attr == NULL || !attr->__stackaddr_set)
811 #ifdef NEED_SEPARATE_REGISTER_STACK
812 size_t stacksize = ((char *)(new_thread->p_guardaddr)
813 - new_thread_bottom);
814 munmap((caddr_t)new_thread_bottom,
815 2 * stacksize + new_thread->p_guardsize);
816 #elif _STACK_GROWS_UP
818 size_t stacksize = guardaddr - stack_addr;
819 munmap(stack_addr, stacksize + guardsize);
821 size_t stacksize = guardaddr - (char *)new_thread;
822 munmap(new_thread, stacksize + guardsize);
826 size_t stacksize = stack_addr - new_thread_bottom;
828 size_t stacksize = (char *)(new_thread+1) - new_thread_bottom;
830 munmap(new_thread_bottom - guardsize, guardsize + stacksize);
/* Also undo the TLS allocation and release the handle slot. */
835 new_thread = (pthread_descr) ((char *) new_thread + TLS_PRE_TCB_SIZE);
837 _dl_deallocate_tls (new_thread, true);
839 __pthread_handles[sseg].h_descr = NULL;
840 __pthread_handles[sseg].h_bottom = NULL;
841 __pthread_handles_num--;
844 /* Insert new thread in doubly linked list of active threads */
845 new_thread->p_prevlive = __pthread_main_thread;
846 new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
847 __pthread_main_thread->p_nextlive->p_prevlive = new_thread;
848 __pthread_main_thread->p_nextlive = new_thread;
849 /* Set pid field of the new thread, in case we get there before the
851 new_thread->p_pid = pid;
856 /* Try to free the resources of a thread when requested by pthread_join
857 or pthread_detach on a terminated thread. */
/* Invalidates the handle, frees read-lock bookkeeping, unmaps the stack
   (unless user-supplied), and releases TLS.  Caller must ensure the thread
   has already exited (asserted below). */
859 static void pthread_free(pthread_descr th)
861 pthread_handle handle;
862 pthread_readlock_info *iter, *next;
864 ASSERT(th->p_exited);
865 /* Make the handle invalid */
866 handle = thread_handle(th->p_tid);
867 __pthread_lock(&handle->h_lock, NULL);
868 handle->h_descr = NULL;
/* (char *)(-1L) marks the slot as "was used, now dead" — distinct from
   NULL/never-used (presumably; confirm against full source). */
869 handle->h_bottom = (char *)(-1L);
870 __pthread_unlock(&handle->h_lock);
872 FREE_THREAD(th, th->p_nr);
874 /* One fewer threads in __pthread_handles */
875 __pthread_handles_num--;
877 /* Destroy read lock list, and list of free read lock structures.
878 If the former is not empty, it means the thread exited while
879 holding read locks! */
881 for (iter = th->p_readlock_list; iter != NULL; iter = next)
883 next = iter->pr_next;
887 for (iter = th->p_readlock_free; iter != NULL; iter = next)
889 next = iter->pr_next;
893 /* If initial thread, nothing to free */
894 if (!th->p_userstack)
896 size_t guardsize = th->p_guardsize;
897 /* Free the stack and thread descriptor area */
898 char *guardaddr = th->p_guardaddr;
899 #ifdef _STACK_GROWS_UP
901 size_t stacksize = guardaddr - th->p_stackaddr;
903 size_t stacksize = guardaddr - (char *)th;
905 guardaddr = (char *)th;
907 /* Guardaddr is always set, even if guardsize is 0. This allows
908 us to compute everything else. */
910 size_t stacksize = th->p_stackaddr - guardaddr - guardsize;
912 size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
914 # ifdef NEED_SEPARATE_REGISTER_STACK
915 /* Take account of the register stack, which is below guardaddr. */
916 guardaddr -= stacksize;
920 /* Unmap the stack. */
921 munmap(guardaddr, stacksize + guardsize);
/* Release the TLS block (USE_TLS builds). */
927 th = (pthread_descr) ((char *) th + TLS_PRE_TCB_SIZE);
929 _dl_deallocate_tls (th, true);
933 /* Handle threads that have exited */
/* Called with the pid of a reaped child: finds the matching thread, unlinks
   it from the live list, reports TD_REAP to the debugger if requested, and
   (per the detached flag, handled in elided lines) frees its resources.
   Finally wakes the main thread if it was the last one and main is waiting
   in pthread_exit. */
935 static void pthread_exited(pid_t pid)
939 /* Find thread with that pid */
940 for (th = __pthread_main_thread->p_nextlive;
941 th != __pthread_main_thread;
942 th = th->p_nextlive) {
943 if (th->p_pid == pid) {
944 /* Remove thread from list of active threads */
945 th->p_nextlive->p_prevlive = th->p_prevlive;
946 th->p_prevlive->p_nextlive = th->p_nextlive;
947 /* Mark thread as exited, and if detached, free its resources */
948 __pthread_lock(th->p_lock, NULL);
950 /* If we have to signal this event do it now. */
951 if (th->p_report_events)
953 /* See whether TD_REAP is in any of the mask. */
954 int idx = __td_eventword (TD_REAP);
955 uint32_t mask = __td_eventmask (TD_REAP);
957 if ((mask & (__pthread_threads_events.event_bits[idx]
958 | th->p_eventbuf.eventmask.event_bits[idx])) != 0)
960 /* Yep, we have to signal the reapage. */
961 th->p_eventbuf.eventnum = TD_REAP;
962 th->p_eventbuf.eventdata = th;
963 __pthread_last_event = th;
965 /* Now call the function to signal the event. */
966 __linuxthreads_reap_event();
/* Snapshot the detach flag under the lock before releasing it. */
969 detached = th->p_detached;
970 __pthread_unlock(th->p_lock);
976 /* If all threads have exited and the main thread is pending on a
977 pthread_exit, wake up the main thread and terminate ourselves. */
978 if (main_thread_exiting &&
979 __pthread_main_thread->p_nextlive == __pthread_main_thread) {
980 restart(__pthread_main_thread);
981 /* Same logic as REQ_MAIN_THREAD_EXIT. */
/* Reap every terminated clone child (non-blocking waitpid loop) and, when a
   thread died from a signal, propagate that signal to all other threads
   including main.  Per-pid cleanup happens in elided lines (pthread_exited). */
985 static void pthread_reap_children(void)
990 while ((pid = waitpid_not_cancel(-1, &status, WNOHANG | __WCLONE)) > 0) {
992 if (WIFSIGNALED(status)) {
993 /* If a thread died due to a signal, send the same signal to
994 all other threads, including the main thread. */
995 pthread_kill_all_threads(WTERMSIG(status), 1);
1001 /* Try to free the resources of a thread when requested by pthread_join
1002 or pthread_detach on a terminated thread. */
/* Handle-level front end for pthread_free(): validates the handle, and if
   the thread's process is still running just marks it detached so it gets
   freed on exit (the marking statement is in an elided line). */
1004 static void pthread_handle_free(pthread_t th_id)
1006 pthread_handle handle = thread_handle(th_id);
1009 __pthread_lock(&handle->h_lock, NULL);
1010 if (nonexisting_handle(handle, th_id)) {
1011 /* pthread_reap_children has deallocated the thread already,
1012 nothing needs to be done */
1013 __pthread_unlock(&handle->h_lock);
1016 th = handle->h_descr;
1018 __pthread_unlock(&handle->h_lock);
1021 /* The Unix process of the thread is still running.
1022 Mark the thread as detached so that the thread manager will
1023 deallocate its resources when the Unix process exits. */
1025 __pthread_unlock(&handle->h_lock);
1029 /* Send a signal to all running threads */
/* Walks the circular live list starting after the main thread; the main
   thread itself is signalled only when main_thread_also is nonzero. */
1031 static void pthread_kill_all_threads(int sig, int main_thread_also)
1034 for (th = __pthread_main_thread->p_nextlive;
1035 th != __pthread_main_thread;
1036 th = th->p_nextlive) {
1037 kill(th->p_pid, sig);
1039 if (main_thread_also) {
1040 kill(__pthread_main_thread->p_pid, sig);
/* Apply fn(arg, thread) to every live thread, main thread last.
   (The per-thread fn call inside the loop is in an elided line.) */
1044 static void pthread_for_each_thread(void *arg,
1045 void (*fn)(void *, pthread_descr))
1049 for (th = __pthread_main_thread->p_nextlive;
1050 th != __pthread_main_thread;
1051 th = th->p_nextlive) {
1055 fn(arg, __pthread_main_thread);
1058 /* Process-wide exit() */
/* Implements process exit requested by one thread: records the exit code,
   cancels every other thread, waits for them all, resets stdio lock state,
   and restarts the issuing thread so it can run atexit handlers and exit.
   Declared noreturn; the manager's own termination is in elided lines. */
1060 static void pthread_handle_exit(pthread_descr issuing_thread, int exitcode)
1063 __pthread_exit_requested = 1;
1064 __pthread_exit_code = exitcode;
1065 /* A forced asynchronous cancellation follows. Make sure we won't
1066 get stuck later in the main thread with a system lock being held
1067 by one of the cancelled threads. Ideally one would use the same
1068 code as in pthread_atfork(), but we can't distinguish system and
1069 user handlers there. */
1071 /* Send the CANCEL signal to all running threads, including the main
1072 thread, but excluding the thread from which the exit request originated
1073 (that thread must complete the exit, e.g. calling atexit functions
1074 and flushing stdio buffers). */
1075 for (th = issuing_thread->p_nextlive;
1076 th != issuing_thread;
1077 th = th->p_nextlive) {
1078 kill(th->p_pid, __pthread_sig_cancel);
1080 /* Now, wait for all these threads, so that they don't become zombies
1081 and their times are properly added to the thread manager's times. */
1082 for (th = issuing_thread->p_nextlive;
1083 th != issuing_thread;
1084 th = th->p_nextlive) {
1085 waitpid(th->p_pid, NULL, __WCLONE);
/* Clear any stdio stream locks a cancelled thread may still hold. */
1087 __fresetlockfiles();
1088 restart(issuing_thread);
1092 /* Handler for __pthread_sig_cancel in thread manager thread */
/* Records that children terminated; while the main thread is exiting it
   also posts a REQ_KICK request to wake the manager out of its 2-second
   __poll() so shutdown is not delayed. */
1094 void __pthread_manager_sighandler(int sig)
/* Kick only on the first notification after main_thread_exiting was set. */
1096 int kick_manager = terminated_children == 0 && main_thread_exiting;
1097 terminated_children = 1;
1099 /* If the main thread is terminating, kick the thread manager loop
1100 each time some threads terminate. This eliminates a two second
1101 shutdown delay caused by the thread manager sleeping in the
1102 call to __poll(). Instead, the thread manager is kicked into
1103 action, reaps the outstanding threads and resumes the main thread
1104 so that it can complete the shutdown. */
1107 struct pthread_request request;
1108 request.req_thread = 0;
1109 request.req_kind = REQ_KICK;
1110 TEMP_FAILURE_RETRY(write_not_cancel(__pthread_manager_request,
1111 (char *) &request, sizeof(request)));
1115 /* Adjust priority of thread manager so that it always run at a priority
1116 higher than all threads */
/* No-op when the manager is already at or above thread_prio; otherwise
   bumps it to thread_prio + 1 under SCHED_FIFO (clamped to the SCHED_FIFO
   maximum) and caches the new level in p_priority. */
1118 void __pthread_manager_adjust_prio(int thread_prio)
1120 struct sched_param param;
1122 if (thread_prio <= manager_thread->p_priority) return;
1123 param.sched_priority =
1124 thread_prio < __sched_get_priority_max(SCHED_FIFO)
1125 ? thread_prio + 1 : thread_prio;
1126 __sched_setscheduler(manager_thread->p_pid, SCHED_FIFO, &param);
1127 manager_thread->p_priority = thread_prio;